author    David Woodhouse <David.Woodhouse@intel.com>  2008-08-12 11:28:00 +0100
committer David Woodhouse <David.Woodhouse@intel.com>  2008-08-12 11:28:00 +0100
commit    742c52533b05d8ae83c794bd6811100675b85ce5 (patch)
tree      de89a81d88c19504d1dc4f023a4b480c9022b3b5 /drivers/net
parent    36cd4fb5d277f34fe9e4db0deac2d4efd7dff735 (diff)
parent    10fec20ef5eec1c91913baec1225400f0d02df40 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: include/asm-arm/arch-omap/onenand.h
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c523.c | 4
-rw-r--r--  drivers/net/3c527.c | 9
-rw-r--r--  drivers/net/3c59x.c | 14
-rw-r--r--  drivers/net/8390.c | 13
-rw-r--r--  drivers/net/8390p.c | 19
-rw-r--r--  drivers/net/Kconfig | 5
-rw-r--r--  drivers/net/arm/am79c961a.c | 2
-rw-r--r--  drivers/net/arm/at91_ether.c | 6
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 4
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c | 4
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/atlx/atl1.c | 19
-rw-r--r--  drivers/net/atp.c | 9
-rw-r--r--  drivers/net/bfin_mac.c | 111
-rw-r--r--  drivers/net/bnx2x_main.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 394
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 3
-rw-r--r--  drivers/net/cs89x0.c | 2
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 2
-rw-r--r--  drivers/net/de620.c | 7
-rw-r--r--  drivers/net/dm9000.c | 5
-rw-r--r--  drivers/net/e1000e/e1000.h | 31
-rw-r--r--  drivers/net/e1000e/ethtool.c | 44
-rw-r--r--  drivers/net/e1000e/netdev.c | 246
-rw-r--r--  drivers/net/e1000e/param.c | 31
-rw-r--r--  drivers/net/eepro.c | 8
-rw-r--r--  drivers/net/ehea/ehea_main.c | 4
-rw-r--r--  drivers/net/enc28j60.c | 6
-rw-r--r--  drivers/net/eth16i.c | 1
-rw-r--r--  drivers/net/forcedeth.c | 174
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 2
-rw-r--r--  drivers/net/gianfar.c | 4
-rw-r--r--  drivers/net/hamradio/mkiss.c | 2
-rw-r--r--  drivers/net/ifb.c | 12
-rw-r--r--  drivers/net/igb/e1000_82575.c | 72
-rw-r--r--  drivers/net/igb/e1000_82575.h | 1
-rw-r--r--  drivers/net/igb/e1000_defines.h | 1
-rw-r--r--  drivers/net/igb/e1000_hw.h | 1
-rw-r--r--  drivers/net/igb/e1000_mac.c | 84
-rw-r--r--  drivers/net/igb/e1000_mac.h | 5
-rw-r--r--  drivers/net/igb/e1000_regs.h | 3
-rw-r--r--  drivers/net/igb/igb_main.c | 30
-rw-r--r--  drivers/net/irda/act200l-sir.c | 10
-rw-r--r--  drivers/net/irda/actisys-sir.c | 2
-rw-r--r--  drivers/net/irda/ali-ircc.c | 246
-rw-r--r--  drivers/net/irda/donauboe.c | 68
-rw-r--r--  drivers/net/irda/ep7211-sir.c | 2
-rw-r--r--  drivers/net/irda/girbil-sir.c | 12
-rw-r--r--  drivers/net/irda/irda-usb.c | 92
-rw-r--r--  drivers/net/irda/irtty-sir.c | 10
-rw-r--r--  drivers/net/irda/kingsun-sir.c | 2
-rw-r--r--  drivers/net/irda/litelink-sir.c | 8
-rw-r--r--  drivers/net/irda/ma600-sir.c | 16
-rw-r--r--  drivers/net/irda/mcp2120-sir.c | 12
-rw-r--r--  drivers/net/irda/nsc-ircc.c | 119
-rw-r--r--  drivers/net/irda/nsc-ircc.h | 3
-rw-r--r--  drivers/net/irda/old_belkin-sir.c | 8
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 4
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 2
-rw-r--r--  drivers/net/irda/sir_dev.c | 63
-rw-r--r--  drivers/net/irda/sir_dongle.c | 2
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 120
-rw-r--r--  drivers/net/irda/tekram-sir.c | 10
-rw-r--r--  drivers/net/irda/toim3232-sir.c | 10
-rw-r--r--  drivers/net/irda/via-ircc.c | 80
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 92
-rw-r--r--  drivers/net/irda/vlsi_ir.h | 2
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 62
-rw-r--r--  drivers/net/ixp2000/ixp2400-msf.c | 4
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 1
-rw-r--r--  drivers/net/lp486e.c | 2
-rw-r--r--  drivers/net/macb.c | 4
-rw-r--r--  drivers/net/meth.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 358
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 1
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h | 52
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp_gen_header.h | 2
-rw-r--r--  drivers/net/ne.c | 6
-rw-r--r--  drivers/net/netconsole.c | 2
-rw-r--r--  drivers/net/netx-eth.c | 11
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 41
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c | 9
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 35
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 10
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 103
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.h | 13
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 5
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 99
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c | 16
-rw-r--r--  drivers/net/netxen/netxen_nic_phan_reg.h | 4
-rw-r--r--  drivers/net/ni5010.c | 1
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/qla3xxx.c | 23
-rw-r--r--  drivers/net/qla3xxx.h | 105
-rw-r--r--  drivers/net/s2io.c | 29
-rw-r--r--  drivers/net/sh_eth.c | 257
-rw-r--r--  drivers/net/sh_eth.h | 444
-rw-r--r--  drivers/net/skfp/smt.c | 13
-rw-r--r--  drivers/net/sky2.c | 103
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/net/smc911x.h | 2
-rw-r--r--  drivers/net/smc91x.h | 6
-rw-r--r--  drivers/net/stnic.c | 2
-rw-r--r--  drivers/net/sun3_82586.c | 7
-rw-r--r--  drivers/net/tg3.c | 87
-rw-r--r--  drivers/net/tokenring/3c359.c | 8
-rw-r--r--  drivers/net/usb/dm9601.c | 52
-rw-r--r--  drivers/net/usb/pegasus.c | 21
-rw-r--r--  drivers/net/via-velocity.c | 301
-rw-r--r--  drivers/net/via-velocity.h | 50
-rw-r--r--  drivers/net/wan/Kconfig | 15
-rw-r--r--  drivers/net/wan/Makefile | 11
-rw-r--r--  drivers/net/wan/cosa.c | 293
-rw-r--r--  drivers/net/wan/dscc4.c | 1
-rw-r--r--  drivers/net/wan/farsync.c | 5
-rw-r--r--  drivers/net/wan/farsync.h | 6
-rw-r--r--  drivers/net/wan/hdlc.c | 25
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 29
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 19
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 15
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 15
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 17
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 17
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 382
-rw-r--r--  drivers/net/wan/lmc/lmc.h | 11
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.c | 7
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.h | 6
-rw-r--r--  drivers/net/wan/lmc/lmc_ioctl.h | 2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 672
-rw-r--r--  drivers/net/wan/lmc/lmc_media.c | 66
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 146
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.h | 14
-rw-r--r--  drivers/net/wan/lmc/lmc_var.h | 360
-rw-r--r--  drivers/net/wan/pc300.h | 228
-rw-r--r--  drivers/net/wan/pc300_drv.c | 146
-rw-r--r--  drivers/net/wan/sealevel.c | 361
-rw-r--r--  drivers/net/wan/syncppp.c | 9
-rw-r--r--  drivers/net/wan/z85230.c | 193
-rw-r--r--  drivers/net/wan/z85230.h | 10
-rw-r--r--  drivers/net/wd.c | 2
-rw-r--r--  drivers/net/wireless/Kconfig | 2
-rw-r--r--  drivers/net/wireless/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath5k/ath5k.h | 8
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 3
-rw-r--r--  drivers/net/wireless/ath5k/debug.c | 2
-rw-r--r--  drivers/net/wireless/ath5k/debug.h | 1
-rw-r--r--  drivers/net/wireless/ath5k/hw.c | 239
-rw-r--r--  drivers/net/wireless/ath5k/initvals.c | 4
-rw-r--r--  drivers/net/wireless/ath5k/phy.c | 185
-rw-r--r--  drivers/net/wireless/ath5k/reg.h | 934
-rw-r--r--  drivers/net/wireless/ath9k/Kconfig | 8
-rw-r--r--  drivers/net/wireless/ath9k/Makefile | 11
-rw-r--r--  drivers/net/wireless/ath9k/ath9k.h | 1021
-rw-r--r--  drivers/net/wireless/ath9k/beacon.c | 979
-rw-r--r--  drivers/net/wireless/ath9k/core.c | 1923
-rw-r--r--  drivers/net/wireless/ath9k/core.h | 1072
-rw-r--r--  drivers/net/wireless/ath9k/hw.c | 8571
-rw-r--r--  drivers/net/wireless/ath9k/hw.h | 969
-rw-r--r--  drivers/net/wireless/ath9k/initvals.h | 3146
-rw-r--r--  drivers/net/wireless/ath9k/main.c | 1470
-rw-r--r--  drivers/net/wireless/ath9k/phy.c | 436
-rw-r--r--  drivers/net/wireless/ath9k/phy.h | 543
-rw-r--r--  drivers/net/wireless/ath9k/rc.c | 2126
-rw-r--r--  drivers/net/wireless/ath9k/rc.h | 316
-rw-r--r--  drivers/net/wireless/ath9k/recv.c | 1318
-rw-r--r--  drivers/net/wireless/ath9k/reg.h | 1385
-rw-r--r--  drivers/net/wireless/ath9k/regd.c | 1026
-rw-r--r--  drivers/net/wireless/ath9k/regd.h | 412
-rw-r--r--  drivers/net/wireless/ath9k/regd_common.h | 1915
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c | 2871
-rw-r--r--  drivers/net/wireless/ipw2200.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 98
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.c | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 158
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 71
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965-rs.c) | 327
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-rs.h) | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c (renamed from drivers/net/wireless/iwlwifi/iwl4965-base.c) | 236
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 44
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 69
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 45
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.h | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 59
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 120
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 58
-rw-r--r--  drivers/net/wireless/libertas/main.c | 15
-rw-r--r--  drivers/net/wireless/orinoco.c | 7
-rw-r--r--  drivers/net/wireless/p54/p54.h | 2
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 24
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 20
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00config.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00debug.c | 17
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 55
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.h | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 5
-rw-r--r--  drivers/net/wireless/rtl8187.h | 4
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 17
-rw-r--r--  drivers/net/wireless/wavelan.c | 3
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 6
-rw-r--r--  drivers/net/xen-netfront.c | 2
221 files changed, 37227 insertions, 5202 deletions
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index dc6e474229b..e2ce41d3828 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -640,10 +640,8 @@ static int init586(struct net_device *dev)
cfg_cmd->time_low = 0x00;
cfg_cmd->time_high = 0xf2;
cfg_cmd->promisc = 0;
- if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
+ if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
cfg_cmd->promisc = 1;
- dev->flags |= IFF_PROMISC;
- }
cfg_cmd->carr_coll = 0x00;
p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6aca0c640f1..abc84f76597 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1521,14 +1521,11 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
struct mc32_local *lp = netdev_priv(dev);
u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
- if (dev->flags&IFF_PROMISC)
+ if ((dev->flags&IFF_PROMISC) ||
+ (dev->flags&IFF_ALLMULTI) ||
+ dev->mc_count > 10)
/* Enable promiscuous mode */
filt |= 1;
- else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
- {
- dev->flags|=IFF_PROMISC;
- filt |= 1;
- }
else if(dev->mc_count)
{
unsigned char block[62];
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8db4e6b8948..491ee16da5c 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1692,12 +1692,14 @@ vortex_open(struct net_device *dev)
vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
vp->rx_ring[i].status = 0; /* Clear complete bit. */
vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
- skb = dev_alloc_skb(PKT_BUF_SZ);
+
+ skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
+ GFP_KERNEL);
vp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* Bad news! */
- skb->dev = dev; /* Mark as being used by this device. */
- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+
+ skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
@@ -2538,7 +2540,7 @@ boomerang_rx(struct net_device *dev)
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2549,8 +2551,8 @@ boomerang_rx(struct net_device *dev)
mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
break; /* Bad news! */
}
- skb->dev = dev; /* Mark as being used by this device. */
- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+
+ skb_reserve(skb, NET_IP_ALIGN);
vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
vp->rx_skbuff[entry] = skb;
}
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index dc5d2584bd0..f72a2e87d56 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -9,42 +9,39 @@ int ei_open(struct net_device *dev)
{
return __ei_open(dev);
}
+EXPORT_SYMBOL(ei_open);
int ei_close(struct net_device *dev)
{
return __ei_close(dev);
}
+EXPORT_SYMBOL(ei_close);
irqreturn_t ei_interrupt(int irq, void *dev_id)
{
return __ei_interrupt(irq, dev_id);
}
+EXPORT_SYMBOL(ei_interrupt);
#ifdef CONFIG_NET_POLL_CONTROLLER
void ei_poll(struct net_device *dev)
{
__ei_poll(dev);
}
+EXPORT_SYMBOL(ei_poll);
#endif
struct net_device *__alloc_ei_netdev(int size)
{
return ____alloc_ei_netdev(size);
}
+EXPORT_SYMBOL(__alloc_ei_netdev);
void NS8390_init(struct net_device *dev, int startp)
{
__NS8390_init(dev, startp);
}
-
-EXPORT_SYMBOL(ei_open);
-EXPORT_SYMBOL(ei_close);
-EXPORT_SYMBOL(ei_interrupt);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-EXPORT_SYMBOL(ei_poll);
-#endif
EXPORT_SYMBOL(NS8390_init);
-EXPORT_SYMBOL(__alloc_ei_netdev);
#if defined(MODULE)
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index 71f19884c4b..4c6eea4611a 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -4,9 +4,9 @@ static const char version[] =
"8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
#define ei_inb(_p) inb(_p)
-#define ei_outb(_v,_p) outb(_v,_p)
+#define ei_outb(_v, _p) outb(_v, _p)
#define ei_inb_p(_p) inb_p(_p)
-#define ei_outb_p(_v,_p) outb_p(_v,_p)
+#define ei_outb_p(_v, _p) outb_p(_v, _p)
#include "lib8390.c"
@@ -14,42 +14,39 @@ int eip_open(struct net_device *dev)
{
return __ei_open(dev);
}
+EXPORT_SYMBOL(eip_open);
int eip_close(struct net_device *dev)
{
return __ei_close(dev);
}
+EXPORT_SYMBOL(eip_close);
irqreturn_t eip_interrupt(int irq, void *dev_id)
{
return __ei_interrupt(irq, dev_id);
}
+EXPORT_SYMBOL(eip_interrupt);
#ifdef CONFIG_NET_POLL_CONTROLLER
void eip_poll(struct net_device *dev)
{
__ei_poll(dev);
}
+EXPORT_SYMBOL(eip_poll);
#endif
struct net_device *__alloc_eip_netdev(int size)
{
return ____alloc_ei_netdev(size);
}
+EXPORT_SYMBOL(__alloc_eip_netdev);
void NS8390p_init(struct net_device *dev, int startp)
{
- return __NS8390_init(dev, startp);
+ __NS8390_init(dev, startp);
}
-
-EXPORT_SYMBOL(eip_open);
-EXPORT_SYMBOL(eip_close);
-EXPORT_SYMBOL(eip_interrupt);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-EXPORT_SYMBOL(eip_poll);
-#endif
EXPORT_SYMBOL(NS8390p_init);
-EXPORT_SYMBOL(__alloc_eip_netdev);
#if defined(MODULE)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fa533c27052..4b4cb2bf4f1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -510,14 +510,15 @@ config STNIC
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on SUPERH && \
- (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712)
+ (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \
+ CPU_SUBTYPE_SH7619)
select CRC32
select MII
select MDIO_BITBANG
select PHYLIB
help
Renesas SuperH Ethernet device driver.
- This driver support SH7710 and SH7712.
+ This driver support SH7710, SH7712, SH7763 and SH7619.
config SUNLANCE
tristate "Sun LANCE support"
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index a637910b02d..aa4a5246be5 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -28,7 +28,7 @@
#include <linux/bitops.h>
#include <linux/platform_device.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/system.h>
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index ffae266e2d7..0fa53464efb 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -32,9 +32,9 @@
#include <asm/uaccess.h>
#include <asm/mach-types.h>
-#include <asm/arch/at91rm9200_emac.h>
-#include <asm/arch/gpio.h>
-#include <asm/arch/board.h>
+#include <mach/at91rm9200_emac.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
#include "at91_ether.h"
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 18d3eeb7eab..1267444d79d 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -20,8 +20,8 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <asm/arch/ep93xx-regs.h>
-#include <asm/arch/platform.h>
+#include <mach/ep93xx-regs.h>
+#include <mach/platform.h>
#include <asm/io.h>
#define DRV_MODULE_NAME "ep93xx-eth"
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 9b777d9433c..020771bfb60 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -32,8 +32,8 @@
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
-#include <asm/arch/npe.h>
-#include <asm/arch/qmgr.h>
+#include <mach/npe.h>
+#include <mach/qmgr.h>
#define DEBUG_QUEUES 0
#define DEBUG_DESC 0
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 35264c244cf..82d7be1655d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -47,7 +47,7 @@ MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
+static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
static const u16
atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
@@ -1037,7 +1037,7 @@ static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
return;
}
-static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
+static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
{
u32 value;
struct atl1e_hw *hw = &adapter->hw;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index f12e3d12474..e6a7bb79d4d 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1790,6 +1790,17 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
+ /*
+ * The L1 hardware contains a bug that erroneously sets the
+ * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
+ * fragmented IP packet is received, even though the packet
+ * is perfectly valid and its checksum is correct. There's
+ * no way to distinguish between one of these good packets
+ * and a packet that actually contains a TCP/UDP checksum
+ * error, so all we can do is allow it to be handed up to
+ * the higher layers and let it be sorted out there.
+ */
+
skb->ip_summed = CHECKSUM_NONE;
if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
@@ -1816,14 +1827,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
return;
}
- /* IPv4, but hardware thinks its checksum is wrong */
- if (netif_msg_rx_err(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
- "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
- rrd->pkt_flg, rrd->err_flg);
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
- adapter->hw_csum_err++;
return;
}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 3d4433358a3..c10cd8058e2 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -854,14 +854,9 @@ static void set_rx_mode_8002(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) {
- /* We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. - AC
- */
- dev->flags|=IFF_PROMISC;
+ if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
lp->addr_mode = CMR2h_PROMISC;
- } else
+ else
lp->addr_mode = CMR2h_Normal;
write_reg_high(ioaddr, CMR2, lp->addr_mode);
}
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index a8ec60e1ed7..3db7db1828e 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -605,36 +605,87 @@ adjust_head:
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int data;
+ u16 *data;
current_tx_ptr->skb = skb;
- /*
- * Is skb->data always 16-bit aligned?
- * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)?
- */
- if ((((unsigned int)(skb->data)) & 0x02) == 2) {
- /* move skb->data to current_tx_ptr payload */
- data = (unsigned int)(skb->data) - 2;
- *((unsigned short *)data) = (unsigned short)(skb->len);
- current_tx_ptr->desc_a.start_addr = (unsigned long)data;
- /* this is important! */
- blackfin_dcache_flush_range(data, (data + (skb->len)) + 2);
-
+ if (ANOMALY_05000285) {
+ /*
+ * TXDWA feature is not avaible to older revision < 0.3 silicon
+ * of BF537
+ *
+ * Only if data buffer is ODD WORD alignment, we do not
+ * need to memcpy
+ */
+ u32 data_align = (u32)(skb->data) & 0x3;
+ if (data_align == 0x2) {
+ /* move skb->data to current_tx_ptr payload */
+ data = (u16 *)(skb->data) - 1;
+ *data = (u16)(skb->len);
+ current_tx_ptr->desc_a.start_addr = (u32)data;
+ /* this is important! */
+ blackfin_dcache_flush_range((u32)data,
+ (u32)((u8 *)data + skb->len + 4));
+ } else {
+ *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
+ skb->len);
+ current_tx_ptr->desc_a.start_addr =
+ (u32)current_tx_ptr->packet;
+ if (current_tx_ptr->status.status_word != 0)
+ current_tx_ptr->status.status_word = 0;
+ blackfin_dcache_flush_range(
+ (u32)current_tx_ptr->packet,
+ (u32)(current_tx_ptr->packet + skb->len + 2));
+ }
} else {
- *((unsigned short *)(current_tx_ptr->packet)) =
- (unsigned short)(skb->len);
- memcpy((char *)(current_tx_ptr->packet + 2), skb->data,
- (skb->len));
- current_tx_ptr->desc_a.start_addr =
- (unsigned long)current_tx_ptr->packet;
- if (current_tx_ptr->status.status_word != 0)
- current_tx_ptr->status.status_word = 0;
- blackfin_dcache_flush_range((unsigned int)current_tx_ptr->
- packet,
- (unsigned int)(current_tx_ptr->
- packet + skb->len) +
- 2);
+ /*
+ * TXDWA feature is avaible to revision < 0.3 silicon of
+ * BF537 and always avaible to BF52x
+ */
+ u32 data_align = (u32)(skb->data) & 0x3;
+ if (data_align == 0x0) {
+ u16 sysctl = bfin_read_EMAC_SYSCTL();
+ sysctl |= TXDWA;
+ bfin_write_EMAC_SYSCTL(sysctl);
+
+ /* move skb->data to current_tx_ptr payload */
+ data = (u16 *)(skb->data) - 2;
+ *data = (u16)(skb->len);
+ current_tx_ptr->desc_a.start_addr = (u32)data;
+ /* this is important! */
+ blackfin_dcache_flush_range(
+ (u32)data,
+ (u32)((u8 *)data + skb->len + 4));
+ } else if (data_align == 0x2) {
+ u16 sysctl = bfin_read_EMAC_SYSCTL();
+ sysctl &= ~TXDWA;
+ bfin_write_EMAC_SYSCTL(sysctl);
+
+ /* move skb->data to current_tx_ptr payload */
+ data = (u16 *)(skb->data) - 1;
+ *data = (u16)(skb->len);
+ current_tx_ptr->desc_a.start_addr = (u32)data;
+ /* this is important! */
+ blackfin_dcache_flush_range(
+ (u32)data,
+ (u32)((u8 *)data + skb->len + 4));
+ } else {
+ u16 sysctl = bfin_read_EMAC_SYSCTL();
+ sysctl &= ~TXDWA;
+ bfin_write_EMAC_SYSCTL(sysctl);
+
+ *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
+ skb->len);
+ current_tx_ptr->desc_a.start_addr =
+ (u32)current_tx_ptr->packet;
+ if (current_tx_ptr->status.status_word != 0)
+ current_tx_ptr->status.status_word = 0;
+ blackfin_dcache_flush_range(
+ (u32)current_tx_ptr->packet,
+ (u32)(current_tx_ptr->packet + skb->len + 2));
+ }
}
/* enable this packet's dma */
@@ -691,7 +742,6 @@ static void bfin_mac_rx(struct net_device *dev)
(unsigned long)skb->tail);
dev->last_rx = jiffies;
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
skb->csum = current_rx_ptr->status.ip_payload_csum;
@@ -920,6 +970,7 @@ static int bfin_mac_open(struct net_device *dev)
phy_start(lp->phydev);
phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
+ setup_mac_addr(dev->dev_addr);
bfin_mac_disable();
bfin_mac_enable();
pr_debug("hardware init finished\n");
@@ -955,7 +1006,7 @@ static int bfin_mac_close(struct net_device *dev)
return 0;
}
-static int __init bfin_mac_probe(struct platform_device *pdev)
+static int __devinit bfin_mac_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct bfin_mac_local *lp;
@@ -1081,7 +1132,7 @@ out_err_probe_mac:
return rc;
}
-static int bfin_mac_remove(struct platform_device *pdev)
+static int __devexit bfin_mac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct bfin_mac_local *lp = netdev_priv(ndev);
@@ -1128,7 +1179,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
static struct platform_driver bfin_mac_driver = {
.probe = bfin_mac_probe,
- .remove = bfin_mac_remove,
+ .remove = __devexit_p(bfin_mac_remove),
.resume = bfin_mac_resume,
.suspend = bfin_mac_suspend,
.driver = {
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index af251a5df84..272a4bd2595 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -7202,7 +7202,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->link_params.req_flow_ctrl = (bp->port.link_config &
PORT_FEATURE_FLOW_CONTROL_MASK);
if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
- (!bp->port.supported & SUPPORTED_Autoneg))
+ !(bp->port.supported & SUPPORTED_Autoneg))
bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ebb539e090c..6106660a4a4 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2107,6 +2107,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
aggregator = __get_first_agg(port);
ad_agg_selection_logic(aggregator);
}
+ bond_3ad_set_carrier(bond);
}
// for each port run the state machines
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a641eeaa2a2..c792138511e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2223,272 +2223,217 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
/*-------------------------------- Monitoring -------------------------------*/
-/*
- * if !have_locks, return nonzero if a failover is necessary. if
- * have_locks, do whatever failover activities are needed.
- *
- * This is to separate the inspection and failover steps for locking
- * purposes; failover requires rtnl, but acquiring it for every
- * inspection is undesirable, so a wrapper first does inspection, and
- * the acquires the necessary locks and calls again to perform
- * failover if needed. Since all locks are dropped, a complete
- * restart is needed between calls.
- */
-static int __bond_mii_monitor(struct bonding *bond, int have_locks)
-{
- struct slave *slave, *oldcurrent;
- int do_failover = 0;
- int i;
-
- if (bond->slave_cnt == 0)
- goto out;
- /* we will try to read the link status of each of our slaves, and
- * set their IFF_RUNNING flag appropriately. For each slave not
- * supporting MII status, we won't do anything so that a user-space
- * program could monitor the link itself if needed.
- */
-
- read_lock(&bond->curr_slave_lock);
- oldcurrent = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
+static int bond_miimon_inspect(struct bonding *bond)
+{
+ struct slave *slave;
+ int i, link_state, commit = 0;
bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- int link_state;
- u16 old_speed = slave->speed;
- u8 old_duplex = slave->duplex;
+ slave->new_link = BOND_LINK_NOCHANGE;
- link_state = bond_check_dev_link(bond, slave_dev, 0);
+ link_state = bond_check_dev_link(bond, slave->dev, 0);
switch (slave->link) {
- case BOND_LINK_UP: /* the link was up */
- if (link_state == BMSR_LSTATUS) {
- if (!oldcurrent) {
- if (!have_locks)
- return 1;
- do_failover = 1;
- }
- break;
- } else { /* link going down */
- slave->link = BOND_LINK_FAIL;
- slave->delay = bond->params.downdelay;
-
- if (slave->link_failure_count < UINT_MAX) {
- slave->link_failure_count++;
- }
+ case BOND_LINK_UP:
+ if (link_state)
+ continue;
- if (bond->params.downdelay) {
- printk(KERN_INFO DRV_NAME
- ": %s: link status down for %s "
- "interface %s, disabling it in "
- "%d ms.\n",
- bond->dev->name,
- IS_UP(slave_dev)
- ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
- ? ((slave == oldcurrent)
- ? "active " : "backup ")
- : "")
- : "idle ",
- slave_dev->name,
- bond->params.downdelay * bond->params.miimon);
- }
+ slave->link = BOND_LINK_FAIL;
+ slave->delay = bond->params.downdelay;
+ if (slave->delay) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status down for %s"
+ "interface %s, disabling it in %d ms.\n",
+ bond->dev->name,
+ (bond->params.mode ==
+ BOND_MODE_ACTIVEBACKUP) ?
+ ((slave->state == BOND_STATE_ACTIVE) ?
+ "active " : "backup ") : "",
+ slave->dev->name,
+ bond->params.downdelay * bond->params.miimon);
}
- /* no break ! fall through the BOND_LINK_FAIL test to
- ensure proper action to be taken
- */
- case BOND_LINK_FAIL: /* the link has just gone down */
- if (link_state != BMSR_LSTATUS) {
- /* link stays down */
- if (slave->delay <= 0) {
- if (!have_locks)
- return 1;
-
- /* link down for too long time */
- slave->link = BOND_LINK_DOWN;
-
- /* in active/backup mode, we must
- * completely disable this interface
- */
- if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) ||
- (bond->params.mode == BOND_MODE_8023AD)) {
- bond_set_slave_inactive_flags(slave);
- }
-
- printk(KERN_INFO DRV_NAME
- ": %s: link status definitely "
- "down for interface %s, "
- "disabling it\n",
- bond->dev->name,
- slave_dev->name);
-
- /* notify ad that the link status has changed */
- if (bond->params.mode == BOND_MODE_8023AD) {
- bond_3ad_handle_link_change(slave, BOND_LINK_DOWN);
- }
-
- if ((bond->params.mode == BOND_MODE_TLB) ||
- (bond->params.mode == BOND_MODE_ALB)) {
- bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN);
- }
-
- if (slave == oldcurrent) {
- do_failover = 1;
- }
- } else {
- slave->delay--;
- }
- } else {
- /* link up again */
- slave->link = BOND_LINK_UP;
+ /*FALLTHRU*/
+ case BOND_LINK_FAIL:
+ if (link_state) {
+ /*
+ * recovered before downdelay expired
+ */
+ slave->link = BOND_LINK_UP;
slave->jiffies = jiffies;
printk(KERN_INFO DRV_NAME
": %s: link status up again after %d "
"ms for interface %s.\n",
bond->dev->name,
- (bond->params.downdelay - slave->delay) * bond->params.miimon,
- slave_dev->name);
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon,
+ slave->dev->name);
+ continue;
}
- break;
- case BOND_LINK_DOWN: /* the link was down */
- if (link_state != BMSR_LSTATUS) {
- /* the link stays down, nothing more to do */
- break;
- } else { /* link going up */
- slave->link = BOND_LINK_BACK;
- slave->delay = bond->params.updelay;
- if (bond->params.updelay) {
- /* if updelay == 0, no need to
- advertise about a 0 ms delay */
- printk(KERN_INFO DRV_NAME
- ": %s: link status up for "
- "interface %s, enabling it "
- "in %d ms.\n",
- bond->dev->name,
- slave_dev->name,
- bond->params.updelay * bond->params.miimon);
- }
+ if (slave->delay <= 0) {
+ slave->new_link = BOND_LINK_DOWN;
+ commit++;
+ continue;
}
- /* no break ! fall through the BOND_LINK_BACK state in
- case there's something to do.
- */
- case BOND_LINK_BACK: /* the link has just come back */
- if (link_state != BMSR_LSTATUS) {
- /* link down again */
- slave->link = BOND_LINK_DOWN;
+ slave->delay--;
+ break;
+
+ case BOND_LINK_DOWN:
+ if (!link_state)
+ continue;
+
+ slave->link = BOND_LINK_BACK;
+ slave->delay = bond->params.updelay;
+
+ if (slave->delay) {
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status up for "
+ "interface %s, enabling it in %d ms.\n",
+ bond->dev->name, slave->dev->name,
+ bond->params.updelay *
+ bond->params.miimon);
+ }
+ /*FALLTHRU*/
+ case BOND_LINK_BACK:
+ if (!link_state) {
+ slave->link = BOND_LINK_DOWN;
printk(KERN_INFO DRV_NAME
": %s: link status down again after %d "
"ms for interface %s.\n",
bond->dev->name,
- (bond->params.updelay - slave->delay) * bond->params.miimon,
- slave_dev->name);
- } else {
- /* link stays up */
- if (slave->delay == 0) {
- if (!have_locks)
- return 1;
-
- /* now the link has been up for long time enough */
- slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
-
- if (bond->params.mode == BOND_MODE_8023AD) {
- /* prevent it from being the active one */
- slave->state = BOND_STATE_BACKUP;
- } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
- /* make it immediately active */
- slave->state = BOND_STATE_ACTIVE;
- } else if (slave != bond->primary_slave) {
- /* prevent it from being the active one */
- slave->state = BOND_STATE_BACKUP;
- }
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon,
+ slave->dev->name);
- printk(KERN_INFO DRV_NAME
- ": %s: link status definitely "
- "up for interface %s.\n",
- bond->dev->name,
- slave_dev->name);
-
- /* notify ad that the link status has changed */
- if (bond->params.mode == BOND_MODE_8023AD) {
- bond_3ad_handle_link_change(slave, BOND_LINK_UP);
- }
-
- if ((bond->params.mode == BOND_MODE_TLB) ||
- (bond->params.mode == BOND_MODE_ALB)) {
- bond_alb_handle_link_change(bond, slave, BOND_LINK_UP);
- }
-
- if ((!oldcurrent) ||
- (slave == bond->primary_slave)) {
- do_failover = 1;
- }
- } else {
- slave->delay--;
- }
+ continue;
}
+
+ if (slave->delay <= 0) {
+ slave->new_link = BOND_LINK_UP;
+ commit++;
+ continue;
+ }
+
+ slave->delay--;
break;
- default:
- /* Should not happen */
- printk(KERN_ERR DRV_NAME
- ": %s: Error: %s Illegal value (link=%d)\n",
- bond->dev->name,
- slave->dev->name,
- slave->link);
- goto out;
- } /* end of switch (slave->link) */
+ }
+ }
- bond_update_speed_duplex(slave);
+ return commit;
+}
- if (bond->params.mode == BOND_MODE_8023AD) {
- if (old_speed != slave->speed) {
- bond_3ad_adapter_speed_changed(slave);
- }
+static void bond_miimon_commit(struct bonding *bond)
+{
+ struct slave *slave;
+ int i;
+
+ bond_for_each_slave(bond, slave, i) {
+ switch (slave->new_link) {
+ case BOND_LINK_NOCHANGE:
+ continue;
+
+ case BOND_LINK_UP:
+ slave->link = BOND_LINK_UP;
+ slave->jiffies = jiffies;
- if (old_duplex != slave->duplex) {
- bond_3ad_adapter_duplex_changed(slave);
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* prevent it from being the active one */
+ slave->state = BOND_STATE_BACKUP;
+ } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ /* make it immediately active */
+ slave->state = BOND_STATE_ACTIVE;
+ } else if (slave != bond->primary_slave) {
+ /* prevent it from being the active one */
+ slave->state = BOND_STATE_BACKUP;
}
- }
- } /* end of for */
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status definitely "
+ "up for interface %s.\n",
+ bond->dev->name, slave->dev->name);
- if (do_failover) {
- ASSERT_RTNL();
+ /* notify ad that the link status has changed */
+ if (bond->params.mode == BOND_MODE_8023AD)
+ bond_3ad_handle_link_change(slave, BOND_LINK_UP);
- write_lock_bh(&bond->curr_slave_lock);
+ if ((bond->params.mode == BOND_MODE_TLB) ||
+ (bond->params.mode == BOND_MODE_ALB))
+ bond_alb_handle_link_change(bond, slave,
+ BOND_LINK_UP);
- bond_select_active_slave(bond);
+ if (!bond->curr_active_slave ||
+ (slave == bond->primary_slave))
+ goto do_failover;
- write_unlock_bh(&bond->curr_slave_lock);
+ continue;
- } else
- bond_set_carrier(bond);
+ case BOND_LINK_DOWN:
+ slave->link = BOND_LINK_DOWN;
-out:
- return 0;
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
+ bond->params.mode == BOND_MODE_8023AD)
+ bond_set_slave_inactive_flags(slave);
+
+ printk(KERN_INFO DRV_NAME
+ ": %s: link status definitely down for "
+ "interface %s, disabling it\n",
+ bond->dev->name, slave->dev->name);
+
+ if (bond->params.mode == BOND_MODE_8023AD)
+ bond_3ad_handle_link_change(slave,
+ BOND_LINK_DOWN);
+
+ if (bond->params.mode == BOND_MODE_TLB ||
+ bond->params.mode == BOND_MODE_ALB)
+ bond_alb_handle_link_change(bond, slave,
+ BOND_LINK_DOWN);
+
+ if (slave == bond->curr_active_slave)
+ goto do_failover;
+
+ continue;
+
+ default:
+ printk(KERN_ERR DRV_NAME
+ ": %s: invalid new link %d on slave %s\n",
+ bond->dev->name, slave->new_link,
+ slave->dev->name);
+ slave->new_link = BOND_LINK_NOCHANGE;
+
+ continue;
+ }
+
+do_failover:
+ ASSERT_RTNL();
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ }
+
+ bond_set_carrier(bond);
}
/*
* bond_mii_monitor
*
* Really a wrapper that splits the mii monitor into two phases: an
- * inspection, then (if inspection indicates something needs to be
- * done) an acquisition of appropriate locks followed by another pass
- * to implement whatever link state changes are indicated.
+ * inspection, then (if inspection indicates something needs to be done)
+ * an acquisition of appropriate locks followed by a commit phase to
+ * implement whatever link state changes are indicated.
*/
void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- unsigned long delay;
read_lock(&bond->lock);
- if (bond->kill_timers) {
- read_unlock(&bond->lock);
- return;
- }
+ if (bond->kill_timers)
+ goto out;
+
+ if (bond->slave_cnt == 0)
+ goto re_arm;
if (bond->send_grat_arp) {
read_lock(&bond->curr_slave_lock);
@@ -2496,19 +2441,24 @@ void bond_mii_monitor(struct work_struct *work)
read_unlock(&bond->curr_slave_lock);
}
- if (__bond_mii_monitor(bond, 0)) {
+ if (bond_miimon_inspect(bond)) {
read_unlock(&bond->lock);
rtnl_lock();
read_lock(&bond->lock);
- __bond_mii_monitor(bond, 1);
+
+ bond_miimon_commit(bond);
+
read_unlock(&bond->lock);
rtnl_unlock(); /* might sleep, hold no other locks */
read_lock(&bond->lock);
}
- delay = msecs_to_jiffies(bond->params.miimon);
+re_arm:
+ if (bond->params.miimon)
+ queue_delayed_work(bond->wq, &bond->mii_work,
+ msecs_to_jiffies(bond->params.miimon));
+out:
read_unlock(&bond->lock);
- queue_delayed_work(bond->wq, &bond->mii_work, delay);
}
static __be32 bond_glean_dev_ip(struct net_device *dev)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6caac0ffb2f..3bdb4738252 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -350,9 +350,6 @@ static ssize_t bonding_store_slaves(struct device *d,
if (dev) {
printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
bond->dev->name, dev->name);
- if (bond->setup_by_slave)
- res = bond_release_and_destroy(bond->dev, dev);
- else
res = bond_release(bond->dev, dev);
if (res) {
ret = res;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index fba87abe78e..ea6144a9565 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -189,7 +189,7 @@ static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
#elif defined(CONFIG_ARCH_PNX010X)
#include <asm/irq.h>
-#include <asm/arch/gpio.h>
+#include <mach/gpio.h>
#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */
#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0};
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 47d51788a46..04c0e90119a 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -683,7 +683,7 @@ enum {
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
- FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
+ FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
FW_MIN_SIZE = 8 /* at least version and csum */
};
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 3f5190c654c..d454e143483 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -488,13 +488,6 @@ static void de620_set_multicast_list(struct net_device *dev)
{
if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{ /* Enable promiscuous mode */
- /*
- * We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. - AC
- */
- dev->flags|=IFF_PROMISC;
-
de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
}
else
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0b0f1c407a7..f42c23f4265 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1374,6 +1374,11 @@ dm9000_probe(struct platform_device *pdev)
for (i = 0; i < 6; i += 2)
dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
+ if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
+ mac_src = "platform data";
+ memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+ }
+
if (!is_valid_ether_addr(ndev->dev_addr)) {
/* try reading from mac */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 4a4f62e002b..cf57050d99d 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -41,24 +41,25 @@
struct e1000_info;
-#define ndev_printk(level, netdev, format, arg...) \
- printk(level "%s: " format, (netdev)->name, ## arg)
+#define e_printk(level, adapter, format, arg...) \
+ printk(level "%s: %s: " format, pci_name(adapter->pdev), \
+ adapter->netdev->name, ## arg)
#ifdef DEBUG
-#define ndev_dbg(netdev, format, arg...) \
- ndev_printk(KERN_DEBUG , netdev, format, ## arg)
+#define e_dbg(format, arg...) \
+ e_printk(KERN_DEBUG , adapter, format, ## arg)
#else
-#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0)
+#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
#endif
-#define ndev_err(netdev, format, arg...) \
- ndev_printk(KERN_ERR , netdev, format, ## arg)
-#define ndev_info(netdev, format, arg...) \
- ndev_printk(KERN_INFO , netdev, format, ## arg)
-#define ndev_warn(netdev, format, arg...) \
- ndev_printk(KERN_WARNING , netdev, format, ## arg)
-#define ndev_notice(netdev, format, arg...) \
- ndev_printk(KERN_NOTICE , netdev, format, ## arg)
+#define e_err(format, arg...) \
+ e_printk(KERN_ERR, adapter, format, ## arg)
+#define e_info(format, arg...) \
+ e_printk(KERN_INFO, adapter, format, ## arg)
+#define e_warn(format, arg...) \
+ e_printk(KERN_WARNING, adapter, format, ## arg)
+#define e_notice(format, arg...) \
+ e_printk(KERN_NOTICE, adapter, format, ## arg)
/* Tx/Rx descriptor defines */
@@ -283,10 +284,6 @@ struct e1000_adapter {
unsigned long led_status;
unsigned int flags;
-
- /* for ioport free */
- int bars;
- int need_ioport;
};
struct e1000_info {
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 9350564065e..cf9679f2b7c 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -189,8 +189,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
/* Fiber NICs only allow 1000 gbps Full duplex */
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
- "configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -213,8 +212,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
- "configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
return 0;
@@ -231,8 +229,8 @@ static int e1000_set_settings(struct net_device *netdev,
* cannot be changed
*/
if (e1000_check_reset_block(hw)) {
- ndev_err(netdev, "Cannot change link "
- "characteristics when SoL/IDER is active.\n");
+ e_err("Cannot change link characteristics when SoL/IDER is "
+ "active.\n");
return -EINVAL;
}
@@ -380,8 +378,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
}
- ndev_info(netdev, "TSO is %s\n",
- data ? "Enabled" : "Disabled");
+ e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -722,10 +719,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
(test[pat] & write));
val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
if (val != (test[pat] & write & mask)) {
- ndev_err(adapter->netdev, "pattern test reg %04X "
- "failed: got 0x%08X expected 0x%08X\n",
- reg + offset,
- val, (test[pat] & write & mask));
+ e_err("pattern test reg %04X failed: got 0x%08X "
+ "expected 0x%08X\n", reg + offset, val,
+ (test[pat] & write & mask));
*data = reg;
return 1;
}
@@ -740,9 +736,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- ndev_err(adapter->netdev, "set/check reg %04X test failed: "
- "got 0x%08X expected 0x%08X\n", reg, (val & mask),
- (write & mask));
+ e_err("set/check reg %04X test failed: got 0x%08X "
+ "expected 0x%08X\n", reg, (val & mask), (write & mask));
*data = reg;
return 1;
}
@@ -766,7 +761,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &adapter->hw.mac;
- struct net_device *netdev = adapter->netdev;
u32 value;
u32 before;
u32 after;
@@ -799,8 +793,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
ew32(STATUS, toggle);
after = er32(STATUS) & toggle;
if (value != after) {
- ndev_err(netdev, "failed STATUS register test got: "
- "0x%08X expected: 0x%08X\n", after, value);
+ e_err("failed STATUS register test got: 0x%08X expected: "
+ "0x%08X\n", after, value);
*data = 1;
return 1;
}
@@ -903,8 +897,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
- ndev_info(netdev, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
@@ -1526,8 +1519,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
* sessions are active
*/
if (e1000_check_reset_block(&adapter->hw)) {
- ndev_err(adapter->netdev, "Cannot do PHY loopback test "
- "when SoL/IDER is active.\n");
+ e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
}
@@ -1612,7 +1604,7 @@ static void e1000_diag_test(struct net_device *netdev,
forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
autoneg = adapter->hw.mac.autoneg;
- ndev_info(netdev, "offline testing starting\n");
+ e_info("offline testing starting\n");
/*
* Link test performed before hardware reset so autoneg doesn't
@@ -1658,7 +1650,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- ndev_info(netdev, "online testing starting\n");
+ e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1694,8 +1686,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
- ndev_err(netdev, "Interface does not support "
- "directed (unicast) frame wake-up packets\n");
+ e_err("Interface does not support directed (unicast) "
+ "frame wake-up packets\n");
}
if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d1367789976..05b0b2f9c54 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -484,8 +484,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
* packet, also make sure the frame isn't just CRC only */
if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
/* All receives must fit into a single buffer */
- ndev_dbg(netdev, "%s: Receive packet consumed "
- "multiple buffers\n", netdev->name);
+ e_dbg("%s: Receive packet consumed multiple buffers\n",
+ netdev->name);
/* recycle */
buffer_info->skb = skb;
goto next_desc;
@@ -576,28 +576,26 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
unsigned int i = tx_ring->next_to_clean;
unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
- struct net_device *netdev = adapter->netdev;
/* detected Tx unit hang */
- ndev_err(netdev,
- "Detected Tx Unit Hang:\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]:\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- readl(adapter->hw.hw_addr + tx_ring->head),
- readl(adapter->hw.hw_addr + tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->upper.fields.status);
+ e_err("Detected Tx Unit Hang:\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]:\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ readl(adapter->hw.hw_addr + tx_ring->head),
+ readl(adapter->hw.hw_addr + tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->upper.fields.status);
}
/**
@@ -747,8 +745,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
buffer_info->dma = 0;
if (!(staterr & E1000_RXD_STAT_EOP)) {
- ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "
- "up the full packet\n", netdev->name);
+ e_dbg("%s: Packet Split buffers didn't pick up the "
+ "full packet\n", netdev->name);
dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -761,8 +759,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->wb.middle.length0);
if (!length) {
- ndev_dbg(netdev, "%s: Last part of the packet spanning"
- " multiple descriptors\n", netdev->name);
+ e_dbg("%s: Last part of the packet spanning multiple "
+ "descriptors\n", netdev->name);
dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -1011,7 +1009,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
- ndev_err(netdev, "pskb_may_pull failed.\n");
+ e_err("pskb_may_pull failed.\n");
dev_kfree_skb(skb);
goto next_desc;
}
@@ -1251,10 +1249,8 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
- ndev_err(netdev,
- "Unable to allocate %s interrupt (return: %d)\n",
- adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
- err);
+ e_err("Unable to allocate %s interrupt (return: %d)\n",
+ adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err);
if (adapter->flags & FLAG_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
}
@@ -1395,8 +1391,7 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
return 0;
err:
vfree(tx_ring->buffer_info);
- ndev_err(adapter->netdev,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the transmit descriptor ring\n");
return err;
}
@@ -1450,8 +1445,7 @@ err_pages:
}
err:
vfree(rx_ring->buffer_info);
- ndev_err(adapter->netdev,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the transmit descriptor ring\n");
return err;
}
@@ -2450,13 +2444,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
* For parts with AMT enabled, let the firmware know
* that the network interface is in control
*/
- if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw))
+ if (adapter->flags & FLAG_HAS_AMT)
e1000_get_hw_control(adapter);
ew32(WUC, 0);
if (mac->ops.init_hw(hw))
- ndev_err(adapter->netdev, "Hardware Error\n");
+ e_err("Hardware Error\n");
e1000_update_mng_vlan(adapter);
@@ -2591,7 +2585,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
return 0;
err:
- ndev_err(netdev, "Unable to allocate memory for queues\n");
+ e_err("Unable to allocate memory for queues\n");
kfree(adapter->rx_ring);
kfree(adapter->tx_ring);
return -ENOMEM;
@@ -2640,8 +2634,7 @@ static int e1000_open(struct net_device *netdev)
* If AMT is enabled, let the firmware know that the network
* interface is now open
*/
- if ((adapter->flags & FLAG_HAS_AMT) &&
- e1000e_check_mng_mode(&adapter->hw))
+ if (adapter->flags & FLAG_HAS_AMT)
e1000_get_hw_control(adapter);
/*
@@ -2719,8 +2712,7 @@ static int e1000_close(struct net_device *netdev)
* If AMT is enabled, let the firmware know that the network
* interface is now closed
*/
- if ((adapter->flags & FLAG_HAS_AMT) &&
- e1000e_check_mng_mode(&adapter->hw))
+ if (adapter->flags & FLAG_HAS_AMT)
e1000_release_hw_control(adapter);
return 0;
@@ -2917,8 +2909,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
if (ret_val)
- ndev_warn(adapter->netdev,
- "Error reading PHY register\n");
+ e_warn("Error reading PHY register\n");
} else {
/*
* Do not read PHY registers if link is not up
@@ -2943,18 +2934,16 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
static void e1000_print_link_info(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
u32 ctrl = er32(CTRL);
- ndev_info(netdev,
- "Link is Up %d Mbps %s, Flow Control: %s\n",
- adapter->link_speed,
- (adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+ e_info("Link is Up %d Mbps %s, Flow Control: %s\n",
+ adapter->link_speed,
+ (adapter->link_duplex == FULL_DUPLEX) ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
+ "RX/TX" :
+ ((ctrl & E1000_CTRL_RFCE) ? "RX" :
+ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
}
static bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2994,8 +2983,7 @@ static bool e1000_has_link(struct e1000_adapter *adapter)
if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
(er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
- ndev_info(adapter->netdev,
- "Gigabit has been disabled, downgrading speed\n");
+ e_info("Gigabit has been disabled, downgrading speed\n");
}
return link_active;
@@ -3096,8 +3084,7 @@ static void e1000_watchdog_task(struct work_struct *work)
switch (adapter->link_speed) {
case SPEED_10:
case SPEED_100:
- ndev_info(netdev,
- "10/100 speed: disabling TSO\n");
+ e_info("10/100 speed: disabling TSO\n");
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
break;
@@ -3130,7 +3117,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- ndev_info(netdev, "Link is Down\n");
+ e_info("Link is Down\n");
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -3604,8 +3591,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- ndev_err(netdev,
- "__pskb_pull_tail failed.\n");
+ e_err("__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3737,25 +3723,25 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- ndev_err(netdev, "Invalid MTU setting\n");
+ e_err("Invalid MTU setting\n");
return -EINVAL;
}
/* Jumbo frame size limits */
if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- ndev_err(netdev, "Jumbo Frames not supported.\n");
+ e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
if (adapter->hw.phy.type == e1000_phy_ife) {
- ndev_err(netdev, "Jumbo Frames not supported.\n");
+ e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
}
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- ndev_err(netdev, "MTU > 9216 not supported.\n");
+ e_err("MTU > 9216 not supported.\n");
return -EINVAL;
}
@@ -3792,8 +3778,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
+ ETH_FCS_LEN;
- ndev_info(netdev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -4006,10 +3991,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
e1000e_disable_l1aspm(pdev);
- if (adapter->need_ioport)
- err = pci_enable_device(pdev);
- else
- err = pci_enable_device_mem(pdev);
+ err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
"Cannot enable PCI device from suspend\n");
@@ -4043,7 +4025,7 @@ static int e1000_resume(struct pci_dev *pdev)
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
- if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
+ if (!(adapter->flags & FLAG_HAS_AMT))
e1000_get_hw_control(adapter);
return 0;
@@ -4111,10 +4093,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
int err;
e1000e_disable_l1aspm(pdev);
- if (adapter->need_ioport)
- err = pci_enable_device(pdev);
- else
- err = pci_enable_device_mem(pdev);
+ err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
@@ -4162,8 +4141,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
- if (!(adapter->flags & FLAG_HAS_AMT) ||
- !e1000e_check_mng_mode(&adapter->hw))
+ if (!(adapter->flags & FLAG_HAS_AMT))
e1000_get_hw_control(adapter);
}
@@ -4175,36 +4153,40 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
u32 pba_num;
/* print bus type/speed/width info */
- ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- /* bus width */
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- "Width x1"),
- /* MAC address */
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
- ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
- (hw->phy.type == e1000_phy_ife)
- ? "10/100" : "1000");
+ e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
+ /* bus width */
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ "Width x1"),
+ /* MAC address */
+ netdev->dev_addr[0], netdev->dev_addr[1],
+ netdev->dev_addr[2], netdev->dev_addr[3],
+ netdev->dev_addr[4], netdev->dev_addr[5]);
+ e_info("Intel(R) PRO/%s Network Connection\n",
+ (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
e1000e_read_pba_num(hw, &pba_num);
- ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type,
- (pba_num >> 8), (pba_num & 0xff));
+ e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
+ hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
}
-/**
- * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not
- * @pdev: PCI device information struct
- *
- * Returns true if an adapters needs ioport resources
- **/
-static int e1000e_is_need_ioport(struct pci_dev *pdev)
+static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
- switch (pdev->device) {
- /* Currently there are no adapters that need ioport resources */
- default:
- return false;
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val;
+ u16 buf = 0;
+
+ if (hw->mac.type != e1000_82573)
+ return;
+
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
+ if (!(le16_to_cpu(buf) & (1 << 0))) {
+ /* Deep Smart Power Down (DSPD) */
+ e_warn("Warning: detected DSPD enabled in EEPROM\n");
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
+ if (le16_to_cpu(buf) & (3 << 2)) {
+ /* ASPM enable */
+ e_warn("Warning: detected ASPM enabled in EEPROM\n");
}
}
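
Editor's note: the new e1000_eeprom_checks() above warns when the 82573 NVM has Deep Smart Power Down or ASPM enabled. As a hedged, standalone illustration of the two mask tests only (made-up word values, le16_to_cpu() conversion omitted, not the driver's code):

#include <stdio.h>
#include <stdint.h>

/* Sketch of the bit tests: DSPD is flagged when bit 0 of the init
 * control word is clear, ASPM when bits 3:2 of the 3GIO word are set. */
static void eeprom_word_checks(uint16_t init_ctrl2, uint16_t init_3gio_3)
{
	if (!(init_ctrl2 & (1 << 0)))
		printf("Warning: detected DSPD enabled in EEPROM\n");
	if (init_3gio_3 & (3 << 2))
		printf("Warning: detected ASPM enabled in EEPROM\n");
}

int main(void)
{
	eeprom_word_checks(0xfffe, 0x000c);	/* both warnings fire */
	eeprom_word_checks(0x0001, 0x0000);	/* neither fires */
	return 0;
}
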
@@ -4233,19 +4215,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
int i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
- int bars, need_ioport;
e1000e_disable_l1aspm(pdev);
- /* do not allocate ioport bars when not needed */
- need_ioport = e1000e_is_need_ioport(pdev);
- if (need_ioport) {
- bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
- err = pci_enable_device(pdev);
- } else {
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- err = pci_enable_device_mem(pdev);
- }
+ err = pci_enable_device_mem(pdev);
if (err)
return err;
@@ -4268,7 +4241,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
}
- err = pci_request_selected_regions(pdev, bars, e1000e_driver_name);
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ e1000e_driver_name);
if (err)
goto err_pci_reg;
@@ -4293,8 +4268,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.adapter = adapter;
adapter->hw.mac.type = ei->mac;
adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
- adapter->bars = bars;
- adapter->need_ioport = need_ioport;
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
@@ -4366,8 +4339,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
if (e1000_check_reset_block(&adapter->hw))
- ndev_info(netdev,
- "PHY reset is blocked due to SOL/IDER session.\n");
+ e_info("PHY reset is blocked due to SOL/IDER session.\n");
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
@@ -4411,25 +4383,26 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
break;
if (i == 2) {
- ndev_err(netdev, "The NVM Checksum Is Not Valid\n");
+ e_err("The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
}
+ e1000_eeprom_checks(adapter);
+
/* copy the MAC address out of the NVM */
if (e1000e_read_mac_addr(&adapter->hw))
- ndev_err(netdev, "NVM Read Error while reading MAC address\n");
+ e_err("NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- ndev_err(netdev, "Invalid MAC Address: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- netdev->perm_addr[0], netdev->perm_addr[1],
- netdev->perm_addr[2], netdev->perm_addr[3],
- netdev->perm_addr[4], netdev->perm_addr[5]);
+ e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ netdev->perm_addr[0], netdev->perm_addr[1],
+ netdev->perm_addr[2], netdev->perm_addr[3],
+ netdev->perm_addr[4], netdev->perm_addr[5]);
err = -EIO;
goto err_eeprom;
}
@@ -4499,8 +4472,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
- if (!(adapter->flags & FLAG_HAS_AMT) ||
- !e1000e_check_mng_mode(&adapter->hw))
+ if (!(adapter->flags & FLAG_HAS_AMT))
e1000_get_hw_control(adapter);
/* tell the stack to leave us alone until e1000_open() is called */
@@ -4517,24 +4489,25 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return 0;
err_register:
-err_hw_init:
- e1000_release_hw_control(adapter);
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000_release_hw_control(adapter);
err_eeprom:
if (!e1000_check_reset_block(&adapter->hw))
e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
- if (adapter->hw.flash_address)
- iounmap(adapter->hw.flash_address);
-
-err_flashmap:
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
+err_flashmap:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev, bars);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -4582,7 +4555,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
iounmap(adapter->hw.hw_addr);
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
- pci_release_selected_regions(pdev, adapter->bars);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
free_netdev(netdev);
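
Editor's note: the ndev_info()/ndev_err() to e_info()/e_err()/e_warn() conversions in this file (and in param.c below) rely on driver-local logging macros added to e1000.h earlier in the series. The sketch below is illustrative only and printf-based; the real macros differ in detail. The key point is that the macro picks up the enclosing function's adapter variable, so the net_device no longer has to be passed at every call site.

#include <stdio.h>

struct net_device { char name[16]; };
struct e1000_adapter { struct net_device *netdev; };

/* Illustrative only: expands using the local "adapter" pointer in
 * scope at the call site, mirroring how e_info() drops the explicit
 * netdev argument. */
#define e_info(fmt, ...) \
	printf("%s: " fmt, adapter->netdev->name, ##__VA_ARGS__)

int main(void)
{
	struct net_device nd = { "eth0" };
	struct e1000_adapter adapter_storage = { &nd };
	struct e1000_adapter *adapter = &adapter_storage;

	e_info("Link is Up %d Mbps %s\n", 1000, "Full Duplex");
	return 0;
}
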
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a66b92efcf8..8effc3107f9 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -27,6 +27,7 @@
*******************************************************************************/
#include <linux/netdevice.h>
+#include <linux/pci.h>
#include "e1000.h"
@@ -162,17 +163,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- ndev_info(adapter->netdev, "%s Enabled\n", opt->name);
+ e_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- ndev_info(adapter->netdev, "%s Disabled\n", opt->name);
+ e_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- ndev_info(adapter->netdev,
- "%s set to %i\n", opt->name, *value);
+ e_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -184,8 +184,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- ndev_info(adapter->netdev, "%s\n",
- ent->str);
+ e_info("%s\n", ent->str);
return 0;
}
}
@@ -195,8 +194,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n",
- opt->name, *value, opt->err);
+ e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
+ opt->err);
*value = opt->def;
return -1;
}
@@ -213,13 +212,11 @@ static int __devinit e1000_validate_option(unsigned int *value,
void __devinit e1000e_check_options(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- ndev_notice(netdev,
- "Warning: no configuration for board #%i\n", bd);
- ndev_notice(netdev, "Using defaults for all values\n");
+ e_notice("Warning: no configuration for board #%i\n", bd);
+ e_notice("Using defaults for all values\n");
}
{ /* Transmit Interrupt Delay */
@@ -313,19 +310,15 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
- ndev_info(netdev, "%s turned off\n",
- opt.name);
+ e_info("%s turned off\n", opt.name);
break;
case 1:
- ndev_info(netdev,
- "%s set to dynamic mode\n",
- opt.name);
+ e_info("%s set to dynamic mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
- ndev_info(netdev,
- "%s set to dynamic conservative mode\n",
+ e_info("%s set to dynamic conservative mode\n",
opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 56f50491a45..1f11350e16c 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1283,14 +1283,6 @@ set_multicast_list(struct net_device *dev)
if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
{
- /*
- * We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. If it was a promisc request the
- * flag is already set. If not we assert it.
- */
- dev->flags|=IFF_PROMISC;
-
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
outb(mode | PRMSC_Mode, ioaddr + REG2);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 0920b796bd7..b70c5314f53 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2937,9 +2937,9 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
}
}
- mutex_unlock(&dlpar_mem_lock);
- ehea_info("re-initializing driver complete");
+ ehea_info("re-initializing driver complete");
out:
+ mutex_unlock(&dlpar_mem_lock);
return;
}
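
Editor's note: the ehea change above moves mutex_unlock() below the out: label so the early-return path releases dlpar_mem_lock as well. A generic, standalone sketch of that pattern (plain pthreads here, not the kernel mutex API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int fail_early)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (fail_early) {
		ret = -1;
		goto out;	/* early exit still reaches the unlock */
	}
	printf("re-initializing complete\n");
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	do_work(1);
	do_work(0);
	return 0;
}
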
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index c05cb159c77..aa0bf6e1c69 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1547,8 +1547,10 @@ static int __devinit enc28j60_probe(struct spi_device *spi)
random_ether_addr(dev->dev_addr);
enc28j60_set_hw_macaddr(dev);
- ret = request_irq(spi->irq, enc28j60_irq, IRQF_TRIGGER_FALLING,
- DRV_NAME, priv);
+ /* Board setup must set the relevant edge trigger type;
+ * level triggers won't currently work.
+ */
+ ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
if (ret < 0) {
if (netif_msg_probe(priv))
dev_err(&spi->dev, DRV_NAME ": request irq %d failed "
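
Editor's note: with the enc28j60 now passing no trigger flags to request_irq(), the new comment expects platform/board code to have configured the interrupt line already. A hedged sketch of what that board-side setup typically looks like with the genirq API of this era (function and symbol names shown for illustration, not taken from any specific board file):

#include <linux/irq.h>
#include <linux/init.h>

/* Board support sketch: select the falling-edge trigger for the line
 * wired to the ENC28J60 INT pin before the driver probes. */
static void __init example_board_setup_enc28j60_irq(unsigned int irq)
{
	set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
}
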
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index e3dd8b13690..bee8b3fbc56 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1356,7 +1356,6 @@ static void eth16i_multicast(struct net_device *dev)
if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{
- dev->flags|=IFF_PROMISC; /* Must do this */
outb(3, ioaddr + RECEIVE_MODE_REG);
} else {
outb(2, ioaddr + RECEIVE_MODE_REG);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4ed89fa9ae4..053971e5fc9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -77,26 +77,27 @@
* Hardware access:
*/
-#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */
-#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */
-#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */
-#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */
-#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */
-#define DEV_HAS_MSI 0x00040 /* device supports MSI */
-#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */
-#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */
-#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */
-#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */
-#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */
-#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */
-#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */
-#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */
-#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */
-#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */
-#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */
-#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */
-#define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */
+#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
+#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
+#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */
+#define DEV_HAS_MSI 0x000040 /* device supports MSI */
+#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
+#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
+#define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */
+#define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */
+#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
+#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
+#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
+#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
+#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
+#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
+#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
+#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
enum {
NvRegIrqStatus = 0x000,
@@ -248,6 +249,8 @@ enum {
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
+ NvRegTxPauseFrameLimit = 0x174,
+#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -270,6 +273,9 @@ enum {
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
NvRegMIIData = 0x194,
+ NvRegTxUnicast = 0x1a0,
+ NvRegTxMulticast = 0x1a4,
+ NvRegTxBroadcast = 0x1a8,
NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
@@ -333,6 +339,7 @@ enum {
NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
+#define NVREG_POWERSTATE2_PHY_RESET 0x0004
};
/* Big endian: should work, but is untested */
@@ -401,6 +408,7 @@ union ring_type {
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
+#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
@@ -418,6 +426,7 @@ union ring_type {
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
+#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
@@ -529,6 +538,7 @@ union ring_type {
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
+#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
@@ -537,6 +547,9 @@ union ring_type {
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
+#define PHY_REALTEK_INIT9 0x0008
+#define PHY_REALTEK_INIT10 0x0005
+#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003
#define PHY_GIGABIT 0x0100
@@ -611,7 +624,12 @@ static const struct nv_ethtool_str nv_estats_str[] = {
{ "rx_bytes" },
{ "tx_pause" },
{ "rx_pause" },
- { "rx_drop_frame" }
+ { "rx_drop_frame" },
+
+ /* version 3 stats */
+ { "tx_unicast" },
+ { "tx_multicast" },
+ { "tx_broadcast" }
};
struct nv_ethtool_stats {
@@ -647,9 +665,15 @@ struct nv_ethtool_stats {
u64 tx_pause;
u64 rx_pause;
u64 rx_drop_frame;
+
+ /* version 3 stats */
+ u64 tx_unicast;
+ u64 tx_multicast;
+ u64 tx_broadcast;
};
-#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
+#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
+#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
/* diagnostics */
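
Editor's note: the statistics-count macros above derive the v3 count from the structure size and express the older counts by subtracting the fields each earlier version lacks. A standalone illustration with a cut-down stand-in structure (not the driver's full nv_ethtool_stats):

#include <stdio.h>
#include <stdint.h>

struct demo_stats {
	uint64_t tx_packets, rx_packets, tx_pause, rx_pause;
	/* version 3 additions */
	uint64_t tx_unicast, tx_multicast, tx_broadcast;
};

#define DEMO_V3_COUNT (sizeof(struct demo_stats) / sizeof(uint64_t))
#define DEMO_V2_COUNT (DEMO_V3_COUNT - 3)

int main(void)
{
	printf("v3 exports %zu counters, v2 exports %zu\n",
	       DEMO_V3_COUNT, DEMO_V2_COUNT);
	return 0;
}
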
@@ -1149,6 +1173,42 @@ static int phy_init(struct net_device *dev)
return PHY_ERROR;
}
}
+ if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211C) {
+ u32 powerstate = readl(base + NvRegPowerState2);
+
+ /* need to perform hw phy reset */
+ powerstate |= NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
+ reg |= PHY_REALTEK_INIT9;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
+ if (!(reg & PHY_REALTEK_INIT11)) {
+ reg |= PHY_REALTEK_INIT11;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
if (np->phy_model == PHY_MODEL_REALTEK_8201) {
if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
@@ -1201,12 +1261,23 @@ static int phy_init(struct net_device *dev)
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= BMCR_ANENABLE;
- /* reset the phy
- * (certain phys need bmcr to be setup with reset)
- */
- if (phy_reset(dev, mii_control)) {
- printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
- return PHY_ERROR;
+ if (np->phy_oui == PHY_OUI_REALTEK &&
+ np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211C) {
+ /* start autoneg since we already performed hw reset above */
+ mii_control |= BMCR_ANRESTART;
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+ printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else {
+ /* reset the phy
+ * (certain phys need bmcr to be setup with reset)
+ */
+ if (phy_reset(dev, mii_control)) {
+ printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
}
/* phy vendor specific configuration */
@@ -1576,6 +1647,12 @@ static void nv_get_hw_stats(struct net_device *dev)
np->estats.rx_pause += readl(base + NvRegRxPause);
np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
}
+
+ if (np->driver_data & DEV_HAS_STATISTICS_V3) {
+ np->estats.tx_unicast += readl(base + NvRegTxUnicast);
+ np->estats.tx_multicast += readl(base + NvRegTxMulticast);
+ np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
+ }
}
/*
@@ -1589,7 +1666,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
/* If the nic supports hw counters then retrieve latest values */
- if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
nv_get_hw_stats(dev);
/* copy to net_device stats */
@@ -2580,7 +2657,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
if (likely(flags & NV_RX_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V1;
if (unlikely(flags & NV_RX_ERROR)) {
- if (flags & NV_RX_ERROR4) {
+ if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
dev->stats.rx_errors++;
@@ -2589,7 +2666,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
}
/* framing errors are soft errors */
- else if (flags & NV_RX_FRAMINGERR) {
+ else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
if (flags & NV_RX_SUBSTRACT1) {
len--;
}
@@ -2615,7 +2692,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V2;
if (unlikely(flags & NV_RX2_ERROR)) {
- if (flags & NV_RX2_ERROR4) {
+ if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
dev->stats.rx_errors++;
@@ -2624,7 +2701,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
}
/* framing errors are soft errors */
- else if (flags & NV_RX2_FRAMINGERR) {
+ else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
if (flags & NV_RX2_SUBSTRACT1) {
len--;
}
@@ -2714,7 +2791,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V2;
if (unlikely(flags & NV_RX2_ERROR)) {
- if (flags & NV_RX2_ERROR4) {
+ if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
dev_kfree_skb(skb);
@@ -2722,7 +2799,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
}
/* framing errors are soft errors */
- else if (flags & NV_RX2_FRAMINGERR) {
+ else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
if (flags & NV_RX2_SUBSTRACT1) {
len--;
}
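
Editor's note: the receive-path hunks above replace "flags & NV_RX_ERROR4" style tests with "(flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4", so the soft-error handling only runs when that is the sole error bit set. A standalone demonstration with illustrative bit values (not the real NV_RX_* constants):

#include <stdio.h>
#include <stdint.h>

#define ERR1       (1u << 0)
#define ERR4       (1u << 3)
#define CRCERR     (1u << 4)
#define ERROR_MASK (ERR1 | ERR4 | CRCERR)

static const char *classify(uint32_t flags)
{
	if ((flags & ERROR_MASK) == ERR4)
		return "length error only - try to recover";
	return "hard error - drop frame";
}

int main(void)
{
	printf("%s\n", classify(ERR4));           /* recoverable */
	printf("%s\n", classify(ERR4 | CRCERR));  /* old test would wrongly recover */
	return 0;
}
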
@@ -3001,8 +3078,11 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
- if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
+ if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
+ /* limit the number of tx pause frames to a default of 8 */
+ writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
+ }
writel(pause_enable, base + NvRegTxPauseFrame);
writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
@@ -4688,6 +4768,8 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
return NV_DEV_STATISTICS_V1_COUNT;
else if (np->driver_data & DEV_HAS_STATISTICS_V2)
return NV_DEV_STATISTICS_V2_COUNT;
+ else if (np->driver_data & DEV_HAS_STATISTICS_V3)
+ return NV_DEV_STATISTICS_V3_COUNT;
else
return 0;
default:
@@ -5272,7 +5354,7 @@ static int nv_open(struct net_device *dev)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
/* start statistics timer */
- if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
mod_timer(&np->stats_poll,
round_jiffies(jiffies + STATS_INTERVAL));
@@ -5376,7 +5458,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (err < 0)
goto out_disable;
- if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
+ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
np->register_size = NV_PCI_REGSZ_VER3;
else if (id->driver_data & DEV_HAS_STATISTICS_V1)
np->register_size = NV_PCI_REGSZ_VER2;
@@ -6031,35 +6113,35 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
},
{0,},
};
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a97fc2d97e..1c7ef812a8e 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -126,7 +126,7 @@ out:
#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
#define FCC_RX_EVENT (FCC_ENET_RXF)
#define FCC_TX_EVENT (FCC_ENET_TXB)
-#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY)
+#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
static int setup_data(struct net_device *dev)
{
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b8394cf134e..ca6cf6ecb37 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -414,9 +414,7 @@ static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
spin_unlock(&priv->rxlock);
spin_unlock_irqrestore(&priv->txlock, flags);
-#ifdef CONFIG_GFAR_NAPI
napi_disable(&priv->napi);
-#endif
if (magic_packet) {
/* Enable interrupt on Magic Packet */
@@ -469,9 +467,7 @@ static int gfar_resume(struct platform_device *pdev)
netif_device_attach(dev);
-#ifdef CONFIG_GFAR_NAPI
napi_enable(&priv->napi);
-#endif
return 0;
}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3249df5e0f1..b8e25c4624d 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -548,7 +548,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
}
printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
- (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ?
+ (tty_chars_in_buffer(ax->tty) || ax->xleft) ?
"bad line quality" : "driver error");
ax->xleft = 0;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 0960e69b2da..e4fbefc8c82 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -69,18 +69,20 @@ static void ri_tasklet(unsigned long dev)
struct net_device *_dev = (struct net_device *)dev;
struct ifb_private *dp = netdev_priv(_dev);
struct net_device_stats *stats = &_dev->stats;
+ struct netdev_queue *txq;
struct sk_buff *skb;
+ txq = netdev_get_tx_queue(_dev, 0);
dp->st_task_enter++;
if ((skb = skb_peek(&dp->tq)) == NULL) {
dp->st_txq_refl_try++;
- if (netif_tx_trylock(_dev)) {
+ if (__netif_tx_trylock(txq)) {
dp->st_rxq_enter++;
while ((skb = skb_dequeue(&dp->rq)) != NULL) {
skb_queue_tail(&dp->tq, skb);
dp->st_rx2tx_tran++;
}
- netif_tx_unlock(_dev);
+ __netif_tx_unlock(txq);
} else {
/* reschedule */
dp->st_rxq_notenter++;
@@ -115,7 +117,7 @@ static void ri_tasklet(unsigned long dev)
BUG();
}
- if (netif_tx_trylock(_dev)) {
+ if (__netif_tx_trylock(txq)) {
dp->st_rxq_check++;
if ((skb = skb_peek(&dp->rq)) == NULL) {
dp->tasklet_pending = 0;
@@ -123,10 +125,10 @@ static void ri_tasklet(unsigned long dev)
netif_wake_queue(_dev);
} else {
dp->st_rxq_rsch++;
- netif_tx_unlock(_dev);
+ __netif_tx_unlock(txq);
goto resched;
}
- netif_tx_unlock(_dev);
+ __netif_tx_unlock(txq);
} else {
resched:
dp->tasklet_pending = 1;
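
Editor's note: the ifb changes above switch from the per-device netif_tx_trylock() to taking queue 0's lock directly via __netif_tx_trylock(). A hedged sketch of that pattern using only the calls already visible in the hunks; the helper itself is illustrative, not part of the driver:

static int example_grab_txq0(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (!__netif_tx_trylock(txq))
		return 0;	/* busy - caller reschedules the tasklet */
	/* ... move packets from the rx queue to the tx queue ... */
	__netif_tx_unlock(txq);
	return 1;
}
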
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e098f234770..bb823acc744 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -850,7 +850,7 @@ void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
for (; mc_addr_count > 0; mc_addr_count--) {
hash_value = igb_hash_mc_addr(hw, mc_addr_list);
hw_dbg("Hash value = 0x%03X\n", hash_value);
- hw->mac.ops.mta_set(hw, hash_value);
+ igb_mta_set(hw, hash_value);
mc_addr_list += ETH_ALEN;
}
}
@@ -1136,6 +1136,12 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
}
+
+ if (hw->mac.type == e1000_82576) {
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ igb_force_mac_fc(hw);
+ }
+
wr32(E1000_PCS_LCTL, reg);
return 0;
@@ -1232,70 +1238,6 @@ out:
}
/**
- * igb_translate_register_82576 - Translate the proper register offset
- * @reg: e1000 register to be read
- *
- * Registers in 82576 are located in different offsets than other adapters
- * even though they function in the same manner. This function takes in
- * the name of the register to read and returns the correct offset for
- * 82576 silicon.
- **/
-u32 igb_translate_register_82576(u32 reg)
-{
- /*
- * Some of the Kawela registers are located at different
- * offsets than they are in older adapters.
- * Despite the difference in location, the registers
- * function in the same manner.
- */
- switch (reg) {
- case E1000_TDBAL(0):
- reg = 0x0E000;
- break;
- case E1000_TDBAH(0):
- reg = 0x0E004;
- break;
- case E1000_TDLEN(0):
- reg = 0x0E008;
- break;
- case E1000_TDH(0):
- reg = 0x0E010;
- break;
- case E1000_TDT(0):
- reg = 0x0E018;
- break;
- case E1000_TXDCTL(0):
- reg = 0x0E028;
- break;
- case E1000_RDBAL(0):
- reg = 0x0C000;
- break;
- case E1000_RDBAH(0):
- reg = 0x0C004;
- break;
- case E1000_RDLEN(0):
- reg = 0x0C008;
- break;
- case E1000_RDH(0):
- reg = 0x0C010;
- break;
- case E1000_RDT(0):
- reg = 0x0C018;
- break;
- case E1000_RXDCTL(0):
- reg = 0x0C028;
- break;
- case E1000_SRRCTL(0):
- reg = 0x0C00C;
- break;
- default:
- break;
- }
-
- return reg;
-}
-
-/**
* igb_reset_init_script_82575 - Inits HW defaults after reset
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 2f848e578a2..c1928b5efe1 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -28,7 +28,6 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
-u32 igb_translate_register_82576(u32 reg);
void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index afdba3c9073..ce700689fb5 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -257,6 +257,7 @@
#define E1000_PCS_LCTL_FDV_FULL 8
#define E1000_PCS_LCTL_FSD 0x10
#define E1000_PCS_LCTL_FORCE_LINK 0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
#define E1000_PCS_LCTL_AN_ENABLE 0x10000
#define E1000_PCS_LCTL_AN_RESTART 0x20000
#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 19fa4ee96f2..a65ccc3095c 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -420,7 +420,6 @@ struct e1000_mac_operations {
void (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- void (*mta_set)(struct e1000_hw *, u32);
};
struct e1000_phy_operations {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 20408aa1f91..e18747c70be 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -144,34 +144,6 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
}
/**
- * igb_init_rx_addrs - Initialize receive address's
- * @hw: pointer to the HW structure
- * @rar_count: receive address registers
- *
- * Setups the receive address registers by setting the base receive address
- * register to the devices MAC address and clearing all the other receive
- * address registers to 0.
- **/
-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
-{
- u32 i;
-
- /* Setup the receive address */
- hw_dbg("Programming MAC Address into RAR[0]\n");
-
- hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
-
- /* Zero out the other (rar_entry_count - 1) receive addresses */
- hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
- for (i = 1; i < rar_count; i++) {
- array_wr32(E1000_RA, (i << 1), 0);
- wrfl();
- array_wr32(E1000_RA, ((i << 1) + 1), 0);
- wrfl();
- }
-}
-
-/**
* igb_check_alt_mac_addr - Check for alternate MAC addr
* @hw: pointer to the HW structure
*
@@ -271,7 +243,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
* current value is read, the new bit is OR'd in and the new value is
* written back into the register.
**/
-static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg, mta;
@@ -297,60 +269,6 @@ static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
}
/**
- * igb_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
- *
- * Updates the Receive Address Registers and Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
- **/
-void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count)
-{
- u32 hash_value;
- u32 i;
-
- /*
- * Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = rar_used_count; i < rar_count; i++) {
- if (mc_addr_count) {
- hw->mac.ops.rar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- array_wr32(E1000_RA, i << 1, 0);
- wrfl();
- array_wr32(E1000_RA, (i << 1) + 1, 0);
- wrfl();
- }
- }
-
- /* Clear the old settings from the MTA */
- hw_dbg("Clearing MTA\n");
- for (i = 0; i < hw->mac.mta_reg_count; i++) {
- array_wr32(E1000_MTA, i, 0);
- wrfl();
- }
-
- /* Load any remaining multicast addresses into the hash table. */
- for (; mc_addr_count > 0; mc_addr_count--) {
- hash_value = igb_hash_mc_addr(hw, mc_addr_list);
- hw_dbg("Hash value = 0x%03X\n", hash_value);
- igb_mta_set(hw, hash_value);
- mc_addr_list += ETH_ALEN;
- }
-}
-
-/**
* igb_hash_mc_addr - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index dc2f8cce15e..cbee6af7d91 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -51,9 +51,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
s32 igb_id_led_init(struct e1000_hw *hw);
s32 igb_led_off(struct e1000_hw *hw);
-void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count);
s32 igb_setup_link(struct e1000_hw *hw);
s32 igb_validate_mdi_setting(struct e1000_hw *hw);
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
@@ -62,7 +59,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw);
void igb_config_collision_dist(struct e1000_hw *hw);
-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
void igb_put_hw_semaphore(struct e1000_hw *hw);
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index b95093d24c0..95523af2605 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -262,9 +262,6 @@
#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
-#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \
- ? reg : e1000_translate_register_82576(reg))
-
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
#define wrfl() ((void)rd32(E1000_STATUS))
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b602c4dd0d1..8f66e15ec8d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -311,7 +311,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
break;
case e1000_82576:
- /* Kawela uses a table-based method for assigning vectors.
+ /* The 82576 uses a table-based method for assigning vectors.
Each queue has a single entry in the table to which we write
a vector number along with a "valid" bit. Sadly, the layout
of the table is somewhat counterintuitive. */
@@ -720,28 +720,6 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
-static void igb_init_manageability(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
-
- if (adapter->en_mng_pt) {
- u32 manc2h = rd32(E1000_MANC2H);
- u32 manc = rd32(E1000_MANC);
-
- /* enable receiving management packets to the host */
- /* this will probably generate destination unreachable messages
- * from the host OS, but the packets will be handled on SMBUS */
- manc |= E1000_MANC_EN_MNG2HOST;
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
- wr32(E1000_MANC2H, manc2h);
-
- wr32(E1000_MANC, manc);
- }
-}
-
/**
* igb_configure - configure the hardware for RX and TX
* @adapter: private board structure
@@ -755,7 +733,6 @@ static void igb_configure(struct igb_adapter *adapter)
igb_set_multi(netdev);
igb_restore_vlan(adapter);
- igb_init_manageability(adapter);
igb_configure_tx(adapter);
igb_setup_rctl(adapter);
@@ -1372,7 +1349,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
- if (!igb_check_reset_block(&adapter->hw))
+ if (adapter->hw.phy.ops.reset_phy &&
+ !igb_check_reset_block(&adapter->hw))
adapter->hw.phy.ops.reset_phy(&adapter->hw);
igb_remove_device(&adapter->hw);
@@ -4523,8 +4501,6 @@ static void igb_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
- igb_init_manageability(adapter);
-
if (netif_running(netdev)) {
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index d8b89c74aab..37ab8c85571 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -107,7 +107,7 @@ static int act200l_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s()\n", __func__ );
/* Power on the dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -124,7 +124,7 @@ static int act200l_open(struct sir_dev *dev)
static int act200l_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s()\n", __func__ );
/* Power off the dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -143,7 +143,7 @@ static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[3];
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s()\n", __func__ );
/* Clear DTR and set RTS to enter command mode */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
@@ -212,7 +212,7 @@ static int act200l_reset(struct sir_dev *dev)
};
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s()\n", __func__ );
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
@@ -240,7 +240,7 @@ static int act200l_reset(struct sir_dev *dev)
dev->speed = 9600;
break;
default:
- IRDA_ERROR("%s(), unknown state %d\n", __FUNCTION__, state);
+ IRDA_ERROR("%s(), unknown state %d\n", __func__, state);
ret = -1;
break;
}
diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c
index 736d2473b7e..50b2141a610 100644
--- a/drivers/net/irda/actisys-sir.c
+++ b/drivers/net/irda/actisys-sir.c
@@ -165,7 +165,7 @@ static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
int ret = 0;
int i = 0;
- IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__,
+ IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__,
speed, dev->speed);
/* dongle was already reset by the irda_request state machine,
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 083b0dd70fe..2ff181861d2 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -152,7 +152,7 @@ static int __init ali_ircc_init(void)
int reg, revision;
int i = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
ret = platform_driver_register(&ali_ircc_driver);
if (ret) {
@@ -166,7 +166,7 @@ static int __init ali_ircc_init(void)
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)
{
- IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, chip->name);
+ IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name);
/* Try all config registers for this chip */
for (cfg=0; cfg<2; cfg++)
@@ -196,11 +196,11 @@ static int __init ali_ircc_init(void)
if (reg == chip->cid_value)
{
- IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __FUNCTION__, cfg_base);
+ IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base);
outb(0x1F, cfg_base);
revision = inb(cfg_base+1);
- IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__,
chip->name, revision);
/*
@@ -223,14 +223,14 @@ static int __init ali_ircc_init(void)
}
else
{
- IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __FUNCTION__, chip->name, cfg_base);
+ IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base);
}
/* Exit configuration */
outb(0xbb, cfg_base);
}
}
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
if (ret)
platform_driver_unregister(&ali_ircc_driver);
@@ -248,7 +248,7 @@ static void __exit ali_ircc_cleanup(void)
{
int i;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
@@ -257,7 +257,7 @@ static void __exit ali_ircc_cleanup(void)
platform_driver_unregister(&ali_ircc_driver);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
}
/*
@@ -273,11 +273,11 @@ static int ali_ircc_open(int i, chipio_t *info)
int dongle_id;
int err;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
if (i >= ARRAY_SIZE(dev_self)) {
IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
- __FUNCTION__);
+ __func__);
return -ENOMEM;
}
@@ -288,7 +288,7 @@ static int ali_ircc_open(int i, chipio_t *info)
dev = alloc_irdadev(sizeof(*self));
if (dev == NULL) {
IRDA_ERROR("%s(), can't allocate memory for control block!\n",
- __FUNCTION__);
+ __func__);
return -ENOMEM;
}
@@ -312,7 +312,7 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext,
ALI_IRCC_DRIVER_NAME)) {
- IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__,
+ IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__,
self->io.fir_base);
err = -ENODEV;
goto err_out1;
@@ -370,19 +370,19 @@ static int ali_ircc_open(int i, chipio_t *info)
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
goto err_out4;
}
IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
/* Check dongle id */
dongle_id = ali_ircc_read_dongle_id(i, info);
- IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__,
+ IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__,
ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
self->io.dongle_id = dongle_id;
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
@@ -411,7 +411,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
@@ -421,7 +421,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __FUNCTION__, self->io.fir_base);
+ IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -435,7 +435,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
dev_self[self->index] = NULL;
free_netdev(self->netdev);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
@@ -478,7 +478,7 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
int cfg_base = info->cfg_base;
int hi, low, reg;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chip->entr1, cfg_base);
@@ -497,13 +497,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->sir_base = info->fir_base;
- IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, info->fir_base);
+ IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
/* Read IRQ control register */
outb(0x70, cfg_base);
reg = inb(cfg_base+1);
info->irq = reg & 0x0f;
- IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
+ IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
/* Read DMA channel */
outb(0x74, cfg_base);
@@ -511,26 +511,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->dma = reg & 0x07;
if(info->dma == 0x04)
- IRDA_WARNING("%s(), No DMA channel assigned !\n", __FUNCTION__);
+ IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__);
else
- IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
+ IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
/* Read Enabled Status */
outb(0x30, cfg_base);
reg = inb(cfg_base+1);
info->enabled = (reg & 0x80) && (reg & 0x01);
- IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __FUNCTION__, info->enabled);
+ IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled);
/* Read Power Status */
outb(0x22, cfg_base);
reg = inb(cfg_base+1);
info->suspended = (reg & 0x20);
- IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __FUNCTION__, info->suspended);
+ IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended);
/* Exit configuration */
outb(0xbb, cfg_base);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
@@ -548,7 +548,7 @@ static int ali_ircc_setup(chipio_t *info)
int version;
int iobase = info->fir_base;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Locking comments :
* Most operations here need to be protected. We are called before
@@ -609,7 +609,7 @@ static int ali_ircc_setup(chipio_t *info)
// outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
// Turn on the interrupts in ali_ircc_net_open
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return 0;
}
@@ -626,7 +626,7 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
int dongle_id, reg;
int cfg_base = info->cfg_base;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chips[i].entr1, cfg_base);
@@ -640,13 +640,13 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
outb(0xf0, cfg_base);
reg = inb(cfg_base+1);
dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
- IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__,
dongle_id, dongle_types[dongle_id]);
/* Exit configuration */
outb(0xbb, cfg_base);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return dongle_id;
}
@@ -663,7 +663,7 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
struct ali_ircc_cb *self;
int ret;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
self = dev->priv;
@@ -677,7 +677,7 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
spin_unlock(&self->lock);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return ret;
}
/*
@@ -691,7 +691,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
__u8 eir, OldMessageCount;
int iobase, tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.fir_base;
@@ -704,10 +704,10 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
//self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
- IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __FUNCTION__,self->InterruptID);
- IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __FUNCTION__,self->LineStatus);
- IRDA_DEBUG(1, "%s(), self->ier = %x\n", __FUNCTION__,self->ier);
- IRDA_DEBUG(1, "%s(), eir = %x\n", __FUNCTION__,eir);
+ IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID);
+ IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus);
+ IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier);
+ IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir);
/* Disable interrupts */
SetCOMInterrupts(self, FALSE);
@@ -718,7 +718,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
{
if (self->io.direction == IO_XMIT) /* TX */
{
- IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__);
if(ali_ircc_dma_xmit_complete(self))
{
@@ -737,23 +737,23 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
}
else /* RX */
{
- IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__);
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
- IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__);
self->ier = IER_EOM;
}
else
{
- IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__);
self->ier = IER_EOM | IER_TIMER;
}
@@ -766,7 +766,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
@@ -798,7 +798,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
/* Restore Interrupt */
SetCOMInterrupts(self, TRUE);
- IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__);
return IRQ_RETVAL(eir);
}
@@ -813,7 +813,7 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
int iobase;
int iir, lsr;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.sir_base;
@@ -822,13 +822,13 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
/* Clear interrupt */
lsr = inb(iobase+UART_LSR);
- IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __FUNCTION__,
+ IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__,
iir, lsr, iobase);
switch (iir)
{
case UART_IIR_RLSI:
- IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
@@ -842,14 +842,14 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
}
break;
default:
- IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir);
+ IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir);
break;
}
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return IRQ_RETVAL(iir);
}
@@ -866,7 +866,7 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
int boguscount = 0;
int iobase;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
@@ -881,12 +881,12 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
/* Make sure we don't stay here too long */
if (boguscount++ > 32) {
- IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__);
+ IRDA_DEBUG(2,"%s(), breaking!\n", __func__);
break;
}
} while (inb(iobase+UART_LSR) & UART_LSR_DR);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -903,7 +903,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
IRDA_ASSERT(self != NULL, return;);
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.sir_base;
@@ -922,16 +922,16 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
{
/* We must wait until all data are gone */
while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
- IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ );
- IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __FUNCTION__ , self->new_speed);
+ IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed);
ali_ircc_change_speed(self, self->new_speed);
self->new_speed = 0;
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
- IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ );
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
@@ -949,7 +949,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
outb(UART_IER_RDI, iobase+UART_IER);
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
@@ -957,9 +957,9 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
struct net_device *dev = self->netdev;
int iobase;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), setting speed = %d \n", __FUNCTION__ , baud);
+ IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
@@ -998,7 +998,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
netif_wake_queue(self->netdev);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
@@ -1008,14 +1008,14 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
struct net_device *dev;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
iobase = self->io.fir_base;
- IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __FUNCTION__ ,self->io.speed,baud);
+ IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud);
/* Come from SIR speed */
if(self->io.speed <=115200)
@@ -1029,7 +1029,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
// Set Dongle Speed mode
ali_ircc_change_dongle_speed(self, baud);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -1047,9 +1047,9 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
int lcr; /* Line control reg */
int divisor;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __FUNCTION__ , speed);
+ IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed);
IRDA_ASSERT(self != NULL, return;);
@@ -1103,7 +1103,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
@@ -1113,14 +1113,14 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
int iobase,dongle_id;
int tmp = 0;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
dongle_id = self->io.dongle_id;
/* We are already locked, no need to do it again */
- IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __FUNCTION__ , dongle_types[dongle_id], speed);
+ IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);
switch_bank(iobase, BANK2);
tmp = inb(iobase+FIR_IRDA_CR);
@@ -1284,7 +1284,7 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -1297,11 +1297,11 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
{
int actual = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
/* Tx FIFO should be empty! */
if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
- IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
return 0;
}
@@ -1313,7 +1313,7 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
actual++;
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return actual;
}
@@ -1329,7 +1329,7 @@ static int ali_ircc_net_open(struct net_device *dev)
int iobase;
char hwname[32];
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -1375,7 +1375,7 @@ static int ali_ircc_net_open(struct net_device *dev)
*/
self->irlap = irlap_open(dev, &self->qos, hwname);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1392,7 +1392,7 @@ static int ali_ircc_net_close(struct net_device *dev)
struct ali_ircc_cb *self;
//int iobase;
- IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -1415,7 +1415,7 @@ static int ali_ircc_net_close(struct net_device *dev)
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1434,7 +1434,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
__u32 speed;
int mtt, diff;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
self = (struct ali_ircc_cb *) dev->priv;
iobase = self->io.fir_base;
@@ -1488,7 +1488,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
- IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __FUNCTION__ , diff);
+ IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff);
if (diff < 0)
diff += 1000000;
@@ -1510,7 +1510,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
/* Adjust for timer resolution */
mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */
- IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __FUNCTION__ , mtt);
+ IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);
/* Setup timer */
if (mtt == 1) /* 500 us */
@@ -1567,7 +1567,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1578,7 +1578,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
unsigned char FIFO_OPTI, Hi, Lo;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1629,7 +1629,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
tmp = inb(iobase+FIR_LCR_B);
tmp &= ~0x20; // Disable SIP
outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __FUNCTION__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B));
outb(0, iobase+FIR_LSR);
@@ -1639,7 +1639,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
@@ -1647,7 +1647,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
int iobase;
int ret = TRUE;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1660,7 +1660,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
{
- IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __FUNCTION__);
+ IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
self->stats.tx_errors++;
self->stats.tx_fifo_errors++;
}
@@ -1703,7 +1703,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return ret;
}
@@ -1718,7 +1718,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
{
int iobase, tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1756,7 +1756,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
//switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
- IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __FUNCTION__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B));
/* Set Rx Threshold */
switch_bank(iobase, BANK1);
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1779,7 +1779,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
__u8 status, MessageCount;
int len, i, iobase, val;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
st_fifo = &self->st_fifo;
iobase = self->io.fir_base;
@@ -1788,7 +1788,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
MessageCount = inb(iobase+ FIR_LSR)&0x07;
if (MessageCount > 0)
- IRDA_DEBUG(0, "%s(), Messsage count = %d,\n", __FUNCTION__ , MessageCount);
+ IRDA_DEBUG(0, "%s(), Messsage count = %d,\n", __func__ , MessageCount);
for (i=0; i<=MessageCount; i++)
{
@@ -1801,11 +1801,11 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
len = len << 8;
len |= inb(iobase+FIR_RX_DSR_LO);
- IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __FUNCTION__ , len);
- IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __FUNCTION__ , status);
+ IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
+ IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);
if (st_fifo->tail >= MAX_RX_WINDOW) {
- IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
continue;
}
@@ -1828,7 +1828,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
/* Check for errors */
if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
{
- IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __FUNCTION__ );
+ IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
/* Skip frame */
self->stats.rx_errors++;
@@ -1838,29 +1838,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
if (status & LSR_FIFO_UR)
{
self->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __FUNCTION__ );
+ IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
}
if (status & LSR_FRAME_ERROR)
{
self->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __FUNCTION__ );
+ IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
}
if (status & LSR_CRC_ERROR)
{
self->stats.rx_crc_errors++;
- IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __FUNCTION__ );
+ IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
}
if(self->rcvFramesOverflow)
{
self->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __FUNCTION__ );
+ IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
}
if(len == 0)
{
self->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __FUNCTION__ );
+ IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
}
}
else
@@ -1872,7 +1872,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
val = inb(iobase+FIR_BSR);
if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
{
- IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ );
/* Put this entry back in fifo */
st_fifo->head--;
@@ -1909,7 +1909,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
{
IRDA_WARNING("%s(), memory squeeze, "
"dropping frame.\n",
- __FUNCTION__);
+ __func__);
self->stats.rx_dropped++;
return FALSE;
@@ -1937,7 +1937,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return TRUE;
}
@@ -1956,7 +1956,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
int iobase;
__u32 speed;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return 0;);
@@ -2005,7 +2005,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -2024,7 +2024,7 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
unsigned long flags;
int ret = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -2032,11 +2032,11 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
- IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
/*
* This function will also be used by IrLAP to change the
* speed, so we still must allow for speed change within
@@ -2050,13 +2050,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
spin_unlock_irqrestore(&self->lock, flags);
break;
case SIOCSMEDIABUSY: /* Set media busy */
- IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
if (!capable(CAP_NET_ADMIN))
return -EPERM;
irda_device_set_media_busy(self->netdev, TRUE);
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
- IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
/* This is protected */
irq->ifr_receiving = ali_ircc_is_receiving(self);
break;
@@ -2064,7 +2064,7 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = -EOPNOTSUPP;
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return ret;
}
@@ -2081,7 +2081,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
int status = FALSE;
int iobase;
- IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );
IRDA_ASSERT(self != NULL, return FALSE;);
@@ -2095,7 +2095,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
{
/* We are receiving something */
- IRDA_DEBUG(1, "%s(), We are receiving something\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
status = TRUE;
}
switch_bank(iobase, BANK0);
@@ -2107,7 +2107,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return status;
}
@@ -2116,9 +2116,9 @@ static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
{
struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return &self->stats;
}
@@ -2164,7 +2164,7 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
int iobase = self->io.fir_base; /* or sir_base */
- IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __FUNCTION__ , enable);
+ IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);
/* Enable the interrupt which we wish to */
if (enable){
@@ -2205,14 +2205,14 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
else
outb(newMask, iobase+UART_IER);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void SIR2FIR(int iobase)
{
//unsigned char tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
@@ -2228,14 +2228,14 @@ static void SIR2FIR(int iobase)
//tmp |= 0x20;
//outb(tmp, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static void FIR2SIR(int iobase)
{
unsigned char val;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
@@ -2251,7 +2251,7 @@ static void FIR2SIR(int iobase)
val = inb(iobase+UART_LSR);
val = inb(iobase+UART_MSR);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 34ad189fff6..69d16b30323 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -245,7 +245,7 @@ toshoboe_dumpregs (struct toshoboe_cb *self)
{
__u32 ringbase;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
ringbase = INB (OBOE_RING_BASE0) << 10;
ringbase |= INB (OBOE_RING_BASE1) << 18;
@@ -293,7 +293,7 @@ static void
toshoboe_disablebm (struct toshoboe_cb *self)
{
__u8 command;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
command &= ~PCI_COMMAND_MASTER;
@@ -305,7 +305,7 @@ toshoboe_disablebm (struct toshoboe_cb *self)
static void
toshoboe_stopchip (struct toshoboe_cb *self)
{
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
/*Disable interrupts */
OUTB (0x0, OBOE_IER);
@@ -350,7 +350,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
__u16 pconfig = 0;
__u8 config0l = 0;
- IRDA_DEBUG (2, "%s(%d/%d)\n", __FUNCTION__, self->speed, self->io.speed);
+ IRDA_DEBUG (2, "%s(%d/%d)\n", __func__, self->speed, self->io.speed);
switch (self->speed)
{
@@ -482,7 +482,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
static void
toshoboe_enablebm (struct toshoboe_cb *self)
{
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
pci_set_master (self->pdev);
}
@@ -492,7 +492,7 @@ toshoboe_initring (struct toshoboe_cb *self)
{
int i;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
for (i = 0; i < TX_SLOTS; ++i)
{
@@ -550,7 +550,7 @@ toshoboe_startchip (struct toshoboe_cb *self)
{
__u32 physaddr;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
toshoboe_initring (self);
toshoboe_enablebm (self);
@@ -824,7 +824,7 @@ toshoboe_probe (struct toshoboe_cb *self)
#endif
unsigned long flags;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
if (request_irq (self->io.irq, toshoboe_probeinterrupt,
self->io.irqflags, "toshoboe", (void *) self))
@@ -983,10 +983,10 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
IRDA_ASSERT (self != NULL, return 0; );
- IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __FUNCTION__
+ IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__
,skb->len,self->txpending,INB (OBOE_ENABLEH));
if (!cb->magic) {
- IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __FUNCTION__, cb->magic);
+ IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __func__, cb->magic);
#ifdef DUMP_PACKETS
_dumpbufs(skb->data,skb->len,'>');
#endif
@@ -1015,7 +1015,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
{
self->new_speed = speed;
IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
- __FUNCTION__, speed);
+ __func__, speed);
/* if no data, that's all! */
if (!skb->len)
{
@@ -1057,7 +1057,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
/* which we will add a wrong checksum to */
mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
- IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __FUNCTION__
+ IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __func__
,skb->len,mtt,self->txpending);
if (mtt)
{
@@ -1101,7 +1101,7 @@ dumpbufs(skb->data,skb->len,'>');
if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
{
- IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __FUNCTION__
+ IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __func__
,skb->len, self->ring->tx[self->txs].control, self->txpending);
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
spin_unlock_irqrestore(&self->spinlock, flags);
@@ -1179,7 +1179,7 @@ toshoboe_interrupt (int irq, void *dev_id)
if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
self->txpending++;
}
- IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __FUNCTION__
+ IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __func__
,irqstat,txp,self->txpending);
txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
@@ -1209,7 +1209,7 @@ toshoboe_interrupt (int irq, void *dev_id)
{
self->speed = self->new_speed;
IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
- __FUNCTION__, self->speed);
+ __func__, self->speed);
toshoboe_setbaud (self);
}
@@ -1224,7 +1224,7 @@ toshoboe_interrupt (int irq, void *dev_id)
{
int len = self->ring->rx[self->rxs].len;
skb = NULL;
- IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __FUNCTION__
+ IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __func__
,len,self->ring->rx[self->rxs].control);
#ifdef DUMP_PACKETS
@@ -1246,7 +1246,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 2;
else
len = 0;
- IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __FUNCTION__, len,enable);
+ IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __func__, len,enable);
}
#ifdef USE_MIR
@@ -1256,7 +1256,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 2;
else
len = 0;
- IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __FUNCTION__, len,enable);
+ IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __func__, len,enable);
}
#endif
else if (enable & OBOE_ENABLEH_FIRON)
@@ -1265,10 +1265,10 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 4; /*FIXME: check this */
else
len = 0;
- IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __FUNCTION__, len,enable);
+ IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __func__, len,enable);
}
else
- IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __FUNCTION__, len,enable);
+ IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __func__, len,enable);
if (len)
{
@@ -1289,7 +1289,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
{
printk (KERN_INFO
"%s(), memory squeeze, dropping frame.\n",
- __FUNCTION__);
+ __func__);
}
}
}
@@ -1301,7 +1301,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
/* (SIR) data is splitted in several slots. */
/* we have to join all the received buffers received */
/*in a large buffer before checking CRC. */
- IRDA_DEBUG (0, "%s.err:%x(%x)\n", __FUNCTION__
+ IRDA_DEBUG (0, "%s.err:%x(%x)\n", __func__
,len,self->ring->rx[self->rxs].control);
}
@@ -1329,7 +1329,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
if (irqstat & OBOE_INT_SIP)
{
self->int_sip++;
- IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __FUNCTION__
+ IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __func__
,self->int_sip,irqstat,self->txpending);
}
return IRQ_HANDLED;
@@ -1343,7 +1343,7 @@ toshoboe_net_open (struct net_device *dev)
unsigned long flags;
int rc;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
self = netdev_priv(dev);
@@ -1381,7 +1381,7 @@ toshoboe_net_close (struct net_device *dev)
{
struct toshoboe_cb *self;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
IRDA_ASSERT (dev != NULL, return -1; );
self = (struct toshoboe_cb *) dev->priv;
@@ -1426,7 +1426,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT (self != NULL, return -1; );
- IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+ IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
/* Disable interrupts & save flags */
spin_lock_irqsave(&self->spinlock, flags);
@@ -1438,7 +1438,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
* speed, so we still must allow for speed change within
* interrupt context.
*/
- IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __FUNCTION__
+ IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __func__
,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
if (!in_interrupt () && !capable (CAP_NET_ADMIN)) {
ret = -EPERM;
@@ -1451,7 +1451,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
self->new_speed = irq->ifr_baudrate;
break;
case SIOCSMEDIABUSY: /* Set media busy */
- IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __FUNCTION__
+ IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __func__
,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
if (!capable (CAP_NET_ADMIN)) {
ret = -EPERM;
@@ -1461,11 +1461,11 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
- IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __FUNCTION__
+ IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __func__
,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
break;
default:
- IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+ IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
ret = -EOPNOTSUPP;
}
out:
@@ -1492,7 +1492,7 @@ toshoboe_close (struct pci_dev *pci_dev)
int i;
struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
IRDA_ASSERT (self != NULL, return; );
@@ -1533,7 +1533,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
int ok = 0;
int err;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
if ((err=pci_enable_device(pci_dev)))
return err;
@@ -1700,7 +1700,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
unsigned long flags;
int i = 10;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
if (!self || self->stopped)
return 0;
@@ -1728,7 +1728,7 @@ toshoboe_wakeup (struct pci_dev *pci_dev)
struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
unsigned long flags;
- IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG (4, "%s()\n", __func__);
if (!self || !self->stopped)
return 0;
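Aside: ali_ircc_interrupt() above and toshoboe_interrupt() here both follow the shared-interrupt convention of reporting whether the device actually raised the line. A hedged sketch of that convention; the device struct and the register read are illustrative, not from the patch:

#include <linux/interrupt.h>
#include <linux/io.h>

struct example_dev {
	void __iomem *regs;		/* hypothetical MMIO status register */
};

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_dev *p = dev_id;
	u8 pending = readb(p->regs);	/* assumed: pending bits at offset 0 */

	/* IRQ_RETVAL(x) is IRQ_HANDLED for nonzero x and IRQ_NONE otherwise,
	 * so returning it after masking the status tells the core whether
	 * this interrupt was ours - the same idea as returning
	 * IRQ_RETVAL(eir) in ali_ircc_fir_interrupt(). */
	return IRQ_RETVAL(pending);
}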
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
index 831572429bb..f83c5b881d2 100644
--- a/drivers/net/irda/ep7211-sir.c
+++ b/drivers/net/irda/ep7211-sir.c
@@ -14,7 +14,7 @@
#include <net/irda/irda_device.h>
#include <asm/io.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include "sir-dev.h"
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
index 738531b16bd..a31b8fa8aaa 100644
--- a/drivers/net/irda/girbil-sir.c
+++ b/drivers/net/irda/girbil-sir.c
@@ -86,7 +86,7 @@ static int girbil_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power on dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -102,7 +102,7 @@ static int girbil_open(struct sir_dev *dev)
static int girbil_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -126,7 +126,7 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[2];
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* dongle alread reset - port and dongle at default speed */
@@ -179,7 +179,7 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state);
+ IRDA_ERROR("%s - undefined state %d\n", __func__, state);
ret = -EINVAL;
break;
}
@@ -209,7 +209,7 @@ static int girbil_reset(struct sir_dev *dev)
u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
@@ -241,7 +241,7 @@ static int girbil_reset(struct sir_dev *dev)
break;
default:
- IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state);
+ IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
ret = -1;
break;
}
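Aside: girbil_open()/girbil_close() above show the pattern shared by the simple SIR dongles in this series: the dongle is powered through the UART's DTR/RTS lines. A minimal sketch under that assumption; only sirdev_set_dtr_rts() and struct sir_dev come from the real API, the dongle itself is hypothetical:

#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include "sir-dev.h"

static int example_dongle_open(struct sir_dev *dev)
{
	/* Raise DTR and RTS: powers the dongle and leaves it in its
	 * default state. */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);
	return 0;
}

static int example_dongle_close(struct sir_dev *dev)
{
	/* Drop both lines to power the dongle off again. */
	sirdev_set_dtr_rts(dev, FALSE, FALSE);
	return 0;
}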
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 18b471cd144..b5d6b9ac162 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -177,12 +177,12 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
(!force) && (self->speed != -1)) {
/* No speed and xbofs change here
* (we'll do it later in the write callback) */
- IRDA_DEBUG(2, "%s(), not changing speed yet\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), not changing speed yet\n", __func__);
*header = 0;
return;
}
- IRDA_DEBUG(2, "%s(), changing speed to %d\n", __FUNCTION__, self->new_speed);
+ IRDA_DEBUG(2, "%s(), changing speed to %d\n", __func__, self->new_speed);
self->speed = self->new_speed;
/* We will do ` self->new_speed = -1; ' in the completion
* handler just in case the current URB fail - Jean II */
@@ -228,7 +228,7 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
/* Set the negotiated additional XBOFS */
if (self->new_xbofs != -1) {
- IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __FUNCTION__, self->new_xbofs);
+ IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __func__, self->new_xbofs);
self->xbofs = self->new_xbofs;
/* We will do ` self->new_xbofs = -1; ' in the completion
* handler just in case the current URB fail - Jean II */
@@ -302,13 +302,13 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
struct urb *urb;
int ret;
- IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __func__,
self->new_speed, self->new_xbofs);
/* Grab the speed URB */
urb = self->speed_urb;
if (urb->status != 0) {
- IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), URB still in use!\n", __func__);
return;
}
@@ -334,7 +334,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
/* Irq disabled -> GFP_ATOMIC */
if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
- IRDA_WARNING("%s(), failed Speed URB\n", __FUNCTION__);
+ IRDA_WARNING("%s(), failed Speed URB\n", __func__);
}
}
@@ -347,7 +347,7 @@ static void speed_bulk_callback(struct urb *urb)
{
struct irda_usb_cb *self = urb->context;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* We should always have a context */
IRDA_ASSERT(self != NULL, return;);
@@ -357,7 +357,7 @@ static void speed_bulk_callback(struct urb *urb)
/* Check for timeout and other USB nasties */
if (urb->status != 0) {
/* I get a lot of -ECONNABORTED = -103 here - Jean II */
- IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags);
+ IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
/* Don't do anything here, that might confuse the USB layer.
* Instead, we will wait for irda_usb_net_timeout(), the
@@ -392,7 +392,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
int res, mtt;
int err = 1; /* Failed */
- IRDA_DEBUG(4, "%s() on %s\n", __FUNCTION__, netdev->name);
+ IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name);
netif_stop_queue(netdev);
@@ -403,7 +403,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
* We need to check self->present under the spinlock because
* of irda_usb_disconnect() is synchronous - Jean II */
if (!self->present) {
- IRDA_DEBUG(0, "%s(), Device is gone...\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), Device is gone...\n", __func__);
goto drop;
}
@@ -437,7 +437,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (urb->status != 0) {
- IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), URB still in use!\n", __func__);
goto drop;
}
@@ -524,7 +524,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
- IRDA_WARNING("%s(), failed Tx URB\n", __FUNCTION__);
+ IRDA_WARNING("%s(), failed Tx URB\n", __func__);
self->stats.tx_errors++;
/* Let USB recover : We will catch that in the watchdog */
/*netif_start_queue(netdev);*/
@@ -556,7 +556,7 @@ static void write_bulk_callback(struct urb *urb)
struct sk_buff *skb = urb->context;
struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* We should always have a context */
IRDA_ASSERT(self != NULL, return;);
@@ -570,7 +570,7 @@ static void write_bulk_callback(struct urb *urb)
/* Check for timeout and other USB nasties */
if (urb->status != 0) {
/* I get a lot of -ECONNABORTED = -103 here - Jean II */
- IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags);
+ IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
/* Don't do anything here, that might confuse the USB layer,
* and we could go in recursion and blow the kernel stack...
@@ -589,7 +589,7 @@ static void write_bulk_callback(struct urb *urb)
/* If the network is closed, stop everything */
if ((!self->netopen) || (!self->present)) {
- IRDA_DEBUG(0, "%s(), Network is gone...\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), Network is gone...\n", __func__);
spin_unlock_irqrestore(&self->lock, flags);
return;
}
@@ -600,7 +600,7 @@ static void write_bulk_callback(struct urb *urb)
(self->new_xbofs != self->xbofs)) {
/* We haven't changed speed yet (because of
* IUC_SPEED_BUG), so do it now - Jean II */
- IRDA_DEBUG(1, "%s(), Changing speed now...\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__);
irda_usb_change_speed_xbofs(self);
} else {
/* New speed and xbof is now commited in hardware */
@@ -632,7 +632,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
struct urb *urb;
int done = 0; /* If we have made any progress */
- IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __func__);
IRDA_ASSERT(self != NULL, return;);
/* Protect us from USB callbacks, net Tx and else. */
@@ -640,7 +640,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
/* self->present *MUST* be read under spinlock */
if (!self->present) {
- IRDA_WARNING("%s(), device not present!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), device not present!\n", __func__);
netif_stop_queue(netdev);
spin_unlock_irqrestore(&self->lock, flags);
return;
@@ -763,7 +763,7 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
struct irda_skb_cb *cb;
int ret;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* This should never happen */
IRDA_ASSERT(skb != NULL, return;);
@@ -786,7 +786,7 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
/* If this ever happen, we are in deep s***.
* Basically, the Rx path will stop... */
IRDA_WARNING("%s(), Failed to submit Rx URB %d\n",
- __FUNCTION__, ret);
+ __func__, ret);
}
}
@@ -807,7 +807,7 @@ static void irda_usb_receive(struct urb *urb)
struct urb *next_urb;
unsigned int len, docopy;
- IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
+ IRDA_DEBUG(2, "%s(), len=%d\n", __func__, urb->actual_length);
/* Find ourselves */
cb = (struct irda_skb_cb *) skb->cb;
@@ -817,7 +817,7 @@ static void irda_usb_receive(struct urb *urb)
/* If the network is closed or the device gone, stop everything */
if ((!self->netopen) || (!self->present)) {
- IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), Network is gone!\n", __func__);
/* Don't re-submit the URB : will stall the Rx path */
return;
}
@@ -840,7 +840,7 @@ static void irda_usb_receive(struct urb *urb)
/* Usually precursor to a hot-unplug on OHCI. */
default:
self->stats.rx_errors++;
- IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
+ IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
break;
}
/* If we received an error, we don't want to resubmit the
@@ -861,7 +861,7 @@ static void irda_usb_receive(struct urb *urb)
/* Check for empty frames */
if (urb->actual_length <= self->header_length) {
- IRDA_WARNING("%s(), empty frame!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), empty frame!\n", __func__);
goto done;
}
@@ -967,7 +967,7 @@ static void irda_usb_rx_defer_expired(unsigned long data)
struct irda_skb_cb *cb;
struct urb *next_urb;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Find ourselves */
cb = (struct irda_skb_cb *) skb->cb;
@@ -1053,7 +1053,7 @@ static int stir421x_fw_upload(struct irda_usb_cb *self,
patch_block, block_size,
&actual_len, msecs_to_jiffies(500));
IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n",
- __FUNCTION__, actual_len, ret);
+ __func__, actual_len, ret);
if (ret < 0)
break;
@@ -1092,7 +1092,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
/* We get a patch from userspace */
IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n",
- __FUNCTION__, stir421x_fw_name, fw->size);
+ __func__, stir421x_fw_name, fw->size);
ret = -EINVAL;
@@ -1116,7 +1116,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
+ (build % 10);
IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n",
- __FUNCTION__, fw_version);
+ __func__, fw_version);
}
}
@@ -1172,7 +1172,7 @@ static int irda_usb_net_open(struct net_device *netdev)
char hwname[16];
int i;
- IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s()\n", __func__);
IRDA_ASSERT(netdev != NULL, return -1;);
self = (struct irda_usb_cb *) netdev->priv;
@@ -1182,13 +1182,13 @@ static int irda_usb_net_open(struct net_device *netdev)
/* Can only open the device if it's there */
if(!self->present) {
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_WARNING("%s(), device not present!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), device not present!\n", __func__);
return -1;
}
if(self->needspatch) {
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_WARNING("%s(), device needs patch\n", __FUNCTION__) ;
+ IRDA_WARNING("%s(), device needs patch\n", __func__) ;
return -EIO ;
}
@@ -1231,7 +1231,7 @@ static int irda_usb_net_open(struct net_device *netdev)
/* If this ever happen, we are in deep s***.
* Basically, we can't start the Rx path... */
IRDA_WARNING("%s(), Failed to allocate Rx skb\n",
- __FUNCTION__);
+ __func__);
return -1;
}
//skb_reserve(newskb, USB_IRDA_HEADER - 1);
@@ -1254,7 +1254,7 @@ static int irda_usb_net_close(struct net_device *netdev)
struct irda_usb_cb *self;
int i;
- IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s()\n", __func__);
IRDA_ASSERT(netdev != NULL, return -1;);
self = (struct irda_usb_cb *) netdev->priv;
@@ -1309,7 +1309,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
self = dev->priv;
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -1367,7 +1367,7 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
{
struct irda_class_desc *desc;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
desc = self->irda_desc;
@@ -1384,7 +1384,7 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
self->qos.data_size.bits = desc->bmDataSize;
IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
- __FUNCTION__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
+ __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
/* Don't always trust what the dongle tell us */
if(self->capability & IUC_SIR_ONLY)
@@ -1419,7 +1419,7 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
{
struct net_device *netdev = self->netdev;
- IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s()\n", __func__);
irda_usb_init_qos(self);
@@ -1442,7 +1442,7 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
*/
static inline void irda_usb_close(struct irda_usb_cb *self)
{
- IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s()\n", __func__);
/* Remove netdevice */
unregister_netdev(self->netdev);
@@ -1515,13 +1515,13 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
/* This is our interrupt endpoint */
self->bulk_int_ep = ep;
} else {
- IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __FUNCTION__, ep);
+ IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __func__, ep);
}
}
}
IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
- __FUNCTION__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
+ __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0));
}
@@ -1583,7 +1583,7 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf
0, intf->altsetting->desc.bInterfaceNumber, desc,
sizeof(*desc), 500);
- IRDA_DEBUG(1, "%s(), ret=%d\n", __FUNCTION__, ret);
+ IRDA_DEBUG(1, "%s(), ret=%d\n", __func__, ret);
if (ret < sizeof(*desc)) {
IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n",
(ret<0) ? "failed" : "too short", ret);
@@ -1696,10 +1696,10 @@ static int irda_usb_probe(struct usb_interface *intf,
/* Martin Diehl says if we get a -EPIPE we should
* be fine and we don't need to do a usb_clear_halt().
* - Jean II */
- IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __func__);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown error %d\n", __FUNCTION__, ret);
+ IRDA_DEBUG(0, "%s(), Unknown error %d\n", __func__, ret);
ret = -EIO;
goto err_out_3;
}
@@ -1708,7 +1708,7 @@ static int irda_usb_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
if(!irda_usb_parse_endpoints(self, interface->endpoint,
interface->desc.bNumEndpoints)) {
- IRDA_ERROR("%s(), Bogus endpoints...\n", __FUNCTION__);
+ IRDA_ERROR("%s(), Bogus endpoints...\n", __func__);
ret = -EIO;
goto err_out_3;
}
@@ -1815,7 +1815,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
struct irda_usb_cb *self = usb_get_intfdata(intf);
int i;
- IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s()\n", __func__);
usb_set_intfdata(intf, NULL);
if (!self)
@@ -1865,7 +1865,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
/* Free self and network device */
free_netdev(self->netdev);
- IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
}
/*------------------------------------------------------------------*/
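Aside: several irda-usb paths above submit URBs with interrupts disabled ("Irq disabled -> GFP_ATOMIC"). A hedged sketch of that allocation rule; the endpoint number, buffer handling and completion callback are purely illustrative, and a real driver keeps and reuses its URBs rather than allocating per call:

#include <linux/usb.h>

static void example_tx_complete(struct urb *urb)
{
	/* Completion handlers run in atomic context, so any resubmission
	 * from here must also use GFP_ATOMIC. */
}

static int example_submit(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
	int ret;

	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1 /* assumed ep */),
			  buf, len, example_tx_complete, NULL);

	/* GFP_ATOMIC: this path may be reached under a spinlock with IRQs
	 * off, exactly the situation the driver's comments describe. */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_free_urb(urb);
	return ret;
}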
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 9e33196f945..6bcee01c684 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -231,7 +231,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
dev = priv->dev;
if (!dev) {
- IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), not ready yet!\n", __func__);
return;
}
@@ -388,7 +388,7 @@ static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
IRDA_ASSERT(priv != NULL, return -ENODEV;);
IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
- IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __FUNCTION__, cmd);
+ IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __func__, cmd);
dev = priv->dev;
IRDA_ASSERT(dev != NULL, return -1;);
@@ -476,7 +476,7 @@ static int irtty_open(struct tty_struct *tty)
mutex_unlock(&irtty_mutex);
- IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name);
+ IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __func__, tty->name);
return 0;
@@ -528,7 +528,7 @@ static void irtty_close(struct tty_struct *tty)
kfree(priv);
- IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __FUNCTION__, tty->name);
+ IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __func__, tty->name);
}
/* ------------------------------------------------------- */
@@ -566,7 +566,7 @@ static void __exit irtty_sir_cleanup(void)
if ((err = tty_unregister_ldisc(N_IRDA))) {
IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n",
- __FUNCTION__, err);
+ __func__, err);
}
}
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 648e54b3f00..73fe83be34f 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -243,7 +243,7 @@ static void kingsun_rcv_irq(struct urb *urb)
}
} else if (urb->actual_length > 0) {
err("%s(): Unexpected response length, expected %d got %d",
- __FUNCTION__, kingsun->max_rx, urb->actual_length);
+ __func__, kingsun->max_rx, urb->actual_length);
}
/* This urb has already been filled in kingsun_net_open */
ret = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index 73261c54bbf..d6d9d2e5ad4 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -78,7 +78,7 @@ static int litelink_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power up dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -95,7 +95,7 @@ static int litelink_open(struct sir_dev *dev)
static int litelink_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -113,7 +113,7 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
{
int i;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* dongle already reset by irda-thread - current speed (dongle and
* port) is the default speed (115200 for litelink!)
@@ -156,7 +156,7 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
*/
static int litelink_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* probably the power-up can be dropped here, but with only
* 15 usec delay it's not worth the risk unless somebody with
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index 809906d9476..1ceed9cfb7c 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -67,13 +67,13 @@ static struct dongle_driver ma600 = {
static int __init ma600_sir_init(void)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
return irda_register_dongle(&ma600);
}
static void __exit ma600_sir_cleanup(void)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
irda_unregister_dongle(&ma600);
}
@@ -88,7 +88,7 @@ static int ma600_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -106,7 +106,7 @@ static int ma600_open(struct sir_dev *dev)
static int ma600_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -176,7 +176,7 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
{
u8 byte;
- IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__,
speed, dev->speed);
/* dongle already reset, dongle and port at default speed (9600) */
@@ -201,12 +201,12 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
sirdev_raw_read(dev, &byte, sizeof(byte));
if (byte != get_control_byte(speed)) {
IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n",
- __FUNCTION__, (unsigned) byte,
+ __func__, (unsigned) byte,
(unsigned) get_control_byte(speed));
return -1;
}
else
- IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__);
#endif
/* Set DTR, Set RTS */
@@ -238,7 +238,7 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
int ma600_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Reset the dongle : set DTR low for 10 ms */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c
index 67bd016e4df..5e2f4859cee 100644
--- a/drivers/net/irda/mcp2120-sir.c
+++ b/drivers/net/irda/mcp2120-sir.c
@@ -63,7 +63,7 @@ static int mcp2120_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* seems no explicit power-on required here and reset switching it on anyway */
@@ -76,7 +76,7 @@ static int mcp2120_open(struct sir_dev *dev)
static int mcp2120_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
/* reset and inhibit mcp2120 */
@@ -102,7 +102,7 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[2];
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_SPEED:
@@ -155,7 +155,7 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s(), undefine state %d\n", __FUNCTION__, state);
+ IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
ret = -EINVAL;
break;
}
@@ -187,7 +187,7 @@ static int mcp2120_reset(struct sir_dev *dev)
unsigned delay = 0;
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
@@ -213,7 +213,7 @@ static int mcp2120_reset(struct sir_dev *dev)
break;
default:
- IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state);
+ IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
ret = -EINVAL;
break;
}
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index effc1ce8179..8583d951a6a 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -151,8 +151,8 @@ static char *dongle_types[] = {
static chipio_t pnp_info;
static const struct pnp_device_id nsc_ircc_pnp_table[] = {
{ .id = "NSC6001", .driver_data = 0 },
- { .id = "IBM0071", .driver_data = 0 },
{ .id = "HWPC224", .driver_data = 0 },
+ { .id = "IBM0071", .driver_data = NSC_FORCE_DONGLE_TYPE9 },
{ }
};
@@ -223,7 +223,7 @@ static int __init nsc_ircc_init(void)
/* Probe for all the NSC chipsets we know about */
for (chip = chips; chip->name ; chip++) {
- IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__,
chip->name);
/* Try all config registers for this chip */
@@ -235,7 +235,7 @@ static int __init nsc_ircc_init(void)
/* Read index register */
reg = inb(cfg_base);
if (reg == 0xff) {
- IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __FUNCTION__, cfg_base);
+ IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base);
continue;
}
@@ -244,7 +244,7 @@ static int __init nsc_ircc_init(void)
id = inb(cfg_base+1);
if ((id & chip->cid_mask) == chip->cid_value) {
IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
- __FUNCTION__, chip->name, id & ~chip->cid_mask);
+ __func__, chip->name, id & ~chip->cid_mask);
/*
* If we found a correct PnP setting,
@@ -295,7 +295,7 @@ static int __init nsc_ircc_init(void)
}
i++;
} else {
- IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id);
+ IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id);
}
}
}
@@ -345,7 +345,7 @@ static int __init nsc_ircc_open(chipio_t *info)
void *ret;
int err, chip_index;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
@@ -354,7 +354,7 @@ static int __init nsc_ircc_open(chipio_t *info)
}
if (chip_index == ARRAY_SIZE(dev_self)) {
- IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__);
return -ENOMEM;
}
@@ -369,7 +369,7 @@ static int __init nsc_ircc_open(chipio_t *info)
dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
if (dev == NULL) {
IRDA_ERROR("%s(), can't allocate memory for "
- "control block!\n", __FUNCTION__);
+ "control block!\n", __func__);
return -ENOMEM;
}
@@ -393,7 +393,7 @@ static int __init nsc_ircc_open(chipio_t *info)
ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
if (!ret) {
IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
- __FUNCTION__, self->io.fir_base);
+ __func__, self->io.fir_base);
err = -ENODEV;
goto out1;
}
@@ -450,7 +450,7 @@ static int __init nsc_ircc_open(chipio_t *info)
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
goto out4;
}
IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
@@ -506,7 +506,7 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
@@ -519,7 +519,7 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
/* Release the PORT that this driver is using */
IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
- __FUNCTION__, self->io.fir_base);
+ __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -557,7 +557,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0x2e8: outb(0x15, cfg_base+1); break;
case 0x3f8: outb(0x16, cfg_base+1); break;
case 0x2f8: outb(0x17, cfg_base+1); break;
- default: IRDA_ERROR("%s(), invalid base_address", __FUNCTION__);
+ default: IRDA_ERROR("%s(), invalid base_address", __func__);
}
/* Control Signal Routing Register (CSRT) */
@@ -569,7 +569,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 9: temp = 0x05; break;
case 11: temp = 0x06; break;
case 15: temp = 0x07; break;
- default: IRDA_ERROR("%s(), invalid irq", __FUNCTION__);
+ default: IRDA_ERROR("%s(), invalid irq", __func__);
}
outb(CFG_108_CSRT, cfg_base);
@@ -577,7 +577,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0: outb(0x08+temp, cfg_base+1); break;
case 1: outb(0x10+temp, cfg_base+1); break;
case 3: outb(0x18+temp, cfg_base+1); break;
- default: IRDA_ERROR("%s(), invalid dma", __FUNCTION__);
+ default: IRDA_ERROR("%s(), invalid dma", __func__);
}
outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
@@ -616,7 +616,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
break;
}
info->sir_base = info->fir_base;
- IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__,
info->fir_base);
/* Read control signals routing register (CSRT) */
@@ -649,7 +649,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->irq = 15;
break;
}
- IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
+ IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
/* Currently we only read Rx DMA but it will also be used for Tx */
switch ((reg >> 3) & 0x03) {
@@ -666,7 +666,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->dma = 3;
break;
}
- IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
+ IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
/* Read mode control register (MCTL) */
outb(CFG_108_MCTL, cfg_base);
@@ -823,7 +823,7 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
/* User is sure about his config... accept it. */
IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
"io=0x%04x, irq=%d, dma=%d\n",
- __FUNCTION__, info->fir_base, info->irq, info->dma);
+ __func__, info->fir_base, info->irq, info->dma);
/* Access bank for SP2 */
outb(CFG_39X_LDN, cfg_base);
@@ -864,7 +864,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
int enabled, susp;
IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
- __FUNCTION__, cfg_base);
+ __func__, cfg_base);
/* This function should be executed with irq off to avoid
* another driver messing with the Super I/O bank - Jean II */
@@ -898,7 +898,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
outb(CFG_39X_SPC, cfg_base);
susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
- IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __FUNCTION__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
+ IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
/* Configure SP2 */
@@ -930,7 +930,10 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
pnp_info.dma = -1;
pnp_succeeded = 1;
- /* There don't seem to be any way to get the cfg_base.
+ if (id->driver_data & NSC_FORCE_DONGLE_TYPE9)
+ dongle_id = 0x9;
+
+ /* There doesn't seem to be any way of getting the cfg_base.
* On my box, cfg_base is in the PnP descriptor of the
* motherboard. Oh well... Jean II */
@@ -947,7 +950,7 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
pnp_info.dma = pnp_dma(dev, 0);
IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
- __FUNCTION__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
+ __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
if((pnp_info.fir_base == 0) ||
(pnp_info.irq == -1) || (pnp_info.dma == -1)) {
@@ -976,7 +979,7 @@ static int nsc_ircc_setup(chipio_t *info)
version = inb(iobase+MID);
IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
- __FUNCTION__, driver_name, version);
+ __func__, driver_name, version);
/* Should be 0x2? */
if (0x20 != (version & 0xf0)) {
@@ -1080,30 +1083,30 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved, but this is what the Thinkpad reports */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
IRDA_DEBUG(0, "%s(), %s\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
outb(0x28, iobase+7); /* Set irsl[0-2] as output */
@@ -1111,7 +1114,7 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
case 0x0A: /* same as */
case 0x0B: /* Reserved */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x0C: /* same as */
case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1126,14 +1129,14 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
break;
case 0x0F: /* No dongle connected */
IRDA_DEBUG(0, "%s(), %s\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
switch_bank(iobase, BANK0);
outb(0x62, iobase+MCR);
break;
default:
IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
- __FUNCTION__, dongle_id);
+ __func__, dongle_id);
}
/* IRCFG1: IRSL1 and 2 are set to IrDA mode */
@@ -1165,30 +1168,30 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
IRDA_DEBUG(0, "%s(), %s\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
outb(0x00, iobase+4);
if (speed > 115200)
outb(0x01, iobase+4);
@@ -1207,7 +1210,7 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
case 0x0A: /* same as */
case 0x0B: /* Reserved */
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
break;
case 0x0C: /* same as */
case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1216,13 +1219,13 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
break;
case 0x0F: /* No dongle connected */
IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __FUNCTION__, dongle_types[dongle_id]);
+ __func__, dongle_types[dongle_id]);
switch_bank(iobase, BANK0);
outb(0x62, iobase+MCR);
break;
default:
- IRDA_DEBUG(0, "%s(), invalid data_rate\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__);
}
/* Restore bank register */
outb(bank, iobase+BSR);
@@ -1243,7 +1246,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
__u8 bank;
__u8 ier; /* Interrupt enable register */
- IRDA_DEBUG(2, "%s(), speed=%d\n", __FUNCTION__, speed);
+ IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return 0;);
@@ -1276,20 +1279,20 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
outb(inb(iobase+4) | 0x04, iobase+4);
mcr = MCR_MIR;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
mcr = MCR_MIR;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
break;
case 4000000:
mcr = MCR_FIR;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
break;
default:
mcr = MCR_FIR;
IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
- __FUNCTION__, speed);
+ __func__, speed);
break;
}
@@ -1594,7 +1597,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
int actual = 0;
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(4, "%s()\n", __func__);
/* Save current bank */
bank = inb(iobase+BSR);
@@ -1602,7 +1605,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
switch_bank(iobase, BANK0);
if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
- __FUNCTION__);
+ __func__);
/* FIFO may still be filled to the Tx interrupt threshold */
fifo_size -= 17;
@@ -1615,7 +1618,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
}
IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
- __FUNCTION__, fifo_size, actual, len);
+ __func__, fifo_size, actual, len);
/* Restore bank */
outb(bank, iobase+BSR);
@@ -1636,7 +1639,7 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
__u8 bank;
int ret = TRUE;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
iobase = self->io.fir_base;
@@ -1767,7 +1770,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
if (st_fifo->tail >= MAX_RX_WINDOW) {
- IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), window is full!\n", __func__);
continue;
}
@@ -1859,7 +1862,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
if (skb == NULL) {
IRDA_WARNING("%s(), memory squeeze, "
"dropping frame.\n",
- __FUNCTION__);
+ __func__);
self->stats.rx_dropped++;
/* Restore bank register */
@@ -1965,7 +1968,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
* Need to be after self->io.direction to avoid race with
* nsc_ircc_hard_xmit_sir() - Jean II */
if (self->new_speed) {
- IRDA_DEBUG(2, "%s(), Changing speed!\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__);
self->ier = nsc_ircc_change_speed(self,
self->new_speed);
self->new_speed = 0;
@@ -2051,7 +2054,7 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
} else
IRDA_WARNING("%s(), potential "
"Tx queue lockup !\n",
- __FUNCTION__);
+ __func__);
}
} else {
/* Not finished yet, so interrupt on DMA again */
@@ -2160,7 +2163,7 @@ static int nsc_ircc_net_open(struct net_device *dev)
char hwname[32];
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = (struct nsc_ircc_cb *) dev->priv;
@@ -2222,7 +2225,7 @@ static int nsc_ircc_net_close(struct net_device *dev)
int iobase;
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
@@ -2276,7 +2279,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 29398a4f73f..71cd3c5a076 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -35,6 +35,9 @@
#include <linux/types.h>
#include <asm/io.h>
+/* Features for chips (set in driver_data) */
+#define NSC_FORCE_DONGLE_TYPE9 0x00000001
+
/* DMA modes needed */
#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
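The new NSC_FORCE_DONGLE_TYPE9 bit is carried in the .driver_data member of the PnP ID table, which lets nsc_ircc_pnp_probe() force dongle type 9 for the IBM0071 id without a separate quirk list. A rough userspace sketch of the same table-plus-flags lookup pattern follows; the names (demo_id, probe_one, FORCE_DONGLE_TYPE9 as a local macro) are illustrative and this is not the kernel PnP API:

/* Illustrative sketch of an ID table whose per-entry flag word steers
 * probe behaviour, loosely mirroring the pnp_device_id/.driver_data idiom.
 */
#include <stdio.h>
#include <string.h>

#define FORCE_DONGLE_TYPE9	0x00000001UL

struct demo_id {
	const char *id;
	unsigned long driver_data;	/* feature bits for this device id */
};

static const struct demo_id id_table[] = {
	{ "NSC6001", 0 },
	{ "HWPC224", 0 },
	{ "IBM0071", FORCE_DONGLE_TYPE9 },
	{ NULL, 0 }
};

static void probe_one(const struct demo_id *id)
{
	int dongle_id = 0;	/* 0 means "autodetect" in this sketch */

	if (id->driver_data & FORCE_DONGLE_TYPE9)
		dongle_id = 0x9;

	printf("%s: dongle_id set to %#x\n", id->id, dongle_id);
}

int main(void)
{
	for (const struct demo_id *id = id_table; id->id; id++)
		if (!strcmp(id->id, "IBM0071"))
			probe_one(id);
	return 0;
}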
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index 8c22c7374a2..75714bc7103 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -92,7 +92,7 @@ static int old_belkin_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power on dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -110,7 +110,7 @@ static int old_belkin_open(struct sir_dev *dev)
static int old_belkin_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -125,7 +125,7 @@ static int old_belkin_close(struct sir_dev *dev)
*/
static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
dev->speed = 9600;
return (speed==dev->speed) ? 0 : -EINVAL;
@@ -139,7 +139,7 @@ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
*/
static int old_belkin_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* This dongles speed "defaults" to 9600 bps ;-) */
dev->speed = 9600;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index f76b0b6c277..4aa61a1a3d5 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -23,8 +23,8 @@
#include <net/irda/irda_device.h>
#include <asm/dma.h>
-#include <asm/arch/irda.h>
-#include <asm/arch/pxa-regs.h>
+#include <mach/irda.h>
+#include <mach/pxa-regs.h>
#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
#define IrSR_RXPL_POS_IS_ZERO 0x0
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1bc8518f919..a95188948de 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -37,7 +37,7 @@
#include <asm/irq.h>
#include <asm/dma.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <asm/mach/irda.h>
static int power_level = 3;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 6078e03de9a..3f32909c24c 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -80,7 +80,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
return 0;
default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ IRDA_ERROR("%s - undefined state\n", __func__);
return -EINVAL;
}
fsm->substate = next_state;
@@ -107,11 +107,11 @@ static void sirdev_config_fsm(struct work_struct *work)
int ret = -1;
unsigned delay;
- IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
+ IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
do {
IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
- __FUNCTION__, fsm->state, fsm->substate);
+ __func__, fsm->state, fsm->substate);
next_state = fsm->state;
delay = 0;
@@ -249,12 +249,12 @@ static void sirdev_config_fsm(struct work_struct *work)
break;
default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ IRDA_ERROR("%s - undefined state\n", __func__);
fsm->result = -EINVAL;
/* fall thru */
case SIRDEV_STATE_ERROR:
- IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
+ IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
netif_stop_queue(dev->netdev);
@@ -284,11 +284,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
{
struct sir_fsm *fsm = &dev->fsm;
- IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
+ IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
+ initial_state, param);
if (down_trylock(&fsm->sem)) {
if (in_interrupt() || in_atomic() || irqs_disabled()) {
- IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
return -EWOULDBLOCK;
} else
down(&fsm->sem);
@@ -296,7 +297,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
if (fsm->state == SIRDEV_STATE_DEAD) {
/* race with sirdev_close should never happen */
- IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), instance staled!\n", __func__);
up(&fsm->sem);
return -ESTALE; /* or better EPIPE? */
}
@@ -341,7 +342,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
int err;
- IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);
+ IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
err = sirdev_schedule_dongle_open(dev, type);
if (unlikely(err))
@@ -376,7 +377,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (ret > 0) {
- IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
dev->tx_buff.data += ret;
dev->tx_buff.len -= ret;
@@ -437,7 +438,7 @@ void sirdev_write_complete(struct sir_dev *dev)
spin_lock_irqsave(&dev->tx_lock, flags);
IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
- __FUNCTION__, dev->tx_buff.len);
+ __func__, dev->tx_buff.len);
if (likely(dev->tx_buff.len > 0)) {
/* Write data left in transmit buffer */
@@ -450,7 +451,7 @@ void sirdev_write_complete(struct sir_dev *dev)
else if (unlikely(actual<0)) {
/* could be dropped later when we have tx_timeout to recover */
IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __FUNCTION__, actual);
+ __func__, actual);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
dev_kfree_skb_any(skb);
@@ -471,7 +472,7 @@ void sirdev_write_complete(struct sir_dev *dev)
* restarted when the irda-thread has completed the request.
*/
- IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
dev->raw_tx = 0;
goto done; /* no post-frame handling in raw mode */
}
@@ -488,7 +489,7 @@ void sirdev_write_complete(struct sir_dev *dev)
* re-activated.
*/
- IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);
+ IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
@@ -498,14 +499,14 @@ void sirdev_write_complete(struct sir_dev *dev)
}
if (unlikely(dev->new_speed > 0)) {
- IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
+ IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
err = sirdev_schedule_speed(dev, dev->new_speed);
if (unlikely(err)) {
/* should never happen
* forget the speed change and hope the stack recovers
*/
IRDA_ERROR("%s - schedule speed change failed: %d\n",
- __FUNCTION__, err);
+ __func__, err);
netif_wake_queue(dev->netdev);
}
/* else: success
@@ -532,13 +533,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
if (!dev || !dev->netdev) {
- IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), not ready yet!\n", __func__);
return -1;
}
if (!dev->irlap) {
IRDA_WARNING("%s - too early: %p / %zd!\n",
- __FUNCTION__, cp, count);
+ __func__, cp, count);
return -1;
}
@@ -548,7 +549,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
*/
irda_device_set_media_busy(dev->netdev, TRUE);
dev->stats.rx_dropped++;
- IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
+ IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
return 0;
}
@@ -600,7 +601,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
netif_stop_queue(ndev);
- IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);
+ IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
speed = irda_get_next_speed(skb);
if ((speed != dev->speed) && (speed != -1)) {
@@ -637,7 +638,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Check problems */
if(spin_is_locked(&dev->tx_lock)) {
- IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
}
/* serialize with write completion */
@@ -666,7 +667,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
else if (unlikely(actual < 0)) {
/* could be dropped later when we have tx_timeout to recover */
IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __FUNCTION__, actual);
+ __func__, actual);
dev_kfree_skb_any(skb);
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
@@ -687,7 +688,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
IRDA_ASSERT(dev != NULL, return -1;);
- IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);
+ IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -804,7 +805,7 @@ static int sirdev_open(struct net_device *ndev)
if (!try_module_get(drv->owner))
return -ESTALE;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
if (sirdev_alloc_buffers(dev))
goto errout_dec;
@@ -822,7 +823,7 @@ static int sirdev_open(struct net_device *ndev)
netif_wake_queue(ndev);
- IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);
+ IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
return 0;
@@ -842,7 +843,7 @@ static int sirdev_close(struct net_device *ndev)
struct sir_dev *dev = ndev->priv;
const struct sir_driver *drv;
-// IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+// IRDA_DEBUG(0, "%s\n", __func__);
netif_stop_queue(ndev);
@@ -878,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
struct net_device *ndev;
struct sir_dev *dev;
- IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);
+ IRDA_DEBUG(0, "%s - %s\n", __func__, name);
/* instead of adding tests to protect against drv->do_write==NULL
* at several places we refuse to create a sir_dev instance for
@@ -892,7 +893,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
*/
ndev = alloc_irdadev(sizeof(*dev));
if (ndev == NULL) {
- IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
+ IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
goto out;
}
dev = ndev->priv;
@@ -921,7 +922,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
ndev->do_ioctl = sirdev_ioctl;
if (register_netdev(ndev)) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
goto out_freenetdev;
}
@@ -938,7 +939,7 @@ int sirdev_put_instance(struct sir_dev *dev)
{
int err = 0;
- IRDA_DEBUG(0, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s\n", __func__);
atomic_set(&dev->enable_rx, 0);
@@ -948,7 +949,7 @@ int sirdev_put_instance(struct sir_dev *dev)
if (dev->dongle_drv)
err = sirdev_schedule_dongle_close(dev);
if (err)
- IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);
+ IRDA_ERROR("%s - error %d\n", __func__, err);
sirdev_close(dev->netdev);
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 25d5b8a96bd..36030241f7a 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -36,7 +36,7 @@ int irda_register_dongle(struct dongle_driver *new)
struct dongle_driver *drv;
IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
- __FUNCTION__, new->driver_name, new->type);
+ __func__, new->driver_name, new->type);
mutex_lock(&dongle_list_lock);
list_for_each(entry, &dongle_list) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 78dc8e7837f..b5360fe99d3 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -460,7 +460,7 @@ static int __init smsc_ircc_init(void)
{
int ret;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
ret = platform_driver_register(&smsc_ircc_driver);
if (ret) {
@@ -500,7 +500,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
struct net_device *dev;
int err;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
err = smsc_ircc_present(fir_base, sir_base);
if (err)
@@ -508,7 +508,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
err = -ENOMEM;
if (dev_count >= ARRAY_SIZE(dev_self)) {
- IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), too many devices!\n", __func__);
goto err_out1;
}
@@ -517,7 +517,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
*/
dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
if (!dev) {
- IRDA_WARNING("%s() can't allocate net device\n", __FUNCTION__);
+ IRDA_WARNING("%s() can't allocate net device\n", __func__);
goto err_out1;
}
@@ -633,14 +633,14 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
driver_name)) {
IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
- __FUNCTION__, fir_base);
+ __func__, fir_base);
goto out1;
}
if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
driver_name)) {
IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
- __FUNCTION__, sir_base);
+ __func__, sir_base);
goto out2;
}
@@ -656,7 +656,7 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
- __FUNCTION__, fir_base);
+ __func__, fir_base);
goto out3;
}
IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
@@ -793,7 +793,7 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -878,7 +878,7 @@ int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
s32 speed;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
IRDA_ASSERT(dev != NULL, return 0;);
@@ -953,21 +953,21 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
ir_mode = IRCC_CFGA_IRDA_HDLC;
ctrl = IRCC_CRC;
fast = 0;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
ir_mode = IRCC_CFGA_IRDA_HDLC;
ctrl = IRCC_1152 | IRCC_CRC;
fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
- __FUNCTION__);
+ __func__);
break;
case 4000000:
ir_mode = IRCC_CFGA_IRDA_4PPM;
ctrl = IRCC_CRC;
fast = IRCC_LCR_A_FAST;
IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
- __FUNCTION__);
+ __func__);
break;
}
#if 0
@@ -995,7 +995,7 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
struct net_device *dev;
int fir_base;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1043,7 +1043,7 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
{
int fir_base;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
@@ -1067,7 +1067,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
struct net_device *dev;
int last_speed_was_sir;
- IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed);
+ IRDA_DEBUG(0, "%s() changing speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1135,7 +1135,7 @@ void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
int lcr; /* Line control reg */
int divisor;
- IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __FUNCTION__, speed);
+ IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
@@ -1170,7 +1170,7 @@ void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
/* Turn on interrups */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
- IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed);
+ IRDA_DEBUG(2, "%s() speed changed to: %d\n", __func__, speed);
}
@@ -1253,7 +1253,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
int iobase = self->io.fir_base;
u8 ctrl;
- IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s\n", __func__);
#if 1
/* Disable Rx */
register_bank(iobase, 0);
@@ -1307,7 +1307,7 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
{
int iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s\n", __func__);
#if 0
/* Disable Tx */
register_bank(iobase, 0);
@@ -1411,7 +1411,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
register_bank(iobase, 0);
- IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s\n", __func__);
#if 0
/* Disable Rx */
register_bank(iobase, 0);
@@ -1422,7 +1422,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
lsr= inb(iobase + IRCC_LSR);
msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
- IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s: dma count = %d\n", __func__,
get_dma_residue(self->io.dma));
len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
@@ -1445,15 +1445,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
len -= self->io.speed < 4000000 ? 2 : 4;
if (len < 2 || len > 2050) {
- IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
+ IRDA_WARNING("%s(), bogus len=%d\n", __func__, len);
return;
}
- IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len);
+ IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
skb = dev_alloc_skb(len + 1);
if (!skb) {
IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
- __FUNCTION__);
+ __func__);
return;
}
/* Make sure IP header gets aligned */
@@ -1494,7 +1494,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
/* Make sure we don't stay here to long */
if (boguscount++ > 32) {
- IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), breaking!\n", __func__);
break;
}
} while (inb(iobase + UART_LSR) & UART_LSR_DR);
@@ -1536,7 +1536,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
lcra = inb(iobase + IRCC_LCR_A);
lsr = inb(iobase + IRCC_LSR);
- IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir);
+ IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __func__, iir);
if (iir & IRCC_IIR_EOM) {
if (self->io.direction == IO_RECV)
@@ -1548,7 +1548,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
}
if (iir & IRCC_IIR_ACTIVE_FRAME) {
- /*printk(KERN_WARNING "%s(): Active Frame\n", __FUNCTION__);*/
+ /*printk(KERN_WARNING "%s(): Active Frame\n", __func__);*/
}
/* Enable interrupts again */
@@ -1587,11 +1587,11 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
lsr = inb(iobase + UART_LSR);
IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
- __FUNCTION__, iir, lsr, iobase);
+ __func__, iir, lsr, iobase);
switch (iir) {
case UART_IIR_RLSI:
- IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
@@ -1604,7 +1604,7 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
break;
default:
IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
- __FUNCTION__, iir);
+ __func__, iir);
break;
}
@@ -1631,11 +1631,11 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
int status = FALSE;
/* int iobase; */
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
IRDA_ASSERT(self != NULL, return FALSE;);
- IRDA_DEBUG(0, "%s: dma count = %d\n", __FUNCTION__,
+ IRDA_DEBUG(0, "%s: dma count = %d\n", __func__,
get_dma_residue(self->io.dma));
status = (self->rx_buff.state != OUTSIDE_FRAME);
@@ -1652,7 +1652,7 @@ static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
self->netdev->name, self->netdev);
if (error)
IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
- __FUNCTION__, self->io.irq, error);
+ __func__, self->io.irq, error);
return error;
}
@@ -1696,21 +1696,21 @@ static int smsc_ircc_net_open(struct net_device *dev)
struct smsc_ircc_cb *self;
char hwname[16];
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
if (self->io.suspended) {
- IRDA_DEBUG(0, "%s(), device is suspended\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(), device is suspended\n", __func__);
return -EAGAIN;
}
if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
(void *) dev)) {
IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
- __FUNCTION__, self->io.irq);
+ __func__, self->io.irq);
return -EAGAIN;
}
@@ -1734,7 +1734,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
smsc_ircc_net_close(dev);
IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
- __FUNCTION__, self->io.dma);
+ __func__, self->io.dma);
return -EAGAIN;
}
@@ -1753,7 +1753,7 @@ static int smsc_ircc_net_close(struct net_device *dev)
{
struct smsc_ircc_cb *self;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -1836,7 +1836,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
*/
static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
{
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
@@ -1848,12 +1848,12 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
smsc_ircc_stop_interrupts(self);
/* Release the PORTS that this driver is using */
- IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
+ IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
- IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
+ IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
self->io.sir_base);
release_region(self->io.sir_base, self->io.sir_ext);
@@ -1875,7 +1875,7 @@ static void __exit smsc_ircc_cleanup(void)
{
int i;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
for (i = 0; i < 2; i++) {
if (dev_self[i])
@@ -1899,7 +1899,7 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
struct net_device *dev;
int fir_base, sir_base;
- IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1926,7 +1926,7 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
/* Turn on interrups */
outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
- IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s() - exit\n", __func__);
outb(0x00, fir_base + IRCC_MASTER);
}
@@ -1936,7 +1936,7 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(3, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s\n", __func__);
iobase = self->io.sir_base;
/* Reset UART */
@@ -1962,7 +1962,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
IRDA_ASSERT(self != NULL, return;);
- IRDA_DEBUG(4, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(4, "%s\n", __func__);
iobase = self->io.sir_base;
@@ -1984,7 +1984,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
*/
if (self->new_speed) {
IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
- __FUNCTION__, self->new_speed);
+ __func__, self->new_speed);
smsc_ircc_sir_wait_hw_transmitter_finish(self);
smsc_ircc_change_speed(self, self->new_speed);
self->new_speed = 0;
@@ -2023,7 +2023,7 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
/* Tx FIFO should be empty! */
if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
- IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__);
+ IRDA_WARNING("%s(), failed, fifo not empty!\n", __func__);
return 0;
}
@@ -2123,7 +2123,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
udelay(1);
if (count == 0)
- IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__);
}
@@ -2145,7 +2145,7 @@ static int __init smsc_ircc_look_for_chips(void)
while (address->cfg_base) {
cfg_base = address->cfg_base;
- /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/
+ /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __func__, cfg_base, address->type);*/
if (address->type & SMSCSIO_TYPE_FDC) {
type = "FDC";
@@ -2184,7 +2184,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
u8 mode, dma, irq;
int ret = -ENODEV;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
return ret;
@@ -2192,10 +2192,10 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
mode = inb(cfgbase + 1);
- /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/
+ /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
- IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__);
+ IRDA_WARNING("%s(): IrDA not enabled\n", __func__);
outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
sirbase = inb(cfgbase + 1) << 2;
@@ -2212,7 +2212,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
- IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode);
+ IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __func__, firbase, sirbase, dma, irq, mode);
if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
ret = 0;
@@ -2234,7 +2234,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
unsigned short fir_io, sir_io;
int ret = -ENODEV;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
return ret;
@@ -2268,7 +2268,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
{
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
outb(reg, cfg_base);
return inb(cfg_base) != reg ? -1 : 0;
@@ -2278,7 +2278,7 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
{
u8 devid, xdevid, rev;
- IRDA_DEBUG(1, "%s\n", __FUNCTION__);
+ IRDA_DEBUG(1, "%s\n", __func__);
/* Leave configuration */
@@ -2353,7 +2353,7 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
if (!request_region(cfg_base, 2, driver_name)) {
IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
- __FUNCTION__, cfg_base);
+ __func__, cfg_base);
} else {
if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
!smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
@@ -2371,7 +2371,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
if (!request_region(cfg_base, 2, driver_name)) {
IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
- __FUNCTION__, cfg_base);
+ __func__, cfg_base);
} else {
if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
!smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
@@ -2932,7 +2932,7 @@ static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
/* empty */;
if (val)
- IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__,
+ IRDA_WARNING("%s(): ATC: 0x%02x\n", __func__,
inb(fir_base + IRCC_ATC));
}
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
index d1ce5ae6a17..048a1542284 100644
--- a/drivers/net/irda/tekram-sir.c
+++ b/drivers/net/irda/tekram-sir.c
@@ -77,7 +77,7 @@ static int tekram_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -92,7 +92,7 @@ static int tekram_open(struct sir_dev *dev)
static int tekram_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -130,7 +130,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
u8 byte;
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
switch(state) {
case SIRDEV_STATE_DONGLE_SPEED:
@@ -179,7 +179,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state);
+ IRDA_ERROR("%s - undefined state %d\n", __func__, state);
ret = -EINVAL;
break;
}
@@ -204,7 +204,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
static int tekram_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Clear DTR, Set RTS */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index aa1a9b0ed83..fcf287b749d 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -181,7 +181,7 @@ static int toim3232_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Pull the lines high to start with.
*
@@ -209,7 +209,7 @@ static int toim3232_open(struct sir_dev *dev)
static int toim3232_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -241,7 +241,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
u8 byte;
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
switch(state) {
case SIRDEV_STATE_DONGLE_SPEED:
@@ -299,7 +299,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- printk(KERN_ERR "%s - undefined state %d\n", __FUNCTION__, state);
+ printk(KERN_ERR "%s - undefined state %d\n", __func__, state);
ret = -EINVAL;
break;
}
@@ -344,7 +344,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
static int toim3232_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s()\n", __func__);
/* Switch off both DTR and RTS to switch off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 04ad3573b15..84e609ea5fb 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -152,12 +152,12 @@ static int __init via_ircc_init(void)
{
int rc;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
rc = pci_register_driver(&via_driver);
if (rc < 0) {
IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
- __FUNCTION__, rc);
+ __func__, rc);
return -ENODEV;
}
return 0;
@@ -170,11 +170,11 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
chipio_t info;
- IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __FUNCTION__, id->device);
+ IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
rc = pci_enable_device (pcidev);
if (rc) {
- IRDA_DEBUG(0, "%s(): error rc = %d\n", __FUNCTION__, rc);
+ IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
return -ENODEV;
}
@@ -185,7 +185,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
Chipset=0x3076;
if (Chipset==0x3076) {
- IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
WriteLPCReg(7,0x0c );
temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
@@ -222,7 +222,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
} else
rc = -ENODEV; //IR not turn on
} else { //Not VT1211
- IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
if((bTmp&0x01)==1) { // BIOS enable FIR
@@ -262,7 +262,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
rc = -ENODEV; //IR not turn on !!!!!
}//Not VT1211
- IRDA_DEBUG(2, "%s(): End - rc = %d\n", __FUNCTION__, rc);
+ IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
return rc;
}
@@ -276,7 +276,7 @@ static void via_ircc_clean(void)
{
int i;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
@@ -286,7 +286,7 @@ static void via_ircc_clean(void)
static void __devexit via_remove_one (struct pci_dev *pdev)
{
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
/* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
* to get our driver instance and call directly via_ircc_close().
@@ -301,7 +301,7 @@ static void __devexit via_remove_one (struct pci_dev *pdev)
static void __exit via_ircc_cleanup(void)
{
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
/* FIXME : This should be redundant, as pci_unregister_driver()
* should call via_remove_one() on each device.
@@ -324,7 +324,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
struct via_ircc_cb *self;
int err;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
if (i >= ARRAY_SIZE(dev_self))
return -ENOMEM;
@@ -360,7 +360,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
/* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
- __FUNCTION__, self->io.fir_base);
+ __func__, self->io.fir_base);
err = -ENODEV;
goto err_out1;
}
@@ -471,7 +471,7 @@ static int via_ircc_close(struct via_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
@@ -483,7 +483,7 @@ static int via_ircc_close(struct via_ircc_cb *self)
/* Release the PORT that this driver is using */
IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
- __FUNCTION__, self->io.fir_base);
+ __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
dma_free_coherent(NULL, self->tx_buff.truesize,
@@ -509,7 +509,7 @@ static void via_hw_init(struct via_ircc_cb *self)
{
int iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
// FIFO Init
@@ -582,7 +582,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
speed = speed;
IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
- __FUNCTION__, speed, iobase, dongle_id);
+ __func__, speed, iobase, dongle_id);
switch (dongle_id) {
@@ -671,7 +671,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
case 0x11: /* Temic TFDS4500 */
- IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __FUNCTION__);
+ IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
UseOneRX(iobase, ON); //use ONE RX....RX1
InvertTX(iobase, OFF);
@@ -689,7 +689,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
SlowIRRXLowActive(iobase, OFF);
} else{
- IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
}
break;
@@ -707,7 +707,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
default:
IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
- __FUNCTION__, dongle_id);
+ __func__, dongle_id);
}
}
@@ -726,7 +726,7 @@ static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
iobase = self->io.fir_base;
/* Update accounting for new speed */
self->io.speed = speed;
- IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __FUNCTION__, speed);
+ IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
WriteReg(iobase, I_ST_CT_0, 0x0);
@@ -957,7 +957,7 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
self->tx_buff.head) + self->tx_buff_dma,
self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
- __FUNCTION__, self->tx_fifo.ptr,
+ __func__, self->tx_fifo.ptr,
self->tx_fifo.queue[self->tx_fifo.ptr].len,
self->tx_fifo.len);
@@ -981,7 +981,7 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
int ret = TRUE;
u8 Tx_status;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
iobase = self->io.fir_base;
/* Disable DMA */
@@ -1014,7 +1014,7 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
}
IRDA_DEBUG(1,
"%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
- __FUNCTION__,
+ __func__,
self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
// Any frames to be sent back-to-back?
@@ -1050,7 +1050,7 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self)
iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
self->tx_fifo.tail = self->tx_buff.head;
@@ -1134,13 +1134,13 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
return TRUE; //interrupt only, data maybe move by RxT
if (((len - 4) < 2) || ((len - 4) > 2048)) {
IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
- __FUNCTION__, len, RxCurCount(iobase, self),
+ __func__, len, RxCurCount(iobase, self),
self->RxLastCount);
hwreset(self);
return FALSE;
}
IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
- __FUNCTION__,
+ __func__,
st_fifo->len, len - 4, RxCurCount(iobase, self));
st_fifo->entries[st_fifo->tail].status = status;
@@ -1187,7 +1187,7 @@ F01_E */
skb_put(skb, len - 4);
skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
- IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
len - 4, self->rx_buff.data);
// Move to next frame
@@ -1217,7 +1217,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
len = GetRecvByte(iobase, self);
- IRDA_DEBUG(2, "%s(): len=%x\n", __FUNCTION__, len);
+ IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
if ((len - 4) < 2) {
self->stats.rx_dropped++;
@@ -1302,7 +1302,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
skb_put(skb, len - 4);
skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
- IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__,
+ IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
len - 4, st_fifo->head);
// Move to next frame
@@ -1318,7 +1318,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
IRDA_DEBUG(2,
"%s(): End of upload HostStatus=%x,RxStatus=%x\n",
- __FUNCTION__,
+ __func__,
GetHostStatus(iobase), GetRXStatus(iobase));
/*
@@ -1358,7 +1358,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
iHostIntType = GetHostStatus(iobase);
IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
- __FUNCTION__, iHostIntType,
+ __func__, iHostIntType,
(iHostIntType & 0x40) ? "Timer" : "",
(iHostIntType & 0x20) ? "Tx" : "",
(iHostIntType & 0x10) ? "Rx" : "",
@@ -1388,7 +1388,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
iTxIntType = GetTXStatus(iobase);
IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
- __FUNCTION__, iTxIntType,
+ __func__, iTxIntType,
(iTxIntType & 0x08) ? "FIFO underr." : "",
(iTxIntType & 0x04) ? "EOM" : "",
(iTxIntType & 0x02) ? "FIFO ready" : "",
@@ -1412,7 +1412,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
iRxIntType = GetRXStatus(iobase);
IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
- __FUNCTION__, iRxIntType,
+ __func__, iRxIntType,
(iRxIntType & 0x80) ? "PHY err." : "",
(iRxIntType & 0x40) ? "CRC err" : "",
(iRxIntType & 0x20) ? "FIFO overr." : "",
@@ -1421,7 +1421,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
(iRxIntType & 0x02) ? "RxMaxLen" : "",
(iRxIntType & 0x01) ? "SIR bad" : "");
if (!iRxIntType)
- IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
if (iRxIntType & 0x10) {
if (via_ircc_dma_receive_complete(self, iobase)) {
@@ -1431,7 +1431,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
} // No ERR
else { //ERR
IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
- __FUNCTION__, iRxIntType, iHostIntType,
+ __func__, iRxIntType, iHostIntType,
RxCurCount(iobase, self),
self->RxLastCount);
@@ -1456,7 +1456,7 @@ static void hwreset(struct via_ircc_cb *self)
int iobase;
iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
ResetChip(iobase, 5);
EnableDMA(iobase, OFF);
@@ -1501,7 +1501,7 @@ static int via_ircc_is_receiving(struct via_ircc_cb *self)
if (CkRxRecv(iobase, self))
status = TRUE;
- IRDA_DEBUG(2, "%s(): status=%x....\n", __FUNCTION__, status);
+ IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
return status;
}
@@ -1519,7 +1519,7 @@ static int via_ircc_net_open(struct net_device *dev)
int iobase;
char hwname[32];
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = (struct via_ircc_cb *) dev->priv;
@@ -1586,7 +1586,7 @@ static int via_ircc_net_close(struct net_device *dev)
struct via_ircc_cb *self;
int iobase;
- IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = (struct via_ircc_cb *) dev->priv;
@@ -1630,7 +1630,7 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
IRDA_ASSERT(dev != NULL, return -1;);
self = dev->priv;
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name,
+ IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
cmd);
/* Disable interrupts & save flags */
spin_lock_irqsave(&self->lock, flags);
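/*
 * Illustrative aside, not part of the patch: the hunks above replace the
 * GCC-specific __FUNCTION__ extension with the C99-standard __func__
 * predefined identifier.  A minimal standalone sketch of the same trace
 * pattern, using a made-up pr_dbg() macro rather than the driver's
 * IRDA_DEBUG():
 */
#include <stdio.h>

#define pr_dbg(fmt, ...) \
	printf("%s(): " fmt, __func__, ##__VA_ARGS__)

static void demo_open(int iobase)
{
	pr_dbg("iobase=0x%03x\n", iobase);	/* prints "demo_open(): iobase=0x2f8" */
}

int main(void)
{
	demo_open(0x2f8);
	return 0;
}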
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index d15e00b8591..18f4b3a96ae 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -140,15 +140,15 @@ static void vlsi_ring_debug(struct vlsi_ring *r)
unsigned i;
printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
- __FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
- printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__,
+ __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
for (i = 0; i < r->size; i++) {
rd = &r->rd[i];
- printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i);
+ printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
- __FUNCTION__, (unsigned) rd_get_status(rd),
+ __func__, (unsigned) rd_get_status(rd),
(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
}
}
@@ -435,7 +435,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
|| !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
if (rd->buf) {
IRDA_ERROR("%s: failed to create PCI-MAP for %p",
- __FUNCTION__, rd->buf);
+ __func__, rd->buf);
kfree(rd->buf);
rd->buf = NULL;
}
@@ -489,7 +489,7 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
if (!ringarea) {
IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
- __FUNCTION__);
+ __func__);
goto out;
}
memset(ringarea, 0, HW_RING_AREA_SIZE);
@@ -564,7 +564,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
len -= crclen; /* remove trailing CRC */
if (len <= 0) {
- IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len);
+ IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
ret |= VLSI_RX_DROP;
goto done;
}
@@ -579,14 +579,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
*/
le16_to_cpus(rd->buf+len);
if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
- IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s: crc error\n", __func__);
ret |= VLSI_RX_CRC;
goto done;
}
}
if (!rd->skb) {
- IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__);
+ IRDA_WARNING("%s: rx packet lost\n", __func__);
ret |= VLSI_RX_DROP;
goto done;
}
@@ -617,7 +617,7 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
if (rd_is_active(rd)) {
IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
- __FUNCTION__);
+ __func__);
vlsi_ring_debug(r);
break;
}
@@ -676,7 +676,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
if (ring_first(r) == NULL) {
/* we are in big trouble, if this should ever happen */
- IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__);
+ IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
vlsi_ring_debug(r);
}
else
@@ -697,7 +697,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
if (rd_is_active(rd)) {
rd_set_status(rd, 0);
if (rd_get_count(rd)) {
- IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
ret = -VLSI_RX_DROP;
}
rd_set_count(rd, 0);
@@ -772,7 +772,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
int fifocnt;
baudrate = idev->new_baud;
- IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
+ IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
if (baudrate == 4000000) {
mode = IFF_FIR;
config = IRCFG_FIR;
@@ -789,7 +789,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
switch(baudrate) {
default:
IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
- __FUNCTION__, baudrate);
+ __func__, baudrate);
baudrate = 9600;
/* fallthru */
case 2400:
@@ -806,7 +806,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
+ IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
}
outw(0, iobase+VLSI_PIO_IRENABLE);
@@ -830,14 +830,14 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
config ^= IRENABLE_SIR_ON;
if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
- IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__,
+ IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
ret = -1;
}
else {
if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
IRDA_WARNING("%s: failed to apply baudrate %d\n",
- __FUNCTION__, baudrate);
+ __func__, baudrate);
ret = -1;
}
else {
@@ -849,7 +849,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
}
if (ret)
- vlsi_reg_debug(iobase,__FUNCTION__);
+ vlsi_reg_debug(iobase,__func__);
return ret;
}
@@ -982,7 +982,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (len >= r->len-5)
IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
- __FUNCTION__);
+ __func__);
}
else {
/* hw deals with MIR/FIR mode wrapping */
@@ -1027,7 +1027,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
+ IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
}
config = inw(iobase+VLSI_PIO_IRCFG);
@@ -1040,7 +1040,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
- IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
}
spin_unlock_irqrestore(&idev->lock, flags);
@@ -1049,7 +1049,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
drop_unlock:
spin_unlock_irqrestore(&idev->lock, flags);
drop:
- IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg);
+ IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
dev_kfree_skb_any(skb);
idev->stats.tx_errors++;
idev->stats.tx_dropped++;
@@ -1106,7 +1106,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
- __FUNCTION__, fifocnt);
+ __func__, fifocnt);
}
outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
}
@@ -1115,7 +1115,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
if (netif_queue_stopped(ndev) && !idev->new_baud) {
netif_wake_queue(ndev);
- IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__);
+ IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
}
}
@@ -1138,7 +1138,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
dev_kfree_skb_any(rd->skb);
rd->skb = NULL;
}
- IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__);
+ IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
ret = -VLSI_TX_DROP;
}
else
@@ -1188,7 +1188,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
if (count < 3) {
if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
IRDA_ERROR("%s: no PLL or failed to lock!\n",
- __FUNCTION__);
+ __func__);
clkctl = CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return -1;
@@ -1197,7 +1197,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
- __FUNCTION__, clksrc);
+ __func__, clksrc);
}
else
clksrc = 1; /* got successful PLL lock */
@@ -1269,7 +1269,7 @@ static int vlsi_init_chip(struct pci_dev *pdev)
/* start the clock and clean the registers */
if (vlsi_start_clock(pdev)) {
- IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__);
+ IRDA_ERROR("%s: no valid clock source\n", __func__);
return -1;
}
iobase = ndev->base_addr;
@@ -1386,7 +1386,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
vlsi_irda_dev_t *idev = ndev->priv;
- vlsi_reg_debug(ndev->base_addr, __FUNCTION__);
+ vlsi_reg_debug(ndev->base_addr, __func__);
vlsi_ring_debug(idev->tx_ring);
if (netif_running(ndev))
@@ -1401,7 +1401,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
if (vlsi_start_hw(idev))
IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
- __FUNCTION__, pci_name(idev->pdev), ndev->name);
+ __func__, pci_name(idev->pdev), ndev->name);
else
netif_start_queue(ndev);
}
@@ -1446,7 +1446,7 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
break;
default:
IRDA_WARNING("%s: notsupp - cmd=%04x\n",
- __FUNCTION__, cmd);
+ __func__, cmd);
ret = -EOPNOTSUPP;
}
@@ -1491,7 +1491,7 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
if (boguscount <= 0)
IRDA_MESSAGE("%s: too much work in interrupt!\n",
- __FUNCTION__);
+ __func__);
return IRQ_RETVAL(handled);
}
@@ -1504,7 +1504,7 @@ static int vlsi_open(struct net_device *ndev)
char hwname[32];
if (pci_request_regions(idev->pdev, drivername)) {
- IRDA_WARNING("%s: io resource busy\n", __FUNCTION__);
+ IRDA_WARNING("%s: io resource busy\n", __func__);
goto errout;
}
ndev->base_addr = pci_resource_start(idev->pdev,0);
@@ -1519,7 +1519,7 @@ static int vlsi_open(struct net_device *ndev)
if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
drivername, ndev)) {
IRDA_WARNING("%s: couldn't get IRQ: %d\n",
- __FUNCTION__, ndev->irq);
+ __func__, ndev->irq);
goto errout_io;
}
@@ -1540,7 +1540,7 @@ static int vlsi_open(struct net_device *ndev)
netif_start_queue(ndev);
- IRDA_MESSAGE("%s: device %s operational\n", __FUNCTION__, ndev->name);
+ IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);
return 0;
@@ -1574,7 +1574,7 @@ static int vlsi_close(struct net_device *ndev)
pci_release_regions(idev->pdev);
- IRDA_MESSAGE("%s: device %s stopped\n", __FUNCTION__, ndev->name);
+ IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);
return 0;
}
@@ -1593,7 +1593,7 @@ static int vlsi_irda_init(struct net_device *ndev)
if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
|| pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
- IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __FUNCTION__);
+ IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
return -1;
}
@@ -1645,14 +1645,14 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if ( !pci_resource_start(pdev,0)
|| !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
- IRDA_ERROR("%s: bar 0 invalid", __FUNCTION__);
+ IRDA_ERROR("%s: bar 0 invalid", __func__);
goto out_disable;
}
ndev = alloc_irdadev(sizeof(*idev));
if (ndev==NULL) {
IRDA_ERROR("%s: Unable to allocate device memory.\n",
- __FUNCTION__);
+ __func__);
goto out_disable;
}
@@ -1667,7 +1667,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_freedev;
if (register_netdev(ndev) < 0) {
- IRDA_ERROR("%s: register_netdev failed\n", __FUNCTION__);
+ IRDA_ERROR("%s: register_netdev failed\n", __func__);
goto out_freedev;
}
@@ -1678,7 +1678,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vlsi_proc_root, VLSI_PROC_FOPS, ndev);
if (!ent) {
IRDA_WARNING("%s: failed to create proc entry\n",
- __FUNCTION__);
+ __func__);
} else {
ent->size = 0;
}
@@ -1745,7 +1745,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
if (!ndev) {
IRDA_ERROR("%s - %s: no netdevice \n",
- __FUNCTION__, pci_name(pdev));
+ __func__, pci_name(pdev));
return 0;
}
idev = ndev->priv;
@@ -1756,7 +1756,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
pdev->current_state = state.event;
}
else
- IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event);
+ IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event);
mutex_unlock(&idev->mtx);
return 0;
}
@@ -1784,7 +1784,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
if (!ndev) {
IRDA_ERROR("%s - %s: no netdevice \n",
- __FUNCTION__, pci_name(pdev));
+ __func__, pci_name(pdev));
return 0;
}
idev = ndev->priv;
@@ -1792,7 +1792,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
if (pdev->current_state == 0) {
mutex_unlock(&idev->mtx);
IRDA_WARNING("%s - %s: already resumed\n",
- __FUNCTION__, pci_name(pdev));
+ __func__, pci_name(pdev));
return 0;
}
@@ -1811,7 +1811,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
* now we explicitly set pdev->current_state = 0 after enabling the
* device and independently resume_ok should catch any garbage config.
*/
- IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
+ IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
mutex_unlock(&idev->mtx);
return 0;
}
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index c8b9c74eea5..9b1884329fb 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -617,7 +617,7 @@ static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
*/
if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
- IRDA_ERROR("%s: pci busaddr inconsistency!\n", __FUNCTION__);
+ IRDA_ERROR("%s: pci busaddr inconsistency!\n", __func__);
dump_stack();
return;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 9fd2451b0fb..002a6d769f2 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -114,7 +114,7 @@ static int __init w83977af_init(void)
{
int i;
- IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s()\n", __func__ );
for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
@@ -133,7 +133,7 @@ static void __exit w83977af_cleanup(void)
{
int i;
- IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(4, "%s()\n", __func__ );
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
@@ -154,12 +154,12 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
struct w83977af_ir *self;
int err;
- IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s()\n", __func__ );
/* Lock the port that we need */
if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
- __FUNCTION__ , iobase);
+ __func__ , iobase);
return -ENODEV;
}
@@ -241,7 +241,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdevice() failed!\n", __FUNCTION__);
+ IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
goto err_out3;
}
IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
@@ -273,7 +273,7 @@ static int w83977af_close(struct w83977af_ir *self)
{
int iobase;
- IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s()\n", __func__ );
iobase = self->io.fir_base;
@@ -294,7 +294,7 @@ static int w83977af_close(struct w83977af_ir *self)
/* Release the PORT that this driver is using */
IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
- __FUNCTION__ , self->io.fir_base);
+ __func__ , self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -316,7 +316,7 @@ int w83977af_probe( int iobase, int irq, int dma)
int i;
for (i=0; i < 2; i++) {
- IRDA_DEBUG( 0, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG( 0, "%s()\n", __func__ );
#ifdef CONFIG_USE_W977_PNP
/* Enter PnP configuration mode */
w977_efm_enter(efbase[i]);
@@ -403,7 +403,7 @@ int w83977af_probe( int iobase, int irq, int dma)
return 0;
} else {
/* Try next extented function register address */
- IRDA_DEBUG( 0, "%s(), Wrong chip version", __FUNCTION__ );
+ IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
}
}
return -1;
@@ -439,19 +439,19 @@ void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
case 115200: outb(0x01, iobase+ABLL); break;
case 576000:
ir_mode = HCR_MIR_576;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
break;
case 1152000:
ir_mode = HCR_MIR_1152;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
break;
case 4000000:
ir_mode = HCR_FIR;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
break;
default:
ir_mode = HCR_FIR;
- IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed);
+ IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
break;
}
@@ -501,7 +501,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
iobase = self->io.fir_base;
- IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies,
+ IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
(int) skb->len);
/* Lock transmit buffer */
@@ -549,7 +549,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
outb(ICR_ETMRI, iobase+ICR);
} else {
#endif
- IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt);
+ IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
if (mtt)
udelay(mtt);
@@ -591,7 +591,7 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
unsigned long flags;
__u8 hcr;
#endif
- IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len);
+ IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
/* Save current set */
set = inb(iobase+SSR);
@@ -643,7 +643,7 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
int actual = 0;
__u8 set;
- IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(4, "%s()\n", __func__ );
/* Save current bank */
set = inb(iobase+SSR);
@@ -651,11 +651,11 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
switch_bank(iobase, SET0);
if (!(inb_p(iobase+USR) & USR_TSRE)) {
IRDA_DEBUG(4,
- "%s(), warning, FIFO not empty yet!\n", __FUNCTION__ );
+ "%s(), warning, FIFO not empty yet!\n", __func__ );
fifo_size -= 17;
IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
- __FUNCTION__ , fifo_size);
+ __func__ , fifo_size);
}
/* Fill FIFO with current frame */
@@ -665,7 +665,7 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
}
IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
- __FUNCTION__ , fifo_size, actual, len);
+ __func__ , fifo_size, actual, len);
/* Restore bank */
outb(set, iobase+SSR);
@@ -685,7 +685,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
int iobase;
__u8 set;
- IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies);
+ IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
IRDA_ASSERT(self != NULL, return;);
@@ -700,7 +700,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
/* Check for underrrun! */
if (inb(iobase+AUDR) & AUDR_UNDR) {
- IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
self->stats.tx_errors++;
self->stats.tx_fifo_errors++;
@@ -741,7 +741,7 @@ int w83977af_dma_receive(struct w83977af_ir *self)
#endif
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(4, "%s\n", __FUNCTION__ );
+ IRDA_DEBUG(4, "%s\n", __func__ );
iobase= self->io.fir_base;
@@ -812,7 +812,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
__u8 set;
__u8 status;
- IRDA_DEBUG(4, "%s\n", __FUNCTION__ );
+ IRDA_DEBUG(4, "%s\n", __func__ );
st_fifo = &self->st_fifo;
@@ -892,7 +892,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
printk(KERN_INFO
- "%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
+ "%s(), memory squeeze, dropping frame.\n", __func__);
/* Restore set register */
outb(set, iobase+SSR);
@@ -943,7 +943,7 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
__u8 byte = 0x00;
int iobase;
- IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(4, "%s()\n", __func__ );
IRDA_ASSERT(self != NULL, return;);
@@ -970,7 +970,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
__u8 set;
int iobase;
- IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr);
+ IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
iobase = self->io.fir_base;
/* Transmit FIFO low on data */
@@ -1007,7 +1007,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
/* Check if we need to change the speed? */
if (self->new_speed) {
IRDA_DEBUG(2,
- "%s(), Changing speed!\n", __FUNCTION__ );
+ "%s(), Changing speed!\n", __func__ );
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
}
@@ -1189,7 +1189,7 @@ static int w83977af_net_open(struct net_device *dev)
char hwname[32];
__u8 set;
- IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s()\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
self = (struct w83977af_ir *) dev->priv;
@@ -1252,7 +1252,7 @@ static int w83977af_net_close(struct net_device *dev)
int iobase;
__u8 set;
- IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
+ IRDA_DEBUG(0, "%s()\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -1307,7 +1307,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);
+ IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
spin_lock_irqsave(&self->lock, flags);
diff --git a/drivers/net/ixp2000/ixp2400-msf.c b/drivers/net/ixp2000/ixp2400-msf.c
index 9ec38eebfb5..f5ffd7e05d2 100644
--- a/drivers/net/ixp2000/ixp2400-msf.c
+++ b/drivers/net/ixp2000/ixp2400-msf.c
@@ -13,8 +13,8 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <asm/hardware.h>
-#include <asm/arch/ixp2000-regs.h>
+#include <mach/hardware.h>
+#include <mach/ixp2000-regs.h>
#include <asm/delay.h>
#include <asm/io.h>
#include "ixp2400-msf.h"
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 7111c65f0b3..7b70c66504a 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <asm/hardware/uengine.h>
-#include <asm/mach-types.h>
#include <asm/io.h>
#include "ixp2400_rx.ucode"
#include "ixp2400_tx.ucode"
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 591a7e4220c..83fa9d82a00 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1272,8 +1272,6 @@ static void set_multicast_list(struct net_device *dev) {
return;
}
if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
- if (dev->flags & IFF_ALLMULTI)
- dev->flags |= IFF_PROMISC;
lp->i596_config[8] &= ~0x01;
} else {
lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index daba82bbcb5..84c77f1f9a5 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -21,8 +21,8 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
-#include <asm/arch/board.h>
-#include <asm/arch/cpu.h>
+#include <mach/board.h>
+#include <mach/cpu.h>
#include "macb.h"
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 4cb364e67dc..0a97c26df6a 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -100,7 +100,7 @@ static inline void load_eaddr(struct net_device *dev)
DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr));
macaddr = 0;
for (i = 0; i < 6; i++)
- macaddr |= dev->dev_addr[i] << ((5 - i) * 8);
+ macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
mace->eth.mac_addr = macaddr;
}
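/*
 * Illustrative aside, not part of the patch: the one-line meth.c fix above
 * is about C integer promotion.  dev_addr[i] is a u8, which promotes to a
 * 32-bit int before the shift, so shifting the first octets by 40 or 32
 * bits is undefined and loses them in practice; casting to u64 first keeps
 * all 48 address bits.  A standalone sketch of the corrected pattern,
 * assuming a 6-byte address array:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t addr[6])
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++)
		mac |= (uint64_t)addr[i] << ((5 - i) * 8);	/* widen before shifting */
	return mac;
}

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	printf("%012llx\n", (unsigned long long)mac_to_u64(addr));
	return 0;
}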
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8a97a0066a8..46819af3b06 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,7 @@
#include <asm/system.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
-static char mv643xx_eth_driver_version[] = "1.1";
+static char mv643xx_eth_driver_version[] = "1.2";
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
@@ -90,12 +90,21 @@ static char mv643xx_eth_driver_version[] = "1.1";
#define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10))
#define PORT_STATUS(p) (0x0444 + ((p) << 10))
#define TX_FIFO_EMPTY 0x00000400
+#define TX_IN_PROGRESS 0x00000080
+#define PORT_SPEED_MASK 0x00000030
+#define PORT_SPEED_1000 0x00000010
+#define PORT_SPEED_100 0x00000020
+#define PORT_SPEED_10 0x00000000
+#define FLOW_CONTROL_ENABLED 0x00000008
+#define FULL_DUPLEX 0x00000004
+#define LINK_UP 0x00000002
#define TXQ_COMMAND(p) (0x0448 + ((p) << 10))
#define TXQ_FIX_PRIO_CONF(p) (0x044c + ((p) << 10))
#define TX_BW_RATE(p) (0x0450 + ((p) << 10))
#define TX_BW_MTU(p) (0x0458 + ((p) << 10))
#define TX_BW_BURST(p) (0x045c + ((p) << 10))
#define INT_CAUSE(p) (0x0460 + ((p) << 10))
+#define INT_TX_END_0 0x00080000
#define INT_TX_END 0x07f80000
#define INT_RX 0x0007fbfc
#define INT_EXT 0x00000002
@@ -127,21 +136,21 @@ static char mv643xx_eth_driver_version[] = "1.1";
/*
* SDMA configuration register.
*/
-#define RX_BURST_SIZE_4_64BIT (2 << 1)
+#define RX_BURST_SIZE_16_64BIT (4 << 1)
#define BLM_RX_NO_SWAP (1 << 4)
#define BLM_TX_NO_SWAP (1 << 5)
-#define TX_BURST_SIZE_4_64BIT (2 << 22)
+#define TX_BURST_SIZE_16_64BIT (4 << 22)
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
- RX_BURST_SIZE_4_64BIT | \
- TX_BURST_SIZE_4_64BIT
+ RX_BURST_SIZE_16_64BIT | \
+ TX_BURST_SIZE_16_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
- RX_BURST_SIZE_4_64BIT | \
+ RX_BURST_SIZE_16_64BIT | \
BLM_RX_NO_SWAP | \
BLM_TX_NO_SWAP | \
- TX_BURST_SIZE_4_64BIT
+ TX_BURST_SIZE_16_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
@@ -153,9 +162,7 @@ static char mv643xx_eth_driver_version[] = "1.1";
#define SET_MII_SPEED_TO_100 (1 << 24)
#define SET_GMII_SPEED_TO_1000 (1 << 23)
#define SET_FULL_DUPLEX_MODE (1 << 21)
-#define MAX_RX_PACKET_1522BYTE (1 << 17)
#define MAX_RX_PACKET_9700BYTE (5 << 17)
-#define MAX_RX_PACKET_MASK (7 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
@@ -228,6 +235,8 @@ struct tx_desc {
#define GEN_IP_V4_CHECKSUM 0x00040000
#define GEN_TCP_UDP_CHECKSUM 0x00020000
#define UDP_FRAME 0x00010000
+#define MAC_HDR_EXTRA_4_BYTES 0x00008000
+#define MAC_HDR_EXTRA_8_BYTES 0x00000200
#define TX_IHL_SHIFT 11
@@ -404,6 +413,17 @@ static void rxq_disable(struct rx_queue *rxq)
udelay(10);
}
+static void txq_reset_hw_ptr(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
+ u32 addr;
+
+ addr = (u32)txq->tx_desc_dma;
+ addr += txq->tx_curr_desc * sizeof(struct tx_desc);
+ wrl(mp, off, addr);
+}
+
static void txq_enable(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -614,6 +634,12 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
for (i = 0; i < 8; i++)
if (mp->txq_mask & (1 << i))
txq_reclaim(mp->txq + i, 0);
+
+ if (netif_carrier_ok(mp->dev)) {
+ spin_lock(&mp->lock);
+ __txq_maybe_wake(mp->txq + mp->txq_primary);
+ spin_unlock(&mp->lock);
+ }
}
#endif
@@ -706,6 +732,7 @@ static inline __be16 sum16_as_be(__sum16 sum)
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
int tx_index;
struct tx_desc *desc;
@@ -732,12 +759,36 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- BUG_ON(skb->protocol != htons(ETH_P_IP));
+ int mac_hdr_len;
+
+ BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_8021Q));
cmd_sts |= GEN_TCP_UDP_CHECKSUM |
GEN_IP_V4_CHECKSUM |
ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+ mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+ switch (mac_hdr_len - ETH_HLEN) {
+ case 0:
+ break;
+ case 4:
+ cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
+ break;
+ case 8:
+ cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
+ break;
+ case 12:
+ cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
+ cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
+ break;
+ default:
+ if (net_ratelimit())
+ dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
+ "mac header length is %d?!\n", mac_hdr_len);
+ break;
+ }
+
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
cmd_sts |= UDP_FRAME;
@@ -759,6 +810,10 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
wmb();
desc->cmd_sts = cmd_sts;
+ /* clear TX_END interrupt status */
+ wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
+ rdl(mp, INT_CAUSE(mp->port_num));
+
/* ensure all descriptors are written before poking hardware */
wmb();
txq_enable(txq);
@@ -1112,10 +1167,28 @@ static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *
static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 port_status;
+
+ port_status = rdl(mp, PORT_STATUS(mp->port_num));
+
cmd->supported = SUPPORTED_MII;
cmd->advertising = ADVERTISED_MII;
- cmd->speed = SPEED_1000;
- cmd->duplex = DUPLEX_FULL;
+ switch (port_status & PORT_SPEED_MASK) {
+ case PORT_SPEED_10:
+ cmd->speed = SPEED_10;
+ break;
+ case PORT_SPEED_100:
+ cmd->speed = SPEED_100;
+ break;
+ case PORT_SPEED_1000:
+ cmd->speed = SPEED_1000;
+ break;
+ default:
+ cmd->speed = -1;
+ break;
+ }
+ cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
cmd->port = PORT_MII;
cmd->phy_address = 0;
cmd->transceiver = XCVR_INTERNAL;
@@ -1539,8 +1612,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
tx_desc = (struct tx_desc *)txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) {
+ struct tx_desc *txd = tx_desc + i;
int nexti = (i + 1) % txq->tx_ring_size;
- tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
+
+ txd->cmd_sts = 0;
+ txd->next_desc_ptr = txq->tx_desc_dma +
nexti * sizeof(struct tx_desc);
}
@@ -1577,8 +1653,11 @@ static void txq_reclaim(struct tx_queue *txq, int force)
desc = &txq->tx_desc_area[tx_index];
cmd_sts = desc->cmd_sts;
- if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
- break;
+ if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+ if (!force)
+ break;
+ desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+ }
txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
txq->tx_desc_count--;
@@ -1632,49 +1711,61 @@ static void txq_deinit(struct tx_queue *txq)
/* netdev ops and related ***************************************************/
-static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
+static void handle_link_event(struct mv643xx_eth_private *mp)
{
- u32 pscr_o;
- u32 pscr_n;
-
- pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+ struct net_device *dev = mp->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdl(mp, PORT_STATUS(mp->port_num));
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ int i;
- /* clear speed, duplex and rx buffer size fields */
- pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100 |
- SET_GMII_SPEED_TO_1000 |
- SET_FULL_DUPLEX_MODE |
- MAX_RX_PACKET_MASK);
+ printk(KERN_INFO "%s: link down\n", dev->name);
- if (speed == SPEED_1000) {
- pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
- } else {
- if (speed == SPEED_100)
- pscr_n |= SET_MII_SPEED_TO_100;
- pscr_n |= MAX_RX_PACKET_1522BYTE;
- }
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
- if (duplex == DUPLEX_FULL)
- pscr_n |= SET_FULL_DUPLEX_MODE;
+ for (i = 0; i < 8; i++) {
+ struct tx_queue *txq = mp->txq + i;
- if (pscr_n != pscr_o) {
- if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
- wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
- else {
- int i;
+ if (mp->txq_mask & (1 << i)) {
+ txq_reclaim(txq, 1);
+ txq_reset_hw_ptr(txq);
+ }
+ }
+ }
+ return;
+ }
- for (i = 0; i < 8; i++)
- if (mp->txq_mask & (1 << i))
- txq_disable(mp->txq + i);
+ switch (port_status & PORT_SPEED_MASK) {
+ case PORT_SPEED_10:
+ speed = 10;
+ break;
+ case PORT_SPEED_100:
+ speed = 100;
+ break;
+ case PORT_SPEED_1000:
+ speed = 1000;
+ break;
+ default:
+ speed = -1;
+ break;
+ }
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
- pscr_o &= ~SERIAL_PORT_ENABLE;
- wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
- wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
- wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half",
+ fc ? "en" : "dis");
- for (i = 0; i < 8; i++)
- if (mp->txq_mask & (1 << i))
- txq_enable(mp->txq + i);
- }
+ if (!netif_carrier_ok(dev)) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
}
}
@@ -1684,7 +1775,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 int_cause;
u32 int_cause_ext;
- u32 txq_active;
int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
(INT_TX_END | INT_RX | INT_EXT);
@@ -1698,30 +1788,8 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
}
- if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
- if (mp->phy_addr == -1 || mii_link_ok(&mp->mii)) {
- int i;
-
- if (mp->phy_addr != -1) {
- struct ethtool_cmd cmd;
-
- mii_ethtool_gset(&mp->mii, &cmd);
- update_pscr(mp, cmd.speed, cmd.duplex);
- }
-
- for (i = 0; i < 8; i++)
- if (mp->txq_mask & (1 << i))
- txq_enable(mp->txq + i);
-
- if (!netif_carrier_ok(dev)) {
- netif_carrier_on(dev);
- __txq_maybe_wake(mp->txq + mp->txq_primary);
- }
- } else if (netif_carrier_ok(dev)) {
- netif_stop_queue(dev);
- netif_carrier_off(dev);
- }
- }
+ if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
+ handle_link_event(mp);
/*
* RxBuffer or RxError set for any of the 8 queues?
@@ -1743,8 +1811,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
}
#endif
- txq_active = rdl(mp, TXQ_COMMAND(mp->port_num));
-
/*
* TxBuffer or TxError set for any of the 8 queues?
*/
@@ -1754,6 +1820,16 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
for (i = 0; i < 8; i++)
if (mp->txq_mask & (1 << i))
txq_reclaim(mp->txq + i, 0);
+
+ /*
+ * Enough space again in the primary TX queue for a
+ * full packet?
+ */
+ if (netif_carrier_ok(dev)) {
+ spin_lock(&mp->lock);
+ __txq_maybe_wake(mp->txq + mp->txq_primary);
+ spin_unlock(&mp->lock);
+ }
}
/*
@@ -1763,19 +1839,25 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
int i;
wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
+
+ spin_lock(&mp->lock);
for (i = 0; i < 8; i++) {
struct tx_queue *txq = mp->txq + i;
- if (txq->tx_desc_count && !((txq_active >> i) & 1))
+ u32 hw_desc_ptr;
+ u32 expected_ptr;
+
+ if ((int_cause & (INT_TX_END_0 << i)) == 0)
+ continue;
+
+ hw_desc_ptr =
+ rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
+ expected_ptr = (u32)txq->tx_desc_dma +
+ txq->tx_curr_desc * sizeof(struct tx_desc);
+
+ if (hw_desc_ptr != expected_ptr)
txq_enable(txq);
}
- }
-
- /*
- * Enough space again in the primary TX queue for a full packet?
- */
- if (int_cause_ext & INT_EXT_TX) {
- struct tx_queue *txq = mp->txq + mp->txq_primary;
- __txq_maybe_wake(txq);
+ spin_unlock(&mp->lock);
}
return IRQ_HANDLED;
@@ -1785,14 +1867,14 @@ static void phy_reset(struct mv643xx_eth_private *mp)
{
unsigned int data;
- smi_reg_read(mp, mp->phy_addr, 0, &data);
- data |= 0x8000;
- smi_reg_write(mp, mp->phy_addr, 0, data);
+ smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
+ data |= BMCR_RESET;
+ smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
do {
udelay(1);
- smi_reg_read(mp, mp->phy_addr, 0, &data);
- } while (data & 0x8000);
+ smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
+ } while (data & BMCR_RESET);
}
static void port_start(struct mv643xx_eth_private *mp)
@@ -1801,23 +1883,6 @@ static void port_start(struct mv643xx_eth_private *mp)
int i;
/*
- * Configure basic link parameters.
- */
- pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
- pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
- wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
- pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
- DISABLE_AUTO_NEG_SPEED_GMII |
- DISABLE_AUTO_NEG_FOR_DUPLEX |
- DO_NOT_FORCE_LINK_FAIL |
- SERIAL_PORT_CONTROL_RESERVED;
- wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
- pscr |= SERIAL_PORT_ENABLE;
- wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
-
- wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
-
- /*
* Perform PHY reset, if there is a PHY.
*/
if (mp->phy_addr != -1) {
@@ -1829,21 +1894,31 @@ static void port_start(struct mv643xx_eth_private *mp)
}
/*
+ * Configure basic link parameters.
+ */
+ pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+
+ pscr |= SERIAL_PORT_ENABLE;
+ wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+
+ pscr |= DO_NOT_FORCE_LINK_FAIL;
+ if (mp->phy_addr == -1)
+ pscr |= FORCE_LINK_PASS;
+ wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+
+ wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+ /*
* Configure TX path and queues.
*/
tx_set_rate(mp, 1000000000, 16777216);
for (i = 0; i < 8; i++) {
struct tx_queue *txq = mp->txq + i;
- int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
- u32 addr;
if ((mp->txq_mask & (1 << i)) == 0)
continue;
- addr = (u32)txq->tx_desc_dma;
- addr += txq->tx_curr_desc * sizeof(struct tx_desc);
- wrl(mp, off, addr);
-
+ txq_reset_hw_ptr(txq);
txq_set_rate(txq, 1000000000, 16777216);
txq_set_fixed_prio_mode(txq);
}
@@ -1965,6 +2040,9 @@ static int mv643xx_eth_open(struct net_device *dev)
napi_enable(&mp->napi);
#endif
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
port_start(mp);
set_rx_coal(mp, 0);
@@ -1999,8 +2077,14 @@ static void port_reset(struct mv643xx_eth_private *mp)
if (mp->txq_mask & (1 << i))
txq_disable(mp->txq + i);
}
- while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
+
+ while (1) {
+ u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
+
+ if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
+ break;
udelay(10);
+ }
/* Reset the Enable bit in the Configuration Register */
data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
@@ -2202,7 +2286,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
int ret;
if (!mv643xx_eth_version_printed++)
- printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
+ printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
+ "driver version %s\n", mv643xx_eth_driver_version);
ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2338,14 +2423,14 @@ static int phy_detect(struct mv643xx_eth_private *mp)
unsigned int data;
unsigned int data2;
- smi_reg_read(mp, mp->phy_addr, 0, &data);
- smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000);
+ smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
+ smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE);
- smi_reg_read(mp, mp->phy_addr, 0, &data2);
- if (((data ^ data2) & 0x1000) == 0)
+ smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2);
+ if (((data ^ data2) & BMCR_ANENABLE) == 0)
return -ENODEV;
- smi_reg_write(mp, mp->phy_addr, 0, data);
+ smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
return 0;
}
@@ -2393,12 +2478,39 @@ static int phy_init(struct mv643xx_eth_private *mp,
cmd.duplex = pd->duplex;
}
- update_pscr(mp, cmd.speed, cmd.duplex);
mv643xx_eth_set_settings(mp->dev, &cmd);
return 0;
}
+static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+ u32 pscr;
+
+ pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+ if (pscr & SERIAL_PORT_ENABLE) {
+ pscr &= ~SERIAL_PORT_ENABLE;
+ wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+ }
+
+ pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
+ if (mp->phy_addr == -1) {
+ pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
+ if (speed == SPEED_1000)
+ pscr |= SET_GMII_SPEED_TO_1000;
+ else if (speed == SPEED_100)
+ pscr |= SET_MII_SPEED_TO_100;
+
+ pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
+
+ pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
+ if (duplex == DUPLEX_FULL)
+ pscr |= SET_FULL_DUPLEX_MODE;
+ }
+
+ wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+}
+
static int mv643xx_eth_probe(struct platform_device *pdev)
{
struct mv643xx_eth_platform_data *pd;
@@ -2452,6 +2564,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
} else {
SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
}
+ init_pscr(mp, pd->speed, pd->duplex);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -2478,6 +2591,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
* have to map the buffers to ISA memory which is only 16 MB
*/
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
SET_NETDEV_DEV(dev, &pdev->dev);
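/*
 * Illustrative aside, not part of the patch: the phy_reset() hunk above
 * swaps the magic constants (register 0, bit 0x8000) for MII_BMCR and
 * BMCR_RESET from <linux/mii.h>.  The underlying operation is the standard
 * 802.3 PHY soft reset: set the self-clearing BMCR_RESET bit and poll until
 * the PHY deasserts it.  A sketch of that pattern with hypothetical
 * mdio_read()/mdio_write() callbacks, not the driver's smi_reg_read()/
 * smi_reg_write(); a real implementation would also bound the loop:
 */
#include <linux/mii.h>
#include <linux/delay.h>

static void generic_phy_soft_reset(int phy_addr,
				   unsigned int (*mdio_read)(int addr, int reg),
				   void (*mdio_write)(int addr, int reg, unsigned int val))
{
	unsigned int bmcr = mdio_read(phy_addr, MII_BMCR);

	mdio_write(phy_addr, MII_BMCR, bmcr | BMCR_RESET);
	do {
		udelay(1);				/* reset bit is self-clearing */
		bmcr = mdio_read(phy_addr, MII_BMCR);
	} while (bmcr & BMCR_RESET);
}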
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3ab0e5289f7..f1de38f8b74 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3699,6 +3699,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
goto abort_with_netdev;
}
+ (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
if (mgp->cmd == NULL)
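/*
 * Illustrative aside, not part of the patch: the myri10ge hunk above adds a
 * pci_set_consistent_dma_mask() call because the coherent (consistent) DMA
 * mask is tracked separately from the streaming mask set by
 * pci_set_dma_mask().  A sketch of the usual probe-time pattern of this
 * kernel era, with a 32-bit fallback; the helper name is made up:
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
		return 0;			/* full 64-bit DMA */

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return 0;			/* fall back to 32-bit DMA */

	return -EIO;
}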
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index fdbeeee0737..99372109077 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -101,6 +101,8 @@ struct mcp_kreq_ether_recv {
#define MXGEFW_ETH_SEND_3 0x2c0000
#define MXGEFW_ETH_RECV_SMALL 0x300000
#define MXGEFW_ETH_RECV_BIG 0x340000
+#define MXGEFW_ETH_SEND_GO 0x380000
+#define MXGEFW_ETH_SEND_STOP 0x3C0000
#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
@@ -120,6 +122,11 @@ enum myri10ge_mcp_cmd_type {
* MXGEFW_CMD_RESET is issued */
MXGEFW_CMD_SET_INTRQ_DMA,
+ /* data0 = LSW of the host address
+ * data1 = MSW of the host address
+ * data2 = slice number if multiple slices are used
+ */
+
MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */
MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */
@@ -129,6 +136,8 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_SEND_OFFSET,
MXGEFW_CMD_GET_SMALL_RX_OFFSET,
MXGEFW_CMD_GET_BIG_RX_OFFSET,
+ /* data0 = slice number if multiple slices are used */
+
MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
@@ -200,7 +209,12 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_SET_STATS_DMA_V2,
/* data0, data1 = bus addr,
* data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
- * adding new stuff to mcp_irq_data without changing the ABI */
+ * adding new stuff to mcp_irq_data without changing the ABI
+ *
+ * If multiple slices are used, data2 contains both the size of the
+ * structure (in the lower 16 bits) and the slice number
+ * (in the upper 16 bits).
+ */
MXGEFW_CMD_UNALIGNED_TEST,
/* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned
@@ -222,13 +236,18 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_MAX_RSS_QUEUES,
MXGEFW_CMD_ENABLE_RSS_QUEUES,
/* data0 = number of slices n (0, 1, ..., n-1) to enable
- * data1 = interrupt mode.
- * 0=share one INTx/MSI, 1=use one MSI-X per queue.
+ * data1 = interrupt mode | use of multiple transmit queues.
+ * 0=share one INTx/MSI.
+ * 1=use one MSI-X per queue.
* If all queues share one interrupt, the driver must have set
* RSS_SHARED_INTERRUPT_DMA before enabling queues.
+ * 2=enable both receive and send queues.
+ * Without this bit set, only one send queue (slice 0's send queue)
+ * is enabled. The receive queues are always enabled.
*/
-#define MXGEFW_SLICE_INTR_MODE_SHARED 0
-#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1
+#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0
+#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1
+#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2
MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
@@ -250,10 +269,13 @@ enum myri10ge_mcp_cmd_type {
* 2: TCP_IPV4 (required by RSS)
* 3: IPV4 | TCP_IPV4 (required by RSS)
* 4: source port
+ * 5: source port + destination port
*/
#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
+#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5
+#define MXGEFW_RSS_HASH_TYPE_MAX 0x5
MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
/* Return data = the max. size of the entire headers of a IPv6 TSO packet.
@@ -329,6 +351,20 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_DCA_OFFSET,
/* offset of dca control for WDMAs */
+
+ /* VMWare NetQueue commands */
+ MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE,
+ MXGEFW_CMD_NETQ_ADD_FILTER,
+ /* data0 = filter_id << 16 | queue << 8 | type */
+ /* data1 = MS4 of MAC Addr */
+ /* data2 = LS2_MAC << 16 | VLAN_tag */
+ MXGEFW_CMD_NETQ_DEL_FILTER,
+ /* data0 = filter_id */
+ MXGEFW_CMD_NETQ_QUERY1,
+ MXGEFW_CMD_NETQ_QUERY2,
+ MXGEFW_CMD_NETQ_QUERY3,
+ MXGEFW_CMD_NETQ_QUERY4,
+
};
enum myri10ge_mcp_cmd_status {
@@ -381,4 +417,10 @@ struct mcp_irq_data {
u8 valid;
};
+/* definitions for NETQ filter type */
+#define MXGEFW_NETQ_FILTERTYPE_NONE 0
+#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1
+#define MXGEFW_NETQ_FILTERTYPE_VLAN 2
+#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3
+
#endif /* __MYRI10GE_MCP_H__ */
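/*
 * Illustrative aside, not part of the patch: per the updated comment above,
 * data1 of MXGEFW_CMD_ENABLE_RSS_QUEUES now carries the interrupt mode
 * OR-ed with the new multiple-TX-queue flag.  A sketch of how a driver
 * might compose the request; issue_cmd() is a hypothetical stand-in for the
 * real command submission path:
 */
#include <linux/types.h>
#include "myri10ge_mcp.h"

static int issue_cmd(u32 cmd, u32 data0, u32 data1)
{
	/* hypothetical stub - the real driver talks to the firmware here */
	return 0;
}

static int example_enable_slices(int num_slices)
{
	u32 data0 = num_slices;	/* enable slices 0 .. n-1 */
	u32 data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE |	/* one MSI-X per queue */
		    MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;	/* and a send queue per slice */

	return issue_cmd(MXGEFW_CMD_ENABLE_RSS_QUEUES, data0, data1);
}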
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 07d65c2cbb2..a8662ea8079 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -35,7 +35,7 @@ struct mcp_gen_header {
unsigned char mcp_index;
unsigned char disable_rabbit;
unsigned char unaligned_tlp;
- unsigned char pad1;
+ unsigned char pcie_link_algo;
unsigned counters_addr;
unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
unsigned short handoff_id_major; /* must be equal */
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 2fec6122c7f..42443d69742 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -536,7 +536,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = eip_poll;
#endif
- NS8390_init(dev, 0);
+ NS8390p_init(dev, 0);
ret = register_netdev(dev);
if (ret)
@@ -794,7 +794,7 @@ retry:
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
ne_reset_8390(dev);
- NS8390_init(dev,1);
+ NS8390p_init(dev, 1);
break;
}
@@ -855,7 +855,7 @@ static int ne_drv_resume(struct platform_device *pdev)
if (netif_running(dev)) {
ne_reset_8390(dev);
- NS8390_init(dev, 1);
+ NS8390p_init(dev, 1);
netif_device_attach(dev);
}
return 0;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index e13966bb5f7..9681618c323 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL");
static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
-MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]\n");
+MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
#ifndef MODULE
static int __init option_setup(char *opt)
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index dc442e37085..3f9af759cb9 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -29,12 +29,11 @@
#include <linux/mii.h>
#include <asm/io.h>
-#include <asm/hardware.h>
-#include <asm/arch/hardware.h>
-#include <asm/arch/netx-regs.h>
-#include <asm/arch/pfifo.h>
-#include <asm/arch/xc.h>
-#include <asm/arch/eth.h>
+#include <mach/hardware.h>
+#include <mach/netx-regs.h>
+#include <mach/pfifo.h>
+#include <mach/xc.h>
+#include <mach/eth.h>
/* XC Fifo Offsets */
#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8e736614407..93a7b9b668d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -508,6 +508,8 @@ typedef enum {
NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,
NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,
NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,
+ NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
+ NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
@@ -1170,6 +1172,36 @@ typedef struct {
nx_nic_intr_coalesce_data_t irq;
} nx_nic_intr_coalesce_t;
+#define NX_HOST_REQUEST 0x13
+#define NX_NIC_REQUEST 0x14
+
+#define NX_MAC_EVENT 0x1
+
+enum {
+ NX_NIC_H2C_OPCODE_START = 0,
+ NX_NIC_H2C_OPCODE_CONFIG_RSS,
+ NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL,
+ NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE,
+ NX_NIC_H2C_OPCODE_CONFIG_LED,
+ NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS,
+ NX_NIC_H2C_OPCODE_CONFIG_L2_MAC,
+ NX_NIC_H2C_OPCODE_LRO_REQUEST,
+ NX_NIC_H2C_OPCODE_GET_SNMP_STATS,
+ NX_NIC_H2C_OPCODE_PROXY_START_REQUEST,
+ NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST,
+ NX_NIC_H2C_OPCODE_PROXY_SET_MTU,
+ NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE,
+ NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST,
+ NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST,
+ NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST,
+ NX_NIC_H2C_OPCODE_GET_NET_STATS,
+ NX_NIC_H2C_OPCODE_LAST
+};
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
typedef struct {
u64 qhdr;
u64 req_hdr;
@@ -1288,7 +1320,7 @@ struct netxen_adapter {
int (*disable_phy_interrupts) (struct netxen_adapter *);
int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
int (*set_mtu) (struct netxen_adapter *, int);
- int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t);
+ int (*set_promisc) (struct netxen_adapter *, u32);
int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
int (*init_port) (struct netxen_adapter *, int);
@@ -1465,9 +1497,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter);
u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
void netxen_p2_nic_set_multi(struct net_device *netdev);
void netxen_p3_nic_set_multi(struct net_device *netdev);
+int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
-u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu);
+int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
int netxen_nic_set_mac(struct net_device *netdev, void *p);
@@ -1502,7 +1535,9 @@ static const struct netxen_brdinfo netxen_boards[] = {
{NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
{NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
{NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
- {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"},
+ {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
{NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
{NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
};
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 64babc59e69..64b51643c62 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -145,8 +145,8 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
return rcode;
}
-u32
-nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
+int
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
u32 rcode = NX_RCODE_SUCCESS;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
@@ -160,7 +160,10 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
0,
NX_CDRP_CMD_SET_MTU);
- return rcode;
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return 0;
}
static int
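
The hunk above converts nx_fw_cmd_set_mtu() from returning the raw firmware response code to returning 0 or -EIO, so callers can use the usual errno convention. A minimal sketch of that calling pattern, assuming the driver's own headers; the helper name is hypothetical and the logic simply mirrors the netxen_nic_change_mtu() change later in this diff:

/* Sketch only: commit the new MTU to the netdev only if the firmware
 * accepted it.  nx_fw_cmd_set_mtu() now returns 0 or -EIO. */
static int nx_try_new_mtu(struct netxen_adapter *adapter,
                          struct net_device *netdev, int mtu)
{
        int rc = nx_fw_cmd_set_mtu(adapter, mtu);

        if (rc)
                return rc;      /* firmware rejected the request */

        netdev->mtu = mtu;      /* hardware accepted it, record the change */
        return 0;
}
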
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 48ee06b6f4e..4ad3e0844b9 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -140,18 +140,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (netif_running(dev)) {
ecmd->speed = adapter->link_speed;
ecmd->duplex = adapter->link_duplex;
- } else
- return -EIO; /* link absent */
+ ecmd->autoneg = adapter->link_autoneg;
+ }
+
} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
- ecmd->supported = (SUPPORTED_TP |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_10000baseT_Full);
- ecmd->advertising = (ADVERTISED_TP |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full);
+ u32 val;
+
+ adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4);
+ if (val == NETXEN_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
ecmd->port = PORT_TP;
- ecmd->speed = SPEED_10000;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ u16 pcifn = adapter->ahw.pci_func;
+
+ adapter->hw_read_wx(adapter,
+ P3_LINK_SPEED_REG(pcifn), &val, 4);
+ ecmd->speed = P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val);
+ } else
+ ecmd->speed = SPEED_10000;
+
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
} else
@@ -192,6 +207,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
break;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
case NETXEN_BRDTYPE_P3_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 3ce13e451aa..e8e8d73f6ed 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -724,6 +724,13 @@ enum {
#define XG_LINK_STATE_P3(pcifn,val) \
(((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
+
#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
@@ -836,9 +843,11 @@ enum {
#define PCIE_SETUP_FUNCTION (0x12040)
#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
#define PCIE_CHICKEN3 (0x120c8)
+#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
#define PCIE_MAX_MASTER_SPLIT (0x14048)
#define NETXEN_PORT_MODE_NONE 0
@@ -854,6 +863,7 @@ enum {
#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
/*
* PCI Interrupt Vector Values.
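
The new P3_LINK_SPEED_* macros above pack one byte per PCI function into the CRB_PF_LINK_SPEED_{1,2} registers; the stored byte is multiplied by P3_LINK_SPEED_MHZ (100) to obtain the ethtool speed value. A short sketch of the decode, assuming the driver's headers; the helper name is hypothetical and the access mirrors the hw_read_wx() call in netxen_nic_get_settings():

/* Sketch: read the per-function link speed for a P3 adapter. */
static u32 nx_p3_link_speed(struct netxen_adapter *adapter)
{
        u32 val;
        u16 pcifn = adapter->ahw.pci_func;

        /* two CRB registers, four functions each, one byte per function */
        adapter->hw_read_wx(adapter, P3_LINK_SPEED_REG(pcifn), &val, 4);

        return P3_LINK_SPEED_MHZ * P3_LINK_SPEED_VAL(pcifn, val);
}
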
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 96a3bc6426e..9aa20f96161 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -285,14 +285,7 @@ static unsigned crb_hub_agt[64] =
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) <= (high)) && ((addr) >= (low)))
-#define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE
-#define NETXEN_MIN_MTU 64
-#define NETXEN_ETH_FCS_SIZE 4
-#define NETXEN_ENET_HEADER_SIZE 14
#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
-#define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4)
-#define NETXEN_NIU_HDRSIZE (0x1 << 6)
-#define NETXEN_NIU_TLRSIZE (0x1 << 5)
#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
@@ -541,9 +534,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
return 0;
}
-#define NIC_REQUEST 0x14
-#define NETXEN_MAC_EVENT 0x1
-
static int nx_p3_sre_macaddr_change(struct net_device *dev,
u8 *addr, unsigned op)
{
@@ -553,8 +543,8 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
int rv;
memset(&req, 0, sizeof(nx_nic_req_t));
- req.qhdr |= (NIC_REQUEST << 23);
- req.req_hdr |= NETXEN_MAC_EVENT;
+ req.qhdr |= (NX_NIC_REQUEST << 23);
+ req.req_hdr |= NX_MAC_EVENT;
req.req_hdr |= ((u64)adapter->portnum << 16);
mac_req.op = op;
memcpy(&mac_req.mac_addr, addr, 6);
@@ -575,31 +565,35 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
nx_mac_list_t *cur, *next, *del_list, *add_list = NULL;
struct dev_mc_list *mc_ptr;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
- adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE);
-
- /*
- * Programming mac addresses will automaticly enabling L2 filtering.
- * HW will replace timestamp with L2 conid when L2 filtering is
- * enabled. This causes problem for LSA. Do not enabling L2 filtering
- * until that problem is fixed.
- */
- if ((netdev->flags & IFF_PROMISC) ||
- (netdev->mc_count > adapter->max_mc_count))
- return;
+ u32 mode = VPORT_MISS_MODE_DROP;
del_list = adapter->mac_list;
adapter->mac_list = NULL;
nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list);
+ nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev->mc_count > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
if (netdev->mc_count > 0) {
- nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr,
&add_list, &del_list);
}
}
+
+send_fw_cmd:
+ adapter->set_promisc(adapter, mode);
for (cur = del_list; cur;) {
nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL);
next = cur->next;
@@ -615,6 +609,21 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
}
+int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+ nx_nic_req_t req;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr |= (NX_HOST_REQUEST << 23);
+ req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE;
+ req.req_hdr |= ((u64)adapter->portnum << 16);
+ req.words[0] = cpu_to_le64(mode);
+
+ return netxen_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
#define NETXEN_CONFIG_INTR_COALESCE 3
/*
@@ -627,7 +636,7 @@ int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
memset(&req, 0, sizeof(nx_nic_req_t));
- req.qhdr |= (NIC_REQUEST << 23);
+ req.qhdr |= (NX_NIC_REQUEST << 23);
req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE;
req.req_hdr |= ((u64)adapter->portnum << 16);
@@ -653,6 +662,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
int max_mtu;
+ int rc = 0;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
max_mtu = P3_MAX_MTU;
@@ -666,16 +676,12 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
}
if (adapter->set_mtu)
- adapter->set_mtu(adapter, mtu);
- netdev->mtu = mtu;
+ rc = adapter->set_mtu(adapter, mtu);
- mtu += MTU_FUDGE_FACTOR;
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- nx_fw_cmd_set_mtu(adapter, mtu);
- else if (adapter->set_mtu)
- adapter->set_mtu(adapter, mtu);
+ if (!rc)
+ netdev->mtu = mtu;
- return 0;
+ return rc;
}
int netxen_is_flash_supported(struct netxen_adapter *adapter)
@@ -1411,7 +1417,8 @@ static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter,
(netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
write_unlock_irqrestore(&adapter->adapter_lock, flags);
printk(KERN_ERR "%s out of bound pci memory access. "
- "offset is 0x%llx\n", netxen_nic_driver_name, off);
+ "offset is 0x%llx\n", netxen_nic_driver_name,
+ (unsigned long long)off);
return -1;
}
@@ -1484,7 +1491,8 @@ netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off,
(netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
write_unlock_irqrestore(&adapter->adapter_lock, flags);
printk(KERN_ERR "%s out of bound pci memory access. "
- "offset is 0x%llx\n", netxen_nic_driver_name, off);
+ "offset is 0x%llx\n", netxen_nic_driver_name,
+ (unsigned long long)off);
return -1;
}
@@ -2016,6 +2024,8 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
case NETXEN_BRDTYPE_P3_IMEZ:
case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
case NETXEN_BRDTYPE_P3_10G_XFP:
case NETXEN_BRDTYPE_P3_10000_BASE_T:
@@ -2034,6 +2044,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
default:
printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
boardinfo->board_type);
+ rv = -ENODEV;
break;
}
@@ -2044,6 +2055,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
{
+ new_mtu += MTU_FUDGE_FACTOR;
netxen_nic_write_w0(adapter,
NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
new_mtu);
@@ -2052,7 +2064,7 @@ int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
{
- new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE;
+ new_mtu += MTU_FUDGE_FACTOR;
if (adapter->physical_port == 0)
netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE,
new_mtu);
@@ -2074,12 +2086,22 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
__u32 status;
__u32 autoneg;
__u32 mode;
+ __u32 port_mode;
netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */
+
+ adapter->hw_read_wx(adapter,
+ NETXEN_PORT_MODE_ADDR, &port_mode, 4);
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ adapter->link_speed = SPEED_1000;
+ adapter->link_duplex = DUPLEX_FULL;
+ adapter->link_autoneg = AUTONEG_DISABLE;
+ return;
+ }
+
if (adapter->phy_read
- && adapter->
- phy_read(adapter,
+ && adapter->phy_read(adapter,
NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
&status) == 0) {
if (netxen_get_phy_link(status)) {
@@ -2109,8 +2131,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
break;
}
if (adapter->phy_read
- && adapter->
- phy_read(adapter,
+ && adapter->phy_read(adapter,
NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
&autoneg) != 0)
adapter->link_autoneg = autoneg;
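
netxen_p3_nic_set_multi() above no longer forces promiscuous mode; it picks a vport miss mode and sends it to the firmware through the new netxen_p3_nic_set_promisc(). The flag-to-mode decision, factored into a hypothetical helper for clarity (the mapping itself is exactly what the patch does inline):

/* Sketch: choose the firmware miss mode from the netdev flags. */
static u32 nx_p3_pick_miss_mode(struct net_device *netdev, int max_mc_count)
{
        if (netdev->flags & IFF_PROMISC)
                return VPORT_MISS_MODE_ACCEPT_ALL;      /* accept everything */

        if ((netdev->flags & IFF_ALLMULTI) ||
            (netdev->mc_count > max_mc_count))
                return VPORT_MISS_MODE_ACCEPT_MULTI;    /* unmatched multicast */

        return VPORT_MISS_MODE_DROP;                    /* drop unmatched frames */
}
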
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index b8e0030f03d..aae737dc77a 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -419,12 +419,9 @@ typedef enum {
#define netxen_get_niu_enable_ge(config_word) \
_netxen_crb_get_bit(config_word, 1)
-/* Promiscous mode options (GbE mode only) */
-typedef enum {
- NETXEN_NIU_PROMISC_MODE = 0,
- NETXEN_NIU_NON_PROMISC_MODE,
- NETXEN_NIU_ALLMULTI_MODE
-} netxen_niu_prom_mode_t;
+#define NETXEN_NIU_NON_PROMISC_MODE 0
+#define NETXEN_NIU_PROMISC_MODE 1
+#define NETXEN_NIU_ALLMULTI_MODE 2
/*
* NIU GB Drop CRC Register
@@ -471,9 +468,9 @@ typedef enum {
/* Set promiscuous mode for a GbE interface */
int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
- netxen_niu_prom_mode_t mode);
+ u32 mode);
int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
- netxen_niu_prom_mode_t mode);
+ u32 mode);
/* set the MAC address for a given MAC */
int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 01ab31b34a8..519fc860e17 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -364,6 +364,11 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
default:
break;
}
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ adapter->set_mtu = nx_fw_cmd_set_mtu;
+ adapter->set_promisc = netxen_p3_nic_set_promisc;
+ }
}
/*
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 91d209a8f6c..7615c715e66 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -166,7 +166,8 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
if (!NETXEN_IS_MSI_FAMILY(adapter)) {
do {
adapter->pci_write_immediate(adapter,
- ISR_INT_TARGET_STATUS, 0xffffffff);
+ adapter->legacy_intr.tgt_status_reg,
+ 0xffffffff);
mask = adapter->pci_read_immediate(adapter,
ISR_INT_VECTOR);
if (!(mask & 0x80))
@@ -175,7 +176,7 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
} while (--retries);
if (!retries) {
- printk(KERN_NOTICE "%s: Failed to disable interrupt completely\n",
+ printk(KERN_NOTICE "%s: Failed to disable interrupt\n",
netxen_nic_driver_name);
}
} else {
@@ -190,8 +191,6 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
{
u32 mask;
- DPRINTK(1, INFO, "Entered ISR Enable \n");
-
if (adapter->intr_scheme != -1 &&
adapter->intr_scheme != INTR_SCHEME_PERPORT) {
switch (adapter->ahw.board_type) {
@@ -213,16 +212,13 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
if (!NETXEN_IS_MSI_FAMILY(adapter)) {
mask = 0xbff;
- if (adapter->intr_scheme != -1 &&
- adapter->intr_scheme != INTR_SCHEME_PERPORT) {
+ if (adapter->intr_scheme == INTR_SCHEME_PERPORT)
+ adapter->pci_write_immediate(adapter,
+ adapter->legacy_intr.tgt_mask_reg, mask);
+ else
adapter->pci_write_normalize(adapter,
CRB_INT_VECTOR, 0);
- }
- adapter->pci_write_immediate(adapter,
- ISR_INT_TARGET_MASK, mask);
}
-
- DPRINTK(1, INFO, "Done with enable Int\n");
}
static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
@@ -284,6 +280,8 @@ static void netxen_check_options(struct netxen_adapter *adapter)
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
case NETXEN_BRDTYPE_P3_IMEZ:
case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
case NETXEN_BRDTYPE_P3_10G_XFP:
case NETXEN_BRDTYPE_P3_10000_BASE_T:
adapter->msix_supported = !!use_msi_x;
@@ -301,6 +299,10 @@ static void netxen_check_options(struct netxen_adapter *adapter)
case NETXEN_BRDTYPE_P3_REF_QG:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
+ adapter->msix_supported = 0;
+ adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
+ break;
+
case NETXEN_BRDTYPE_P2_SB35_4G:
case NETXEN_BRDTYPE_P2_SB31_2G:
adapter->msix_supported = 0;
@@ -700,13 +702,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->status &= ~NETXEN_NETDEV_STATUS;
adapter->rx_csum = 1;
adapter->mc_enabled = 0;
- if (NX_IS_REVISION_P3(revision_id)) {
+ if (NX_IS_REVISION_P3(revision_id))
adapter->max_mc_count = 38;
- adapter->max_rds_rings = 2;
- } else {
+ else
adapter->max_mc_count = 16;
- adapter->max_rds_rings = 3;
- }
netdev->open = netxen_nic_open;
netdev->stop = netxen_nic_close;
@@ -779,10 +778,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->portnum == 0)
first_driver = 1;
}
- adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum];
- adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum];
- netxen_nic_update_cmd_producer(adapter, 0);
- netxen_nic_update_cmd_consumer(adapter, 0);
if (first_driver) {
first_boot = adapter->pci_read_normalize(adapter,
@@ -1053,6 +1048,11 @@ static int netxen_nic_open(struct net_device *netdev)
return -EIO;
}
+ if (adapter->fw_major < 4)
+ adapter->max_rds_rings = 3;
+ else
+ adapter->max_rds_rings = 2;
+
err = netxen_alloc_sw_resources(adapter);
if (err) {
printk(KERN_ERR "%s: Error in setting sw resources\n",
@@ -1074,10 +1074,10 @@ static int netxen_nic_open(struct net_device *netdev)
crb_cmd_producer[adapter->portnum];
adapter->crb_addr_cmd_consumer =
crb_cmd_consumer[adapter->portnum];
- }
- netxen_nic_update_cmd_producer(adapter, 0);
- netxen_nic_update_cmd_consumer(adapter, 0);
+ netxen_nic_update_cmd_producer(adapter, 0);
+ netxen_nic_update_cmd_consumer(adapter, 0);
+ }
for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
for (ring = 0; ring < adapter->max_rds_rings; ring++)
@@ -1113,9 +1113,7 @@ static int netxen_nic_open(struct net_device *netdev)
netxen_nic_set_link_parameters(adapter);
netdev->set_multicast_list(netdev);
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- nx_fw_cmd_set_mtu(adapter, netdev->mtu);
- else
+ if (adapter->set_mtu)
adapter->set_mtu(adapter, netdev->mtu);
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -1410,20 +1408,17 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
port = adapter->physical_port;
- if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
- val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
- linkup = (val >> port) & 1;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ linkup = (val == XG_LINK_UP_P3);
} else {
- if (adapter->fw_major < 4) {
- val = adapter->pci_read_normalize(adapter,
- CRB_XG_STATE);
+ val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
+ if (adapter->ahw.board_type == NETXEN_NIC_GBE)
+ linkup = (val >> port) & 1;
+ else {
val = (val >> port*8) & 0xff;
linkup = (val == XG_LINK_UP);
- } else {
- val = adapter->pci_read_normalize(adapter,
- CRB_XG_STATE_P3);
- val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
- linkup = (val == XG_LINK_UP_P3);
}
}
@@ -1535,15 +1530,33 @@ static irqreturn_t netxen_intr(int irq, void *data)
struct netxen_adapter *adapter = data;
u32 our_int = 0;
- our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
- /* not our interrupt */
- if ((our_int & (0x80 << adapter->portnum)) == 0)
+ u32 status = 0;
+
+ status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
+
+ if (!(status & adapter->legacy_intr.int_vec_bit))
return IRQ_NONE;
- if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
- /* claim interrupt */
- adapter->pci_write_normalize(adapter, CRB_INT_VECTOR,
+ if (adapter->ahw.revision_id >= NX_P3_B1) {
+ /* check interrupt state machine, to be sure */
+ status = adapter->pci_read_immediate(adapter,
+ ISR_INT_STATE_REG);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+
+ our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
+ /* not our interrupt */
+ if ((our_int & (0x80 << adapter->portnum)) == 0)
+ return IRQ_NONE;
+
+ if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
+ /* claim interrupt */
+ adapter->pci_write_normalize(adapter,
+ CRB_INT_VECTOR,
our_int & ~((u32)(0x80 << adapter->portnum)));
+ }
}
netxen_handle_int(adapter);
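
The reworked netxen_intr() above first tests the per-function bit in ISR_INT_VECTOR and, on P3 B1 or newer silicon, double-checks the interrupt state machine before claiming the line. Condensed into a hypothetical helper (the reads are the same pci_read_immediate() calls used in the handler, and the driver's headers are assumed):

/* Sketch: does this shared legacy interrupt really belong to us? */
static bool nx_legacy_int_pending(struct netxen_adapter *adapter)
{
        u32 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);

        if (!(status & adapter->legacy_intr.int_vec_bit))
                return false;           /* another function raised it */

        if (adapter->ahw.revision_id >= NX_P3_B1) {
                status = adapter->pci_read_immediate(adapter,
                                                     ISR_INT_STATE_REG);
                return ISR_LEGACY_INT_TRIGGERED(status);
        }

        return true;
}
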
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 4cb8f4a1cf4..27f07f6a45b 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -610,6 +610,9 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
int i;
DECLARE_MAC_BUF(mac);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
for (i = 0; i < 10; i++) {
temp[0] = temp[1] = 0;
memcpy(temp + 2, addr, 2);
@@ -727,6 +730,9 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
__u32 mac_cfg0;
u32 port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
if (port > NETXEN_NIU_MAX_GBE_PORTS)
return -EINVAL;
mac_cfg0 = 0;
@@ -743,6 +749,9 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
__u32 mac_cfg;
u32 port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
if (port > NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
@@ -755,7 +764,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
/* Set promiscuous mode for a GbE interface */
int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
- netxen_niu_prom_mode_t mode)
+ u32 mode)
{
__u32 reg;
u32 port = adapter->physical_port;
@@ -819,6 +828,9 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
u8 temp[4];
u32 val;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS))
return -EIO;
@@ -894,7 +906,7 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter,
#endif /* 0 */
int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
- netxen_niu_prom_mode_t mode)
+ u32 mode)
{
__u32 reg;
u32 port = adapter->physical_port;
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 3bfa51b62a4..83e5ee57bfe 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -95,8 +95,8 @@
#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc)
#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0)
#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4)
-#define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8)
-#define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec)
+#define CRB_PF_LINK_SPEED_1 NETXEN_NIC_REG(0xe8)
+#define CRB_PF_LINK_SPEED_2 NETXEN_NIC_REG(0xec)
#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0)
#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4)
#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index a20005c09e0..8e0ca9f4e40 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -648,7 +648,6 @@ static void ni5010_set_multicast_list(struct net_device *dev)
PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
- dev->flags |= IFF_PROMISC;
outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
} else {
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a316dcc8a06..b9a882d362d 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -621,7 +621,7 @@ static int init586(struct net_device *dev)
if (num_addrs > len) {
printk(KERN_ERR "%s: switching to promisc. mode\n",
dev->name);
- dev->flags |= IFF_PROMISC;
+ writeb(0x01, &cfg_cmd->promisc);
}
}
if (dev->flags & IFF_PROMISC)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e82b37bbd6c..3cdd07c45b6 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -38,7 +38,7 @@
#define DRV_NAME "qla3xxx"
#define DRV_STRING "QLogic ISP3XXX Network Driver"
-#define DRV_VERSION "v2.03.00-k4"
+#define DRV_VERSION "v2.03.00-k5"
#define PFX DRV_NAME " "
static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -3495,8 +3495,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
case ISP_CONTROL_FN0_NET:
qdev->mac_index = 0;
qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
- qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
- qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
qdev->mb_bit_mask = FN0_MA_BITS_MASK;
qdev->PHYAddr = PORT0_PHY_ADDRESS;
if (port_status & PORT_STATUS_SM0)
@@ -3508,8 +3506,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
case ISP_CONTROL_FN1_NET:
qdev->mac_index = 1;
qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
- qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
- qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
qdev->mb_bit_mask = FN1_MA_BITS_MASK;
qdev->PHYAddr = PORT1_PHY_ADDRESS;
if (port_status & PORT_STATUS_SM1)
@@ -3730,14 +3726,6 @@ static int ql3xxx_open(struct net_device *ndev)
return (ql_adapter_up(qdev));
}
-static void ql3xxx_set_multicast_list(struct net_device *ndev)
-{
- /*
- * We are manually parsing the list in the net_device structure.
- */
- return;
-}
-
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
@@ -4007,7 +3995,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
ndev->open = ql3xxx_open;
ndev->hard_start_xmit = ql3xxx_send;
ndev->stop = ql3xxx_close;
- ndev->set_multicast_list = ql3xxx_set_multicast_list;
+ /* ndev->set_multicast_list
+ * This device is one side of a two-function adapter
+ * (NIC and iSCSI). Promiscuous mode setting/clearing is
+ * not allowed from the NIC side.
+ */
SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
ndev->set_mac_address = ql3xxx_set_mac_address;
ndev->tx_timeout = ql3xxx_tx_timeout;
@@ -4040,9 +4032,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
- /* Turn off support for multicasting */
- ndev->flags &= ~IFF_MULTICAST;
-
/* Record PCI bus information. */
ql_get_board_info(qdev);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 58a086fddec..7113e71b15a 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -14,24 +14,14 @@
#define OPCODE_OB_MAC_IOCB_FN0 0x01
#define OPCODE_OB_MAC_IOCB_FN2 0x21
-#define OPCODE_OB_TCP_IOCB_FN0 0x03
-#define OPCODE_OB_TCP_IOCB_FN2 0x23
-#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
-#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
-#define OPCODE_UPDATE_NCB_IOCB 0xF0
#define OPCODE_IB_MAC_IOCB 0xF9
#define OPCODE_IB_3032_MAC_IOCB 0x09
#define OPCODE_IB_IP_IOCB 0xFA
#define OPCODE_IB_3032_IP_IOCB 0x0A
-#define OPCODE_IB_TCP_IOCB 0xFB
-#define OPCODE_DUMP_PROTO_IOCB 0xFE
-#define OPCODE_BUFFER_ALERT_IOCB 0xFB
#define OPCODE_FUNC_ID_MASK 0x30
#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
-#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
-#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
#define FN0_MA_BITS_MASK 0x00
#define FN1_MA_BITS_MASK 0x80
@@ -159,75 +149,6 @@ struct ob_ip_iocb_rsp {
__le32 reserved2;
};
-struct ob_tcp_iocb_req {
- u8 opcode;
-
- u8 flags0;
-#define OB_TCP_IOCB_REQ_P 0x80
-#define OB_TCP_IOCB_REQ_CI 0x20
-#define OB_TCP_IOCB_REQ_H 0x10
-#define OB_TCP_IOCB_REQ_LN 0x08
-#define OB_TCP_IOCB_REQ_K 0x04
-#define OB_TCP_IOCB_REQ_D 0x02
-#define OB_TCP_IOCB_REQ_I 0x01
-
- u8 flags1;
-#define OB_TCP_IOCB_REQ_OSM 0x40
-#define OB_TCP_IOCB_REQ_URG 0x20
-#define OB_TCP_IOCB_REQ_ACK 0x10
-#define OB_TCP_IOCB_REQ_PSH 0x08
-#define OB_TCP_IOCB_REQ_RST 0x04
-#define OB_TCP_IOCB_REQ_SYN 0x02
-#define OB_TCP_IOCB_REQ_FIN 0x01
-
- u8 options_len;
-#define OB_TCP_IOCB_REQ_OMASK 0xF0
-#define OB_TCP_IOCB_REQ_SHIFT 4
-
- __le32 transaction_id;
- __le32 data_len;
- __le32 hncb_ptr_low;
- __le32 hncb_ptr_high;
- __le32 buf_addr0_low;
- __le32 buf_addr0_high;
- __le32 buf_0_len;
- __le32 buf_addr1_low;
- __le32 buf_addr1_high;
- __le32 buf_1_len;
- __le32 buf_addr2_low;
- __le32 buf_addr2_high;
- __le32 buf_2_len;
- __le32 time_stamp;
- __le32 reserved1;
-};
-
-struct ob_tcp_iocb_rsp {
- u8 opcode;
-
- u8 flags0;
-#define OB_TCP_IOCB_RSP_C 0x20
-#define OB_TCP_IOCB_RSP_H 0x10
-#define OB_TCP_IOCB_RSP_LN 0x08
-#define OB_TCP_IOCB_RSP_K 0x04
-#define OB_TCP_IOCB_RSP_D 0x02
-#define OB_TCP_IOCB_RSP_I 0x01
-
- u8 flags1;
-#define OB_TCP_IOCB_RSP_E 0x10
-#define OB_TCP_IOCB_RSP_W 0x08
-#define OB_TCP_IOCB_RSP_P 0x04
-#define OB_TCP_IOCB_RSP_T 0x02
-#define OB_TCP_IOCB_RSP_F 0x01
-
- u8 state;
-#define OB_TCP_IOCB_RSP_SMASK 0xF0
-#define OB_TCP_IOCB_RSP_SHIFT 4
-
- __le32 transaction_id;
- __le32 local_ncb_ptr;
- __le32 reserved0;
-};
-
struct ib_ip_iocb_rsp {
u8 opcode;
#define IB_IP_IOCB_RSP_3032_V 0x80
@@ -256,25 +177,6 @@ struct ib_ip_iocb_rsp {
__le32 ial_high;
};
-struct ib_tcp_iocb_rsp {
- u8 opcode;
- u8 flags;
-#define IB_TCP_IOCB_RSP_P 0x80
-#define IB_TCP_IOCB_RSP_T 0x40
-#define IB_TCP_IOCB_RSP_D 0x20
-#define IB_TCP_IOCB_RSP_N 0x10
-#define IB_TCP_IOCB_RSP_IP 0x03
-#define IB_TCP_FLAG_MASK 0xf0
-#define IB_TCP_FLAG_IOCB_SYN 0x00
-
-#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK)
-
- __le16 length;
- __le32 hncb_ref_num;
- __le32 ial_low;
- __le32 ial_high;
-};
-
struct net_rsp_iocb {
u8 opcode;
u8 flags;
@@ -1266,20 +1168,13 @@ struct ql3_adapter {
u32 small_buf_release_cnt;
u32 small_buf_total_size;
- /* ISR related, saves status for DPC. */
- u32 control_status;
-
struct eeprom_data nvram_data;
- struct timer_list ioctl_timer;
u32 port_link_state;
- u32 last_rsp_offset;
/* 4022 specific */
u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
u32 mac_ob_opcode; /* Opcode to use on mac transmission */
- u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
- u32 update_ob_opcode; /* Opcode to use for updating NCB */
u32 mb_bit_mask; /* MA Bits mask to use on transmission */
u32 numPorts;
struct workqueue_struct *workqueue;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 86d77d05190..a2b073097e5 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3143,7 +3143,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
pkt_cnt++;
/* Updating the statistics block */
- nic->stats.tx_bytes += skb->len;
+ nic->dev->stats.tx_bytes += skb->len;
nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
dev_kfree_skb_irq(skb);
@@ -4896,25 +4896,42 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
/* Configure Stats for immediate updt */
s2io_updt_stats(sp);
+ /* Using sp->stats as a staging area, because reset (due to mtu
+ change, for example) will clear some hardware counters */
+ dev->stats.tx_packets +=
+ le32_to_cpu(mac_control->stats_info->tmac_frms) -
+ sp->stats.tx_packets;
sp->stats.tx_packets =
le32_to_cpu(mac_control->stats_info->tmac_frms);
+ dev->stats.tx_errors +=
+ le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
+ sp->stats.tx_errors;
sp->stats.tx_errors =
le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
+ dev->stats.rx_errors +=
+ le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
+ sp->stats.rx_errors;
sp->stats.rx_errors =
le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
+ dev->stats.multicast +=
+ le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
+ sp->stats.multicast;
sp->stats.multicast =
le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
+ dev->stats.rx_length_errors +=
+ le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
+ sp->stats.rx_length_errors;
sp->stats.rx_length_errors =
le64_to_cpu(mac_control->stats_info->rmac_long_frms);
/* collect per-ring rx_packets and rx_bytes */
- sp->stats.rx_packets = sp->stats.rx_bytes = 0;
+ dev->stats.rx_packets = dev->stats.rx_bytes = 0;
for (i = 0; i < config->rx_ring_num; i++) {
- sp->stats.rx_packets += mac_control->rings[i].rx_packets;
- sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
+ dev->stats.rx_packets += mac_control->rings[i].rx_packets;
+ dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
}
- return (&sp->stats);
+ return (&dev->stats);
}
/**
@@ -7419,7 +7436,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
if (err_mask != 0x5) {
DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
dev->name, err_mask);
- sp->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
sp->mac_control.stats_info->sw_stat.mem_freed
+= skb->truesize;
dev_kfree_skb(skb);
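
s2io_get_stats() above now treats sp->stats as a snapshot of the last hardware readout and folds only the difference into dev->stats, so accumulated totals survive resets that clear the chip counters (an MTU change, for instance). The pattern, reduced to a hypothetical helper:

/* Sketch: accumulate the change since the previous read, then remember
 * the raw hardware value for next time. */
static void stats_accumulate(unsigned long *total, unsigned long *snapshot,
                             unsigned long hw_now)
{
        *total += hw_now - *snapshot;
        *snapshot = hw_now;
}
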
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index c69ba1395fa..25e62cf58d3 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1,7 +1,7 @@
/*
* SuperH Ethernet device driver
*
- * Copyright (C) 2006,2007 Nobuhiro Iwamatsu
+ * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
* Copyright (C) 2008 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
@@ -34,6 +34,29 @@
#include "sh_eth.h"
+/* CPU <-> EDMAC endian convert */
+static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return cpu_to_le32(x);
+ case EDMAC_BIG_ENDIAN:
+ return cpu_to_be32(x);
+ }
+ return x;
+}
+
+static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return le32_to_cpu(x);
+ case EDMAC_BIG_ENDIAN:
+ return be32_to_cpu(x);
+ }
+ return x;
+}
+
/*
* Program the hardware MAC address from dev->dev_addr.
*/
@@ -143,13 +166,39 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = sh_get_mdio,
};
+/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
u32 ioaddr = ndev->base_addr;
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ int cnt = 100;
+
+ ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
+ ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ while (cnt > 0) {
+ if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+ if (cnt <= 0)
+ printk(KERN_ERR "Device reset failed\n");
+
+ /* Table Init */
+ ctrl_outl(0x0, ioaddr + TDLAR);
+ ctrl_outl(0x0, ioaddr + TDFAR);
+ ctrl_outl(0x0, ioaddr + TDFXR);
+ ctrl_outl(0x0, ioaddr + TDFFR);
+ ctrl_outl(0x0, ioaddr + RDLAR);
+ ctrl_outl(0x0, ioaddr + RDFAR);
+ ctrl_outl(0x0, ioaddr + RDFXR);
+ ctrl_outl(0x0, ioaddr + RDFFR);
+#else
ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
mdelay(3);
ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+#endif
}
/* free skb and descriptor buffer */
@@ -180,6 +229,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
+ u32 ioaddr = ndev->base_addr, reserve = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
int i;
struct sk_buff *skb;
@@ -201,22 +251,41 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- skb->dev = ndev; /* Mark as being used by this device. */
+ skb->dev = ndev; /* Mark as being used by this device. */
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ reserve = SH7763_SKB_ALIGN
+ - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
+ if (reserve)
+ skb_reserve(skb, reserve);
+#else
skb_reserve(skb, RX_OFFSET);
-
+#endif
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
rxdesc->addr = (u32)skb->data & ~0x3UL;
- rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
+ rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* The size of the buffer is 16 byte boundary. */
rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
+ /* Rx descriptor address set */
+ if (i == 0) {
+ ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
+#endif
+ }
}
+ /* Rx descriptor address set */
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
+ ctrl_outl(0x1, ioaddr + RDFFR);
+#endif
+
mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_le32(RC_RDEL);
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
memset(mdp->tx_ring, 0, tx_ringsize);
@@ -224,11 +293,24 @@ static void sh_eth_ring_format(struct net_device *ndev)
for (i = 0; i < TX_RING_SIZE; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
- txdesc->status = cpu_to_le32(TD_TFP);
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
txdesc->buffer_length = 0;
+ if (i == 0) {
+ /* Tx descriptor address set */
+ ctrl_outl((u32)txdesc, ioaddr + TDLAR);
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ ctrl_outl((u32)txdesc, ioaddr + TDFAR);
+#endif
+ }
}
- txdesc->status |= cpu_to_le32(TD_TDLE);
+ /* Tx descriptor address set */
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ ctrl_outl((u32)txdesc, ioaddr + TDFXR);
+ ctrl_outl(0x1, ioaddr + TDFFR);
+#endif
+
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
/* Get skb and descriptor buffer */
@@ -311,31 +393,43 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Soft Reset */
sh_eth_reset(ndev);
- ctrl_outl(RPADIR_PADS1, ioaddr + RPADIR); /* SH7712-DMA-RX-PAD2 */
+ /* Descriptor format */
+ sh_eth_ring_format(ndev);
+ ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);
/* all sh_eth int mask */
ctrl_outl(0, ioaddr + EESIPR);
- /* FIFO size set */
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ ctrl_outl(EDMR_EL, ioaddr + EDMR);
+#else
ctrl_outl(0, ioaddr + EDMR); /* Endian change */
+#endif
+ /* FIFO size set */
ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
ctrl_outl(0, ioaddr + TFTR);
+ /* Frame recv control */
ctrl_outl(0, ioaddr + RMCR);
rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ /* Burst cycle set */
+ ctrl_outl(0x800, ioaddr + BCULR);
+#endif
+
ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
- ctrl_outl(0, ioaddr + TRIMD);
- /* Descriptor format */
- sh_eth_ring_format(ndev);
+#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
+ ctrl_outl(0, ioaddr + TRIMD);
+#endif
- ctrl_outl((u32)mdp->rx_ring, ioaddr + RDLAR);
- ctrl_outl((u32)mdp->tx_ring, ioaddr + TDLAR);
+ /* Recv frame limit set register */
+ ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);
@@ -345,21 +439,26 @@ static int sh_eth_dev_init(struct net_device *ndev)
ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
ctrl_outl(val, ioaddr + ECMR);
- ctrl_outl(ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD |
- ECSIPR_MPDIP, ioaddr + ECSR);
- ctrl_outl(ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP |
- ECSIPR_ICDIP | ECSIPR_MPDIP, ioaddr + ECSIPR);
+
+ /* E-MAC Status Register clear */
+ ctrl_outl(ECSR_INIT, ioaddr + ECSR);
+
+ /* E-MAC Interrupt Enable register */
+ ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
/* mask reset */
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
+#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
ctrl_outl(APR_AP, ioaddr + APR);
ctrl_outl(MPR_MP, ioaddr + MPR);
ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7710)
ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
#endif
+
/* Setting the Rx mode will start the Rx process. */
ctrl_outl(EDRRR_R, ioaddr + EDRRR);
@@ -379,7 +478,7 @@ static int sh_eth_txfree(struct net_device *ndev)
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
entry = mdp->dirty_tx % TX_RING_SIZE;
txdesc = &mdp->tx_ring[entry];
- if (txdesc->status & cpu_to_le32(TD_TACT))
+ if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
@@ -387,9 +486,9 @@ static int sh_eth_txfree(struct net_device *ndev)
mdp->tx_skbuff[entry] = NULL;
freeNum++;
}
- txdesc->status = cpu_to_le32(TD_TFP);
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
if (entry >= TX_RING_SIZE - 1)
- txdesc->status |= cpu_to_le32(TD_TDLE);
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
mdp->stats.tx_packets++;
mdp->stats.tx_bytes += txdesc->buffer_length;
@@ -407,11 +506,11 @@ static int sh_eth_rx(struct net_device *ndev)
int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
struct sk_buff *skb;
u16 pkt_len = 0;
- u32 desc_status;
+ u32 desc_status, reserve = 0;
rxdesc = &mdp->rx_ring[entry];
- while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
- desc_status = le32_to_cpu(rxdesc->status);
+ while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
if (--boguscnt < 0)
@@ -446,7 +545,7 @@ static int sh_eth_rx(struct net_device *ndev)
mdp->stats.rx_packets++;
mdp->stats.rx_bytes += pkt_len;
}
- rxdesc->status |= cpu_to_le32(RD_RACT);
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % RX_RING_SIZE;
}
@@ -454,28 +553,38 @@ static int sh_eth_rx(struct net_device *ndev)
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
entry = mdp->dirty_rx % RX_RING_SIZE;
rxdesc = &mdp->rx_ring[entry];
+ /* The size of the buffer is 16 byte boundary. */
+ rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
+
if (mdp->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb(mdp->rx_buf_sz);
mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
skb->dev = ndev;
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ reserve = SH7763_SKB_ALIGN
+ - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
+ if (reserve)
+ skb_reserve(skb, reserve);
+#else
skb_reserve(skb, RX_OFFSET);
+#endif
+ skb->ip_summed = CHECKSUM_NONE;
rxdesc->addr = (u32)skb->data & ~0x3UL;
}
- /* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
if (entry >= RX_RING_SIZE - 1)
rxdesc->status |=
- cpu_to_le32(RD_RACT | RD_RFP | RC_RDEL);
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
else
rxdesc->status |=
- cpu_to_le32(RD_RACT | RD_RFP);
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP);
}
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
+ if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
+ ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
return 0;
}
@@ -529,13 +638,14 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
printk(KERN_ERR "Receive Frame Overflow\n");
}
}
-
+#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
if (intr_status & EESR_ADE) {
if (intr_status & EESR_TDE) {
if (intr_status & EESR_TFE)
mdp->stats.tx_fifo_errors++;
}
}
+#endif
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
@@ -550,8 +660,11 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
mdp->stats.rx_fifo_errors++;
printk(KERN_ERR "Receive FIFO Overflow\n");
}
- if (intr_status &
- (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)) {
+ if (intr_status & (EESR_TWB | EESR_TABT |
+#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
+ EESR_ADE |
+#endif
+ EESR_TDE | EESR_TFE)) {
/* Tx error */
u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
/* dmesg */
@@ -582,17 +695,23 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
ioaddr = ndev->base_addr;
spin_lock(&mdp->lock);
+ /* Get interrupt status */
intr_status = ctrl_inl(ioaddr + EESR);
/* Clear interrupt */
ctrl_outl(intr_status, ioaddr + EESR);
- if (intr_status & (EESR_FRC | EESR_RINT8 |
- EESR_RINT5 | EESR_RINT4 | EESR_RINT3 | EESR_RINT2 |
- EESR_RINT1))
+ if (intr_status & (EESR_FRC | /* Frame recv */
+ EESR_RMAF | /* Multicast address recv */
+ EESR_RRF | /* Residual-bit frame recv */
+ EESR_RTLF | /* Long frame recv */
+ EESR_RTSF | /* Short frame recv */
+ EESR_PRE | /* PHY-LSI recv error */
+ EESR_CERF)) { /* Recv frame CRC error */
sh_eth_rx(ndev);
- if (intr_status & (EESR_FTC |
- EESR_TINT4 | EESR_TINT3 | EESR_TINT2 | EESR_TINT1)) {
+ }
+ /* Tx Check */
+ if (intr_status & TX_CHECK) {
sh_eth_txfree(ndev);
netif_wake_queue(ndev);
}
@@ -631,11 +750,32 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ if (mdp->duplex) { /* FULL */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
+ ioaddr + ECMR);
+ } else { /* Half */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
+ ioaddr + ECMR);
+ }
+#endif
}
if (phydev->speed != mdp->speed) {
new_state = 1;
mdp->speed = phydev->speed;
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ ctrl_outl(GECMR_10, ioaddr + GECMR); break;
+ case 100:/* 100BASE */
+ ctrl_outl(GECMR_100, ioaddr + GECMR); break;
+ case 1000: /* 1000BASE */
+ ctrl_outl(GECMR_1000, ioaddr + GECMR); break;
+ default:
+ break;
+ }
+#endif
}
if (mdp->link == PHY_DOWN) {
ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
@@ -730,7 +870,7 @@ static int sh_eth_open(struct net_device *ndev)
/* Set the timer to check for link beat. */
init_timer(&mdp->timer);
mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- setup_timer(&mdp->timer, sh_eth_timer, ndev);
+ setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
return ret;
@@ -814,13 +954,15 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txdesc->buffer_length = skb->len;
if (entry >= TX_RING_SIZE - 1)
- txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
- txdesc->status |= cpu_to_le32(TD_TACT);
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
mdp->cur_tx++;
- ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+ if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
+ ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+
ndev->trans_start = jiffies;
return 0;
@@ -877,9 +1019,15 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
+ ctrl_outl(0, ioaddr + CERCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
+ ctrl_outl(0, ioaddr + CEECR); /* (write clear) */
+#else
mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
-
+#endif
return &mdp->stats;
}
@@ -929,8 +1077,13 @@ static void sh_eth_tsu_init(u32 ioaddr)
ctrl_outl(0, ioaddr + TSU_FWSL0);
ctrl_outl(0, ioaddr + TSU_FWSL1);
ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+ ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
+ ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
+#else
ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
+#endif
ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
@@ -1029,6 +1182,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp;
+ struct sh_eth_plat_data *pd;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1066,8 +1220,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp = netdev_priv(ndev);
spin_lock_init(&mdp->lock);
+ pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
/* get PHY ID */
- mdp->phy_id = (int)pdev->dev.platform_data;
+ mdp->phy_id = pd->phy;
+ /* EDMAC endian */
+ mdp->edmac_endian = pd->edmac_endian;
/* set function */
ndev->open = sh_eth_open;
@@ -1087,12 +1244,16 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* First device only init */
if (!devno) {
+#if defined(ARSTR)
/* reset device */
- ctrl_outl(ARSTR_ARSTR, ndev->base_addr + ARSTR);
+ ctrl_outl(ARSTR_ARSTR, ARSTR);
mdelay(1);
+#endif
+#if defined(SH_TSU_ADDR)
/* TSU init (Init only)*/
sh_eth_tsu_init(SH_TSU_ADDR);
+#endif
}
/* network device register */
@@ -1110,8 +1271,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->name, CARDNAME, (u32) ndev->base_addr);
for (i = 0; i < 5; i++)
- printk(KERN_INFO "%2.2x:", ndev->dev_addr[i]);
- printk(KERN_INFO "%2.2x, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
+ printk("%02X:", ndev->dev_addr[i]);
+ printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
platform_set_drvdata(pdev, ndev);
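
sh_eth_drv_probe() above now expects platform_data to point at a struct sh_eth_plat_data (declared in <asm/sh_eth.h>) carrying the PHY id and the EDMAC descriptor endianness, rather than a bare PHY id. A sketch of the matching board-support registration; the field names follow the probe's pd->phy and pd->edmac_endian accesses, while the device name, resource addresses and IRQ are placeholders:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/sh_eth.h>

static struct sh_eth_plat_data board_sh_eth_pdata = {
        .phy            = 1,                    /* PHY address on the MDIO bus */
        .edmac_endian   = EDMAC_LITTLE_ENDIAN,  /* descriptor byte order */
};

static struct resource board_sh_eth_resources[] = {
        [0] = {         /* placeholder register window */
                .start  = 0xfee00000,
                .end    = 0xfee00000 + 0x400 - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {         /* placeholder interrupt */
                .start  = 57,
                .end    = 57,
                .flags  = IORESOURCE_IRQ,
        },
};

static struct platform_device board_sh_eth_device = {
        .name           = "sh-eth",             /* assumed to match CARDNAME */
        .id             = 0,
        .dev            = { .platform_data = &board_sh_eth_pdata },
        .num_resources  = ARRAY_SIZE(board_sh_eth_resources),
        .resource       = board_sh_eth_resources,
};

A board file would then hand this to platform_device_register() during machine setup.
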
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index e01e1c34771..73bc7181cc1 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -30,120 +30,254 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <asm/sh_eth.h>
+
#define CARDNAME "sh-eth"
#define TX_TIMEOUT (5*HZ)
-
-#define TX_RING_SIZE 128 /* Tx ring size */
-#define RX_RING_SIZE 128 /* Rx ring size */
-#define RX_OFFSET 2 /* skb offset */
+#define TX_RING_SIZE 64 /* Tx ring size */
+#define RX_RING_SIZE 64 /* Rx ring size */
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+
+#define SH7763_SKB_ALIGN 32
/* Chip Base Address */
-#define SH_TSU_ADDR 0xA7000804
+# define SH_TSU_ADDR 0xFFE01800
+# define ARSTR 0xFFE01800
/* Chip Registers */
/* E-DMAC */
-#define EDMR 0x0000
-#define EDTRR 0x0004
-#define EDRRR 0x0008
-#define TDLAR 0x000C
-#define RDLAR 0x0010
-#define EESR 0x0014
-#define EESIPR 0x0018
-#define TRSCER 0x001C
-#define RMFCR 0x0020
-#define TFTR 0x0024
-#define FDR 0x0028
-#define RMCR 0x002C
-#define EDOCR 0x0030
-#define FCFTR 0x0034
-#define RPADIR 0x0038
-#define TRIMD 0x003C
-#define RBWAR 0x0040
-#define RDFAR 0x0044
-#define TBRAR 0x004C
-#define TDFAR 0x0050
+# define EDSR 0x000
+# define EDMR 0x400
+# define EDTRR 0x408
+# define EDRRR 0x410
+# define EESR 0x428
+# define EESIPR 0x430
+# define TDLAR 0x010
+# define TDFAR 0x014
+# define TDFXR 0x018
+# define TDFFR 0x01C
+# define RDLAR 0x030
+# define RDFAR 0x034
+# define RDFXR 0x038
+# define RDFFR 0x03C
+# define TRSCER 0x438
+# define RMFCR 0x440
+# define TFTR 0x448
+# define FDR 0x450
+# define RMCR 0x458
+# define RPADIR 0x460
+# define FCFTR 0x468
+
+/* Ether Register */
+# define ECMR 0x500
+# define ECSR 0x510
+# define ECSIPR 0x518
+# define PIR 0x520
+# define PSR 0x528
+# define PIPR 0x52C
+# define RFLR 0x508
+# define APR 0x554
+# define MPR 0x558
+# define PFTCR 0x55C
+# define PFRCR 0x560
+# define TPAUSER 0x564
+# define GECMR 0x5B0
+# define BCULR 0x5B4
+# define MAHR 0x5C0
+# define MALR 0x5C8
+# define TROCR 0x700
+# define CDCR 0x708
+# define LCCR 0x710
+# define CEFCR 0x740
+# define FRECR 0x748
+# define TSFRCR 0x750
+# define TLFRCR 0x758
+# define RFCR 0x760
+# define CERCR 0x768
+# define CEECR 0x770
+# define MAFCR 0x778
+
+/* TSU Absolute Address */
+# define TSU_CTRST 0x004
+# define TSU_FWEN0 0x010
+# define TSU_FWEN1 0x014
+# define TSU_FCM 0x18
+# define TSU_BSYSL0 0x20
+# define TSU_BSYSL1 0x24
+# define TSU_PRISL0 0x28
+# define TSU_PRISL1 0x2C
+# define TSU_FWSL0 0x30
+# define TSU_FWSL1 0x34
+# define TSU_FWSLC 0x38
+# define TSU_QTAG0 0x40
+# define TSU_QTAG1 0x44
+# define TSU_FWSR 0x50
+# define TSU_FWINMK 0x54
+# define TSU_ADQT0 0x48
+# define TSU_ADQT1 0x4C
+# define TSU_VTAG0 0x58
+# define TSU_VTAG1 0x5C
+# define TSU_ADSBSY 0x60
+# define TSU_TEN 0x64
+# define TSU_POST1 0x70
+# define TSU_POST2 0x74
+# define TSU_POST3 0x78
+# define TSU_POST4 0x7C
+# define TSU_ADRH0 0x100
+# define TSU_ADRL0 0x104
+# define TSU_ADRH31 0x1F8
+# define TSU_ADRL31 0x1FC
+
+# define TXNLCR0 0x80
+# define TXALCR0 0x84
+# define RXNLCR0 0x88
+# define RXALCR0 0x8C
+# define FWNLCR0 0x90
+# define FWALCR0 0x94
+# define TXNLCR1 0xA0
+# define TXALCR1 0xA4
+# define RXNLCR1 0xA8
+# define RXALCR1 0xAC
+# define FWNLCR1 0xB0
+# define FWALCR1 0x40
+
+#else /* CONFIG_CPU_SUBTYPE_SH7763 */
+# define RX_OFFSET 2 /* skb offset */
+#ifndef CONFIG_CPU_SUBTYPE_SH7619
+/* Chip base address */
+# define SH_TSU_ADDR 0xA7000804
+# define ARSTR 0xA7000800
+#endif
+/* Chip Registers */
+/* E-DMAC */
+# define EDMR 0x0000
+# define EDTRR 0x0004
+# define EDRRR 0x0008
+# define TDLAR 0x000C
+# define RDLAR 0x0010
+# define EESR 0x0014
+# define EESIPR 0x0018
+# define TRSCER 0x001C
+# define RMFCR 0x0020
+# define TFTR 0x0024
+# define FDR 0x0028
+# define RMCR 0x002C
+# define EDOCR 0x0030
+# define FCFTR 0x0034
+# define RPADIR 0x0038
+# define TRIMD 0x003C
+# define RBWAR 0x0040
+# define RDFAR 0x0044
+# define TBRAR 0x004C
+# define TDFAR 0x0050
+
/* Ether Register */
-#define ECMR 0x0160
-#define ECSR 0x0164
-#define ECSIPR 0x0168
-#define PIR 0x016C
-#define MAHR 0x0170
-#define MALR 0x0174
-#define RFLR 0x0178
-#define PSR 0x017C
-#define TROCR 0x0180
-#define CDCR 0x0184
-#define LCCR 0x0188
-#define CNDCR 0x018C
-#define CEFCR 0x0194
-#define FRECR 0x0198
-#define TSFRCR 0x019C
-#define TLFRCR 0x01A0
-#define RFCR 0x01A4
-#define MAFCR 0x01A8
-#define IPGR 0x01B4
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
-#define APR 0x01B8
-#define MPR 0x01BC
-#define TPAUSER 0x1C4
-#define BCFR 0x1CC
-#endif /* CONFIG_CPU_SH7710 */
-
-#define ARSTR 0x0800
+# define ECMR 0x0160
+# define ECSR 0x0164
+# define ECSIPR 0x0168
+# define PIR 0x016C
+# define MAHR 0x0170
+# define MALR 0x0174
+# define RFLR 0x0178
+# define PSR 0x017C
+# define TROCR 0x0180
+# define CDCR 0x0184
+# define LCCR 0x0188
+# define CNDCR 0x018C
+# define CEFCR 0x0194
+# define FRECR 0x0198
+# define TSFRCR 0x019C
+# define TLFRCR 0x01A0
+# define RFCR 0x01A4
+# define MAFCR 0x01A8
+# define IPGR 0x01B4
+# if defined(CONFIG_CPU_SUBTYPE_SH7710)
+# define APR 0x01B8
+# define MPR 0x01BC
+# define TPAUSER 0x1C4
+# define BCFR 0x1CC
+# endif /* CONFIG_CPU_SH7710 */
/* TSU */
-#define TSU_CTRST 0x004
-#define TSU_FWEN0 0x010
-#define TSU_FWEN1 0x014
-#define TSU_FCM 0x018
-#define TSU_BSYSL0 0x020
-#define TSU_BSYSL1 0x024
-#define TSU_PRISL0 0x028
-#define TSU_PRISL1 0x02C
-#define TSU_FWSL0 0x030
-#define TSU_FWSL1 0x034
-#define TSU_FWSLC 0x038
-#define TSU_QTAGM0 0x040
-#define TSU_QTAGM1 0x044
-#define TSU_ADQT0 0x048
-#define TSU_ADQT1 0x04C
-#define TSU_FWSR 0x050
-#define TSU_FWINMK 0x054
-#define TSU_ADSBSY 0x060
-#define TSU_TEN 0x064
-#define TSU_POST1 0x070
-#define TSU_POST2 0x074
-#define TSU_POST3 0x078
-#define TSU_POST4 0x07C
-#define TXNLCR0 0x080
-#define TXALCR0 0x084
-#define RXNLCR0 0x088
-#define RXALCR0 0x08C
-#define FWNLCR0 0x090
-#define FWALCR0 0x094
-#define TXNLCR1 0x0A0
-#define TXALCR1 0x0A4
-#define RXNLCR1 0x0A8
-#define RXALCR1 0x0AC
-#define FWNLCR1 0x0B0
-#define FWALCR1 0x0B4
+# define TSU_CTRST 0x004
+# define TSU_FWEN0 0x010
+# define TSU_FWEN1 0x014
+# define TSU_FCM 0x018
+# define TSU_BSYSL0 0x020
+# define TSU_BSYSL1 0x024
+# define TSU_PRISL0 0x028
+# define TSU_PRISL1 0x02C
+# define TSU_FWSL0 0x030
+# define TSU_FWSL1 0x034
+# define TSU_FWSLC 0x038
+# define TSU_QTAGM0 0x040
+# define TSU_QTAGM1 0x044
+# define TSU_ADQT0 0x048
+# define TSU_ADQT1 0x04C
+# define TSU_FWSR 0x050
+# define TSU_FWINMK 0x054
+# define TSU_ADSBSY 0x060
+# define TSU_TEN 0x064
+# define TSU_POST1 0x070
+# define TSU_POST2 0x074
+# define TSU_POST3 0x078
+# define TSU_POST4 0x07C
+# define TXNLCR0 0x080
+# define TXALCR0 0x084
+# define RXNLCR0 0x088
+# define RXALCR0 0x08C
+# define FWNLCR0 0x090
+# define FWALCR0 0x094
+# define TXNLCR1 0x0A0
+# define TXALCR1 0x0A4
+# define RXNLCR1 0x0A8
+# define RXALCR1 0x0AC
+# define FWNLCR1 0x0B0
+# define FWALCR1 0x0B4
#define TSU_ADRH0 0x0100
#define TSU_ADRL0 0x0104
#define TSU_ADRL31 0x01FC
-/* Register's bits */
+#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
+
+/*
+ * Register's bits
+ */
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+/* EDSR */
+enum EDSR_BIT {
+ EDSR_ENT = 0x01, EDSR_ENR = 0x02,
+};
+#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
+
+/* GECMR */
+enum GECMR_BIT {
+ GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
+};
+#endif
/* EDMR */
enum DMAC_M_BIT {
- EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, EDMR_SRST = 0x01,
+ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+ EDMR_SRST = 0x03,
+ EMDR_DESC_R = 0x30, /* Descriptor reserve size */
+ EDMR_EL = 0x40, /* Little endian */
+#else /* CONFIG_CPU_SUBTYPE_SH7763 */
+ EDMR_SRST = 0x01,
+#endif
};
/* EDTRR */
enum DMAC_T_BIT {
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+ EDTRR_TRNS = 0x03,
+#else
EDTRR_TRNS = 0x01,
+#endif
};
/* EDRRR*/
@@ -173,21 +307,47 @@ enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
/* EESR */
enum EESR_BIT {
- EESR_TWB = 0x40000000, EESR_TABT = 0x04000000,
+#ifndef CONFIG_CPU_SUBTYPE_SH7763
+ EESR_TWB = 0x40000000,
+#else
+ EESR_TWB = 0xC0000000,
+ EESR_TC1 = 0x20000000,
+ EESR_TUC = 0x10000000,
+ EESR_ROC = 0x80000000,
+#endif
+ EESR_TABT = 0x04000000,
EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000,
- EESR_ADE = 0x00800000, EESR_ECI = 0x00400000,
- EESR_FTC = 0x00200000, EESR_TDE = 0x00100000,
- EESR_TFE = 0x00080000, EESR_FRC = 0x00040000,
- EESR_RDE = 0x00020000, EESR_RFE = 0x00010000,
- EESR_TINT4 = 0x00000800, EESR_TINT3 = 0x00000400,
- EESR_TINT2 = 0x00000200, EESR_TINT1 = 0x00000100,
- EESR_RINT8 = 0x00000080, EESR_RINT5 = 0x00000010,
- EESR_RINT4 = 0x00000008, EESR_RINT3 = 0x00000004,
- EESR_RINT2 = 0x00000002, EESR_RINT1 = 0x00000001,
-};
-
-#define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
+#ifndef CONFIG_CPU_SUBTYPE_SH7763
+ EESR_ADE = 0x00800000,
+#endif
+ EESR_ECI = 0x00400000,
+ EESR_FTC = 0x00200000, EESR_TDE = 0x00100000,
+ EESR_TFE = 0x00080000, EESR_FRC = 0x00040000,
+ EESR_RDE = 0x00020000, EESR_RFE = 0x00010000,
+#ifndef CONFIG_CPU_SUBTYPE_SH7763
+ EESR_CND = 0x00000800,
+#endif
+ EESR_DLC = 0x00000400,
+ EESR_CD = 0x00000200, EESR_RTO = 0x00000100,
+ EESR_RMAF = 0x00000080, EESR_CEEF = 0x00000040,
+ EESR_CELF = 0x00000020, EESR_RRF = 0x00000010,
+ EESR_RTLF = 0x00000008, EESR_RTSF = 0x00000004,
+ EESR_PRE = 0x00000002, EESR_CERF = 0x00000001,
+};
+
+
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+# define TX_CHECK (EESR_TC1 | EESR_FTC)
+# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
+ | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI)
+# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE)
+
+#else
+# define TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO)
+# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
| EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI)
+# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)
+#endif
/* EESIPR */
enum DMAC_IM_BIT {
@@ -207,8 +367,8 @@ enum DMAC_IM_BIT {
/* Receive descriptor bit */
enum RD_STS_BIT {
- RD_RACT = 0x80000000, RC_RDEL = 0x40000000,
- RC_RFP1 = 0x20000000, RC_RFP0 = 0x10000000,
+ RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
+ RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
@@ -216,9 +376,9 @@ enum RD_STS_BIT {
RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
RD_RFS1 = 0x00000001,
};
-#define RDF1ST RC_RFP1
-#define RDFEND RC_RFP0
-#define RD_RFP (RC_RFP1|RC_RFP0)
+#define RDF1ST RD_RFP1
+#define RDFEND RD_RFP0
+#define RD_RFP (RD_RFP1|RD_RFP0)
/* FCFTR */
enum FCFTR_BIT {
@@ -227,11 +387,16 @@ enum FCFTR_BIT {
FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
};
#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
+#ifndef CONFIG_CPU_SUBTYPE_SH7619
#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)
+#else
+#define FIFO_F_D_RFD (FCFTR_RFD0)
+#endif
/* Transfer descriptor bit */
enum TD_STS_BIT {
- TD_TACT = 0x80000000, TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
+ TD_TACT = 0x80000000,
+ TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
TD_TFP0 = 0x10000000,
};
#define TDF1ST TD_TFP1
@@ -242,6 +407,10 @@ enum TD_STS_BIT {
enum RECV_RST_BIT { RMCR_RST = 0x01, };
/* ECMR */
enum FELIC_MODE_BIT {
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+ ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
+ ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
+#endif
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
@@ -249,18 +418,47 @@ enum FELIC_MODE_BIT {
ECMR_PRM = 0x00000001,
};
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\
+ ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF)
+#else
+#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
+#endif
+
/* ECSR */
enum ECSR_STATUS_BIT {
- ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, ECSR_LCHNG = 0x04,
+#ifndef CONFIG_CPU_SUBTYPE_SH7763
+ ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
+#endif
+ ECSR_LCHNG = 0x04,
ECSR_MPD = 0x02, ECSR_ICD = 0x01,
};
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+# define ECSR_INIT (ECSR_ICD | ECSIPR_MPDIP)
+#else
+# define ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | \
+ ECSR_LCHNG | ECSR_ICD | ECSIPR_MPDIP)
+#endif
+
/* ECSIPR */
enum ECSIPR_STATUS_MASK_BIT {
- ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, ECSIPR_LCHNGIP = 0x04,
+#ifndef CONFIG_CPU_SUBTYPE_SH7763
+ ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
+#endif
+ ECSIPR_LCHNGIP = 0x04,
ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
};
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+# define ECSIPR_INIT (ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
+#else
+# define ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | \
+ ECSIPR_ICDIP | ECSIPR_MPDIP)
+#endif
+
/* APR */
enum APR_BIT {
APR_AP = 0x00000001,
@@ -285,9 +483,22 @@ enum RPADIR_BIT {
RPADIR_PADR = 0x0003f,
};
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+# define RPADIR_INIT (0x00)
+#else
+# define RPADIR_INIT (RPADIR_PADS1)
+#endif
+
+/* RFLR */
+#define RFLR_VALUE 0x1000
+
/* FDR */
enum FIFO_SIZE_BIT {
+#ifndef CONFIG_CPU_SUBTYPE_SH7619
FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007,
+#else
+ FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001,
+#endif
};
enum phy_offsets {
PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
@@ -316,7 +527,7 @@ enum PHY_ANA_BIT {
PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
- PHY_A_SEL = 0x001f,
+ PHY_A_SEL = 0x001e,
};
/* PHY_ANL */
enum PHY_ANL_BIT {
@@ -403,7 +614,7 @@ struct sh_eth_txdesc {
#endif
u32 addr; /* TD2 */
u32 pad1; /* padding data */
-};
+} __attribute__((aligned(2), packed));
/*
* The sh ether Rx buffer descriptors.
@@ -420,7 +631,7 @@ struct sh_eth_rxdesc {
#endif
u32 addr; /* RD2 */
u32 pad0; /* padding data */
-};
+} __attribute__((aligned(2), packed));
struct sh_eth_private {
dma_addr_t rx_desc_dma;
@@ -435,6 +646,7 @@ struct sh_eth_private {
u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
u32 rx_buf_sz; /* Based on MTU+slack. */
+ int edmac_endian;
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
@@ -449,6 +661,10 @@ struct sh_eth_private {
struct net_device_stats tsu_stats; /* TSU forward status */
};
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+/* SH7763 has endian control register */
+#define swaps(x, y)
+#else
static void swaps(char *src, int len)
{
#ifdef __LITTLE_ENDIAN__
@@ -460,5 +676,5 @@ static void swaps(char *src, int len)
*p = swab32(*p);
#endif
}
-
+#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
#endif
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index ffbfb1b79f9..805383b33d3 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -19,6 +19,7 @@
#include "h/smc.h"
#include "h/smt_p.h"
#include <linux/bitrev.h>
+#include <linux/kernel.h>
#define KERNEL
#include "h/smtstate.h"
@@ -1730,20 +1731,18 @@ void fddi_send_antc(struct s_smc *smc, struct fddi_addr *dest)
#endif
#ifdef DEBUG
-#define hextoasc(x) "0123456789abcdef"[x]
-
char *addr_to_string(struct fddi_addr *addr)
{
int i ;
static char string[6*3] = "****" ;
for (i = 0 ; i < 6 ; i++) {
- string[i*3] = hextoasc((addr->a[i]>>4)&0xf) ;
- string[i*3+1] = hextoasc((addr->a[i])&0xf) ;
- string[i*3+2] = ':' ;
+ string[i * 3] = hex_asc_hi(addr->a[i]);
+ string[i * 3 + 1] = hex_asc_lo(addr->a[i]);
+ string[i * 3 + 2] = ':';
}
- string[5*3+2] = 0 ;
- return(string) ;
+ string[5 * 3 + 2] = 0;
+ return(string);
}
#endif
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 5257cf464f1..7d29edcd40b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -275,86 +275,6 @@ static void sky2_power_aux(struct sky2_hw *hw)
PC_VAUX_ON | PC_VCC_OFF));
}
-static void sky2_power_state(struct sky2_hw *hw, pci_power_t state)
-{
- u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
- int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
- u32 reg;
-
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-
- switch (state) {
- case PCI_D0:
- break;
-
- case PCI_D1:
- power_control |= 1;
- break;
-
- case PCI_D2:
- power_control |= 2;
- break;
-
- case PCI_D3hot:
- case PCI_D3cold:
- power_control |= 3;
- if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
- /* additional power saving measurements */
- reg = sky2_pci_read32(hw, PCI_DEV_REG4);
-
- /* set gating core clock for LTSSM in L1 state */
- reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) |
- /* auto clock gated scheme controlled by CLKREQ */
- P_ASPM_A1_MODE_SELECT |
- /* enable Gate Root Core Clock */
- P_CLK_GATE_ROOT_COR_ENA;
-
- if (pex && (hw->flags & SKY2_HW_CLK_POWER)) {
- /* enable Clock Power Management (CLKREQ) */
- u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL);
-
- ctrl |= PCI_EXP_DEVCTL_AUX_PME;
- sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl);
- } else
- /* force CLKREQ Enable in Our4 (A1b only) */
- reg |= P_ASPM_FORCE_CLKREQ_ENA;
-
- /* set Mask Register for Release/Gate Clock */
- sky2_pci_write32(hw, PCI_DEV_REG5,
- P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST |
- P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE |
- P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN);
- } else
- sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT);
-
- /* put CPU into reset state */
- sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET);
- if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0)
- /* put CPU into halt state */
- sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED);
-
- if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) {
- reg = sky2_pci_read32(hw, PCI_DEV_REG1);
- /* force to PCIe L1 */
- reg |= PCI_FORCE_PEX_L1;
- sky2_pci_write32(hw, PCI_DEV_REG1, reg);
- }
- break;
-
- default:
- dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ",
- state);
- return;
- }
-
- power_control |= PCI_PM_CTRL_PME_ENABLE;
- /* Finally, set the new power state. */
- sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
-
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
- sky2_pci_read32(hw, B0_CTST);
-}
-
static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
{
u16 reg;
@@ -709,6 +629,11 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_pci_read32(hw, PCI_DEV_REG1);
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE)
+ gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
+ else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
}
static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
@@ -2855,10 +2780,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
hw->flags = SKY2_HW_GIGABIT
| SKY2_HW_NEWER_PHY
| SKY2_HW_ADV_POWER_CTL;
-
- /* check for Rev. A1 dev 4200 */
- if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0)
- hw->flags |= SKY2_HW_CLK_POWER;
break;
case CHIP_ID_YUKON_EX:
@@ -2914,12 +2835,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
hw->flags |= SKY2_HW_FIBRE_PHY;
- hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM);
- if (hw->pm_cap == 0) {
- dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n");
- return -EIO;
- }
-
hw->ports = 1;
t8 = sky2_read8(hw, B2_Y2_HW_RES);
if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
@@ -4512,7 +4427,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
- sky2_power_state(hw, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
@@ -4525,7 +4440,9 @@ static int sky2_resume(struct pci_dev *pdev)
if (!hw)
return 0;
- sky2_power_state(hw, PCI_D0);
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ goto out;
err = pci_restore_state(pdev);
if (err)
@@ -4595,7 +4512,7 @@ static void sky2_shutdown(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3cold, wol);
pci_disable_device(pdev);
- sky2_power_state(hw, PCI_D3hot);
+ pci_set_power_state(pdev, PCI_D3hot);
}
static struct pci_driver sky2_driver = {
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 4d9c4a19bb8..92fb24b27d4 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2072,9 +2072,7 @@ struct sky2_hw {
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
-#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */
- int pm_cap;
u8 chip_id;
u8 chip_rev;
u8 pmd_type;
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index 76c17c28fab..2abfc284519 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -222,7 +222,7 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
*/
#include <linux/dma-mapping.h>
#include <asm/dma.h>
-#include <asm/arch/pxa-regs.h>
+#include <mach/pxa-regs.h>
static dma_addr_t rx_dmabuf, tx_dmabuf;
static int rx_dmalen, tx_dmalen;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 22209b6f140..997e7f1d5c6 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -187,7 +187,7 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#elif defined(CONFIG_SA1100_ASSABET)
-#include <asm/arch/neponset.h>
+#include <mach/neponset.h>
/* We can only do 8-bit reads and writes in the static memory space. */
#define SMC_CAN_USE_8BIT 1
@@ -339,7 +339,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
* IOBARRIER on entry to their ISR.
*/
-#include <asm/arch/constants.h> /* IOBARRIER_VIRT */
+#include <mach/constants.h> /* IOBARRIER_VIRT */
#define SMC_CAN_USE_8BIT 0
#define SMC_CAN_USE_16BIT 1
@@ -525,7 +525,7 @@ struct smc_local {
*/
#include <linux/dma-mapping.h>
#include <asm/dma.h>
-#include <asm/arch/pxa-regs.h>
+#include <mach/pxa-regs.h>
#ifdef SMC_insl
#undef SMC_insl
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index b65be5d70fe..2ed0bd59681 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -19,7 +19,7 @@
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/se.h>
+#include <mach-se/mach/se.h>
#include <asm/machvec.h>
#ifdef CONFIG_SH_STANDARD_BIOS
#include <asm/sh_bios.h>
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 9b2a7f7bb25..e531302d95f 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -425,14 +425,11 @@ static int init586(struct net_device *dev)
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if(num_addrs > len) {
printk("%s: switching to promisc. mode\n",dev->name);
- dev->flags|=IFF_PROMISC;
+ cfg_cmd->promisc = 1;
}
}
if(dev->flags&IFF_PROMISC)
- {
- cfg_cmd->promisc=1;
- dev->flags|=IFF_PROMISC;
- }
+ cfg_cmd->promisc = 1;
cfg_cmd->carr_coll = 0x00;
p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 633c128a622..d2439b85a79 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -1982,8 +1982,6 @@ static void tg3_power_down_phy(struct tg3 *tp)
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
u32 misc_host_ctrl;
- u16 power_control, power_caps;
- int pm = tp->pm_cap;
/* Make sure register accesses (indirect or otherwise)
* will function correctly.
@@ -1992,18 +1990,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
- pci_read_config_word(tp->pdev,
- pm + PCI_PM_CTRL,
- &power_control);
- power_control |= PCI_PM_CTRL_PME_STATUS;
- power_control &= ~(PCI_PM_CTRL_STATE_MASK);
switch (state) {
case PCI_D0:
- power_control |= 0;
- pci_write_config_word(tp->pdev,
- pm + PCI_PM_CTRL,
- power_control);
- udelay(100); /* Delay after power state change */
+ pci_enable_wake(tp->pdev, state, false);
+ pci_set_power_state(tp->pdev, PCI_D0);
/* Switch out of Vaux if it is a NIC */
if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
@@ -2012,26 +2002,15 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
return 0;
case PCI_D1:
- power_control |= 1;
- break;
-
case PCI_D2:
- power_control |= 2;
- break;
-
case PCI_D3hot:
- power_control |= 3;
break;
default:
- printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
- "requested.\n",
- tp->dev->name, state);
+ printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
+ tp->dev->name, state);
return -EINVAL;
}
-
- power_control |= PCI_PM_CTRL_PME_ENABLE;
-
misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
tw32(TG3PCI_MISC_HOST_CTRL,
misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
@@ -2109,8 +2088,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
WOL_DRV_WOL |
WOL_SET_MAGIC_PKT);
- pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
-
if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
u32 mac_mode;
@@ -2143,8 +2120,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
tw32(MAC_LED_CTRL, tp->led_ctrl);
- if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
- (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
+ if (pci_pme_capable(tp->pdev, state) &&
+ (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
tw32_f(MAC_MODE, mac_mode);
@@ -2236,9 +2213,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+ if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
+ pci_enable_wake(tp->pdev, state, true);
+
/* Finally, set the new power state. */
- pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
- udelay(100); /* Delay after power state change */
+ pci_set_power_state(tp->pdev, state);
return 0;
}
@@ -7708,21 +7687,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
*/
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
- int err;
-
- /* Force the chip into D0. */
- err = tg3_set_power_state(tp, PCI_D0);
- if (err)
- goto out;
-
tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
- err = tg3_reset_hw(tp, reset_phy);
-
-out:
- return err;
+ return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
@@ -8037,13 +8006,11 @@ static int tg3_open(struct net_device *dev)
netif_carrier_off(tp->dev);
- tg3_full_lock(tp, 0);
-
err = tg3_set_power_state(tp, PCI_D0);
- if (err) {
- tg3_full_unlock(tp);
+ if (err)
return err;
- }
+
+ tg3_full_lock(tp, 0);
tg3_disable_ints(tp);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
@@ -9065,7 +9032,8 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct tg3 *tp = netdev_priv(dev);
- if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
+ device_can_wakeup(&tp->pdev->dev))
wol->supported = WAKE_MAGIC;
else
wol->supported = 0;
@@ -9078,18 +9046,22 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct tg3 *tp = netdev_priv(dev);
+ struct device *dp = &tp->pdev->dev;
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
if ((wol->wolopts & WAKE_MAGIC) &&
- !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
+ !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
spin_lock_bh(&tp->lock);
- if (wol->wolopts & WAKE_MAGIC)
+ if (wol->wolopts & WAKE_MAGIC) {
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- else
+ device_set_wakeup_enable(dp, true);
+ } else {
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
+ device_set_wakeup_enable(dp, false);
+ }
spin_unlock_bh(&tp->lock);
return 0;
@@ -11296,7 +11268,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (val & VCPU_CFGSHDW_ASPM_DBNC)
tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
- (val & VCPU_CFGSHDW_WOL_MAGPKT))
+ (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
+ device_may_wakeup(&tp->pdev->dev))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
return;
}
@@ -11426,8 +11399,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
!(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
- if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
- nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
+ if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
+ (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
+ device_may_wakeup(&tp->pdev->dev))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
if (cfg2 & (1 << 17))
@@ -13613,6 +13587,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ pci_power_t target_state;
int err;
/* PCI register 4 needs to be saved whether netif_running() or not.
@@ -13641,7 +13616,9 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
tg3_full_unlock(tp);
- err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
+ target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
+
+ err = tg3_set_power_state(tp, target_state);
if (err) {
int err2;
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7766cde0d63..bf621328b60 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -95,20 +95,20 @@ MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;
module_param_array(ringspeed, int, NULL, 0);
-MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
+MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
/* Packet buffer size */
static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;
module_param_array(pkt_buf_sz, int, NULL, 0) ;
-MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
+MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
/* Message Level */
-static int message_level[XL_MAX_ADAPTERS] = {0,} ;
+static int message_level[XL_MAX_ADAPTERS] = {0,} ;
module_param_array(message_level, int, NULL, 0) ;
-MODULE_PARM_DESC(message_level, "3c359: Level of reported messages \n") ;
+MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
/*
* This is a real nasty way of doing this, but otherwise you
* will be stuck with 1555 lines of hex #'s in the code.
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index f7319d32691..78df2be8a72 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -55,12 +55,28 @@
static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
{
+ void *buf;
+ int err = -ENOMEM;
+
devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length);
- return usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- DM_READ_REGS,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, reg, data, length, USB_CTRL_SET_TIMEOUT);
+
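+ /* usb_control_msg() needs a DMA-capable buffer, so bounce the read
+ * through kmalloc'd memory instead of using the caller's buffer.
+ */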
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ err = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ DM_READ_REGS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
+ if (err == length)
+ memcpy(data, buf, length);
+ else if (err >= 0)
+ err = -EINVAL;
+ kfree(buf);
+
+ out:
+ return err;
}
static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
@@ -70,12 +86,28 @@ static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
{
+ void *buf = NULL;
+ int err = -ENOMEM;
+
devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length);
- return usb_control_msg(dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- DM_WRITE_REGS,
- USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
- 0, reg, data, length, USB_CTRL_SET_TIMEOUT);
+
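+ /* Same bounce-buffer scheme as dm_read(): copy the caller's data
+ * into kmalloc'd memory before handing it to usb_control_msg().
+ */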
+ if (data) {
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ memcpy(buf, data, length);
+ }
+
+ err = usb_control_msg(dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ DM_WRITE_REGS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
+ 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
+ kfree(buf);
+ if (err >= 0 && err < length)
+ err = -EINVAL;
+ out:
+ return err;
}
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index b588c890ea7..a84ba487c71 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1285,6 +1285,21 @@ static void check_carrier(struct work_struct *work)
}
}
+static int pegasus_blacklisted(struct usb_device *udev)
+{
+ struct usb_device_descriptor *udd = &udev->descriptor;
+
+ /* Special quirk to keep the driver from handling the Belkin Bluetooth
+ * dongle which happens to have the same ID.
+ */
+ if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
+ (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
+ (udd->bDeviceProtocol == 1))
+ return 1;
+
+ return 0;
+}
+
static int pegasus_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -1296,6 +1311,12 @@ static int pegasus_probe(struct usb_interface *intf,
DECLARE_MAC_BUF(mac);
usb_get_dev(dev);
+
+ if (pegasus_blacklisted(dev)) {
+ res = -ENODEV;
+ goto out;
+ }
+
net = alloc_etherdev(sizeof(struct pegasus));
if (!net) {
dev_err(&intf->dev, "can't allocate %s\n", "device");
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 370ce30f2f4..007c1297006 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
spin_unlock_irq(&vptr->lock);
}
+static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
+{
+ vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
+}
/**
* velocity_rx_reset - handle a receive reset
@@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr)
struct mac_regs __iomem * regs = vptr->mac_regs;
int i;
- vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
+ velocity_init_rx_ring_indexes(vptr);
/*
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
- vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
+ vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
writew(vptr->options.numrx, &regs->RBRDU);
- writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+ writel(vptr->rx.pool_dma, &regs->RDBaseLo);
writew(0, &regs->RDIdx);
writew(vptr->options.numrx - 1, &regs->RDCSize);
}
@@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
vptr->int_mask = INT_MASK_DEF;
- writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+ writel(vptr->rx.pool_dma, &regs->RDBaseLo);
writew(vptr->options.numrx - 1, &regs->RDCSize);
mac_rx_queue_run(regs);
mac_rx_queue_wake(regs);
writew(vptr->options.numtx - 1, &regs->TDCSize);
- for (i = 0; i < vptr->num_txq; i++) {
- writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
+ for (i = 0; i < vptr->tx.numq; i++) {
+ writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
mac_tx_queue_run(regs, i);
}
@@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
vptr->pdev = pdev;
vptr->chip_id = info->chip_id;
- vptr->num_txq = info->txqueue;
+ vptr->tx.numq = info->txqueue;
vptr->multicast_limit = MCAM_SIZE;
spin_lock_init(&vptr->lock);
INIT_LIST_HEAD(&vptr->list);
@@ -1093,14 +1097,14 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
}
/**
- * velocity_init_rings - set up DMA rings
+ * velocity_init_dma_rings - set up DMA rings
* @vptr: Velocity to set up
*
* Allocate PCI mapped DMA rings for the receive and transmit layer
* to use.
*/
-static int velocity_init_rings(struct velocity_info *vptr)
+static int velocity_init_dma_rings(struct velocity_info *vptr)
{
struct velocity_opt *opt = &vptr->options;
const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
@@ -1116,7 +1120,7 @@ static int velocity_init_rings(struct velocity_info *vptr)
* pci_alloc_consistent() fulfills the requirement for 64 bytes
* alignment
*/
- pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +
+ pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
rx_ring_size, &pool_dma);
if (!pool) {
dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
@@ -1124,15 +1128,15 @@ static int velocity_init_rings(struct velocity_info *vptr)
return -ENOMEM;
}
- vptr->rd_ring = pool;
- vptr->rd_pool_dma = pool_dma;
+ vptr->rx.ring = pool;
+ vptr->rx.pool_dma = pool_dma;
pool += rx_ring_size;
pool_dma += rx_ring_size;
- for (i = 0; i < vptr->num_txq; i++) {
- vptr->td_rings[i] = pool;
- vptr->td_pool_dma[i] = pool_dma;
+ for (i = 0; i < vptr->tx.numq; i++) {
+ vptr->tx.rings[i] = pool;
+ vptr->tx.pool_dma[i] = pool_dma;
pool += tx_ring_size;
pool_dma += tx_ring_size;
}
@@ -1141,18 +1145,18 @@ static int velocity_init_rings(struct velocity_info *vptr)
}
/**
- * velocity_free_rings - free PCI ring pointers
+ * velocity_free_dma_rings - free PCI ring pointers
* @vptr: Velocity to free from
*
* Clean up the PCI ring buffers allocated to this velocity.
*/
-static void velocity_free_rings(struct velocity_info *vptr)
+static void velocity_free_dma_rings(struct velocity_info *vptr)
{
const int size = vptr->options.numrx * sizeof(struct rx_desc) +
- vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+ vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
- pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
+ pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
}
static void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1164,44 +1168,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr)
* RD number must be equal to 4X per hardware spec
* (programming guide rev 1.20, p.13)
*/
- if (vptr->rd_filled < 4)
+ if (vptr->rx.filled < 4)
return;
wmb();
- unusable = vptr->rd_filled & 0x0003;
- dirty = vptr->rd_dirty - unusable;
- for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
+ unusable = vptr->rx.filled & 0x0003;
+ dirty = vptr->rx.dirty - unusable;
+ for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
- vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
+ vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
}
- writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
- vptr->rd_filled = unusable;
+ writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+ vptr->rx.filled = unusable;
}
static int velocity_rx_refill(struct velocity_info *vptr)
{
- int dirty = vptr->rd_dirty, done = 0;
+ int dirty = vptr->rx.dirty, done = 0;
do {
- struct rx_desc *rd = vptr->rd_ring + dirty;
+ struct rx_desc *rd = vptr->rx.ring + dirty;
/* Fine for an all zero Rx desc at init time as well */
if (rd->rdesc0.len & OWNED_BY_NIC)
break;
- if (!vptr->rd_info[dirty].skb) {
+ if (!vptr->rx.info[dirty].skb) {
if (velocity_alloc_rx_buf(vptr, dirty) < 0)
break;
}
done++;
dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
- } while (dirty != vptr->rd_curr);
+ } while (dirty != vptr->rx.curr);
if (done) {
- vptr->rd_dirty = dirty;
- vptr->rd_filled += done;
+ vptr->rx.dirty = dirty;
+ vptr->rx.filled += done;
}
return done;
@@ -1209,7 +1213,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
{
- vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+ vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}
/**
@@ -1224,12 +1228,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
{
int ret = -ENOMEM;
- vptr->rd_info = kcalloc(vptr->options.numrx,
+ vptr->rx.info = kcalloc(vptr->options.numrx,
sizeof(struct velocity_rd_info), GFP_KERNEL);
- if (!vptr->rd_info)
+ if (!vptr->rx.info)
goto out;
- vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
+ velocity_init_rx_ring_indexes(vptr);
if (velocity_rx_refill(vptr) != vptr->options.numrx) {
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1255,18 +1259,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
{
int i;
- if (vptr->rd_info == NULL)
+ if (vptr->rx.info == NULL)
return;
for (i = 0; i < vptr->options.numrx; i++) {
- struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
- struct rx_desc *rd = vptr->rd_ring + i;
+ struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+ struct rx_desc *rd = vptr->rx.ring + i;
memset(rd, 0, sizeof(*rd));
if (!rd_info->skb)
continue;
- pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+ pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
PCI_DMA_FROMDEVICE);
rd_info->skb_dma = (dma_addr_t) NULL;
@@ -1274,8 +1278,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
rd_info->skb = NULL;
}
- kfree(vptr->rd_info);
- vptr->rd_info = NULL;
+ kfree(vptr->rx.info);
+ vptr->rx.info = NULL;
}
/**
@@ -1293,19 +1297,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
unsigned int j;
/* Init the TD ring entries */
- for (j = 0; j < vptr->num_txq; j++) {
- curr = vptr->td_pool_dma[j];
+ for (j = 0; j < vptr->tx.numq; j++) {
+ curr = vptr->tx.pool_dma[j];
- vptr->td_infos[j] = kcalloc(vptr->options.numtx,
+ vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
sizeof(struct velocity_td_info),
GFP_KERNEL);
- if (!vptr->td_infos[j]) {
+ if (!vptr->tx.infos[j]) {
while(--j >= 0)
- kfree(vptr->td_infos[j]);
+ kfree(vptr->tx.infos[j]);
return -ENOMEM;
}
- vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
+ vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
}
return 0;
}
@@ -1317,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
static void velocity_free_td_ring_entry(struct velocity_info *vptr,
int q, int n)
{
- struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
+ struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
int i;
if (td_info == NULL)
@@ -1349,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
{
int i, j;
- for (j = 0; j < vptr->num_txq; j++) {
- if (vptr->td_infos[j] == NULL)
+ for (j = 0; j < vptr->tx.numq; j++) {
+ if (vptr->tx.infos[j] == NULL)
continue;
for (i = 0; i < vptr->options.numtx; i++) {
velocity_free_td_ring_entry(vptr, j, i);
}
- kfree(vptr->td_infos[j]);
- vptr->td_infos[j] = NULL;
+ kfree(vptr->tx.infos[j]);
+ vptr->tx.infos[j] = NULL;
}
}
@@ -1374,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
static int velocity_rx_srv(struct velocity_info *vptr, int status)
{
struct net_device_stats *stats = &vptr->stats;
- int rd_curr = vptr->rd_curr;
+ int rd_curr = vptr->rx.curr;
int works = 0;
do {
- struct rx_desc *rd = vptr->rd_ring + rd_curr;
+ struct rx_desc *rd = vptr->rx.ring + rd_curr;
- if (!vptr->rd_info[rd_curr].skb)
+ if (!vptr->rx.info[rd_curr].skb)
break;
if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1412,7 +1416,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
rd_curr = 0;
} while (++works <= 15);
- vptr->rd_curr = rd_curr;
+ vptr->rx.curr = rd_curr;
if ((works > 0) && (velocity_rx_refill(vptr) > 0))
velocity_give_many_rx_descs(vptr);
@@ -1510,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
struct net_device_stats *stats = &vptr->stats;
- struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
- struct rx_desc *rd = &(vptr->rd_ring[idx]);
+ struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+ struct rx_desc *rd = &(vptr->rx.ring[idx]);
int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
@@ -1527,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
skb = rd_info->skb;
pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
- vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
/*
* Drop frame not meeting IEEE 802.3
@@ -1550,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
rd_info->skb = NULL;
}
- pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+ pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
PCI_DMA_FROMDEVICE);
skb_put(skb, pkt_len - 4);
@@ -1580,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
- struct rx_desc *rd = &(vptr->rd_ring[idx]);
- struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
+ struct rx_desc *rd = &(vptr->rx.ring[idx]);
+ struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
- rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
+ rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
if (rd_info->skb == NULL)
return -ENOMEM;
@@ -1592,14 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
* 64byte alignment.
*/
skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
- rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+ vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
/*
* Fill in the descriptor to match
- */
+ */
*((u32 *) & (rd->rdesc0)) = 0;
- rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
+ rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
rd->pa_low = cpu_to_le32(rd_info->skb_dma);
rd->pa_high = 0;
return 0;
@@ -1625,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
struct velocity_td_info *tdinfo;
struct net_device_stats *stats = &vptr->stats;
- for (qnum = 0; qnum < vptr->num_txq; qnum++) {
- for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
+ for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+ for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
idx = (idx + 1) % vptr->options.numtx) {
/*
* Get Tx Descriptor
*/
- td = &(vptr->td_rings[qnum][idx]);
- tdinfo = &(vptr->td_infos[qnum][idx]);
+ td = &(vptr->tx.rings[qnum][idx]);
+ tdinfo = &(vptr->tx.infos[qnum][idx]);
if (td->tdesc0.len & OWNED_BY_NIC)
break;
@@ -1657,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
stats->tx_bytes += tdinfo->skb->len;
}
velocity_free_tx_buf(vptr, tdinfo);
- vptr->td_used[qnum]--;
+ vptr->tx.used[qnum]--;
}
- vptr->td_tail[qnum] = idx;
+ vptr->tx.tail[qnum] = idx;
if (AVAIL_TD(vptr, qnum) < 1) {
full = 1;
@@ -1846,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
tdinfo->skb = NULL;
}
+static int velocity_init_rings(struct velocity_info *vptr, int mtu)
+{
+ int ret;
+
+ velocity_set_rxbufsize(vptr, mtu);
+
+ ret = velocity_init_dma_rings(vptr);
+ if (ret < 0)
+ goto out;
+
+ ret = velocity_init_rd_ring(vptr);
+ if (ret < 0)
+ goto err_free_dma_rings_0;
+
+ ret = velocity_init_td_ring(vptr);
+ if (ret < 0)
+ goto err_free_rd_ring_1;
+out:
+ return ret;
+
+err_free_rd_ring_1:
+ velocity_free_rd_ring(vptr);
+err_free_dma_rings_0:
+ velocity_free_dma_rings(vptr);
+ goto out;
+}
+
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+ velocity_free_td_ring(vptr);
+ velocity_free_rd_ring(vptr);
+ velocity_free_dma_rings(vptr);
+}
+
/**
* velocity_open - interface activation callback
* @dev: network layer device to open
@@ -1862,20 +1901,10 @@ static int velocity_open(struct net_device *dev)
struct velocity_info *vptr = netdev_priv(dev);
int ret;
- velocity_set_rxbufsize(vptr, dev->mtu);
-
- ret = velocity_init_rings(vptr);
+ ret = velocity_init_rings(vptr, dev->mtu);
if (ret < 0)
goto out;
- ret = velocity_init_rd_ring(vptr);
- if (ret < 0)
- goto err_free_desc_rings;
-
- ret = velocity_init_td_ring(vptr);
- if (ret < 0)
- goto err_free_rd_ring;
-
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
@@ -1888,7 +1917,8 @@ static int velocity_open(struct net_device *dev)
if (ret < 0) {
/* Power down the chip */
pci_set_power_state(vptr->pdev, PCI_D3hot);
- goto err_free_td_ring;
+ velocity_free_rings(vptr);
+ goto out;
}
mac_enable_int(vptr->mac_regs);
@@ -1896,14 +1926,6 @@ static int velocity_open(struct net_device *dev)
vptr->flags |= VELOCITY_FLAGS_OPENED;
out:
return ret;
-
-err_free_td_ring:
- velocity_free_td_ring(vptr);
-err_free_rd_ring:
- velocity_free_rd_ring(vptr);
-err_free_desc_rings:
- velocity_free_rings(vptr);
- goto out;
}
/**
@@ -1919,50 +1941,72 @@ err_free_desc_rings:
static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
struct velocity_info *vptr = netdev_priv(dev);
- unsigned long flags;
- int oldmtu = dev->mtu;
int ret = 0;
if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
vptr->dev->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_0;
}
if (!netif_running(dev)) {
dev->mtu = new_mtu;
- return 0;
+ goto out_0;
}
- if (new_mtu != oldmtu) {
+ if (dev->mtu != new_mtu) {
+ struct velocity_info *tmp_vptr;
+ unsigned long flags;
+ struct rx_info rx;
+ struct tx_info tx;
+
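+ /* Build a complete replacement set of rings in a temporary context
+ * first, then swap it in under the lock below, so a failed
+ * allocation leaves the running device untouched.
+ */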
+ tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
+ if (!tmp_vptr) {
+ ret = -ENOMEM;
+ goto out_0;
+ }
+
+ tmp_vptr->dev = dev;
+ tmp_vptr->pdev = vptr->pdev;
+ tmp_vptr->options = vptr->options;
+ tmp_vptr->tx.numq = vptr->tx.numq;
+
+ ret = velocity_init_rings(tmp_vptr, new_mtu);
+ if (ret < 0)
+ goto out_free_tmp_vptr_1;
+
spin_lock_irqsave(&vptr->lock, flags);
netif_stop_queue(dev);
velocity_shutdown(vptr);
- velocity_free_td_ring(vptr);
- velocity_free_rd_ring(vptr);
+ rx = vptr->rx;
+ tx = vptr->tx;
- dev->mtu = new_mtu;
+ vptr->rx = tmp_vptr->rx;
+ vptr->tx = tmp_vptr->tx;
- velocity_set_rxbufsize(vptr, new_mtu);
+ tmp_vptr->rx = rx;
+ tmp_vptr->tx = tx;
- ret = velocity_init_rd_ring(vptr);
- if (ret < 0)
- goto out_unlock;
+ dev->mtu = new_mtu;
- ret = velocity_init_td_ring(vptr);
- if (ret < 0)
- goto out_unlock;
+ velocity_give_many_rx_descs(vptr);
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
-out_unlock:
+
spin_unlock_irqrestore(&vptr->lock, flags);
- }
+ velocity_free_rings(tmp_vptr);
+
+out_free_tmp_vptr_1:
+ kfree(tmp_vptr);
+ }
+out_0:
return ret;
}
@@ -2008,9 +2052,6 @@ static int velocity_close(struct net_device *dev)
/* Power down the chip */
pci_set_power_state(vptr->pdev, PCI_D3hot);
- /* Free the resources */
- velocity_free_td_ring(vptr);
- velocity_free_rd_ring(vptr);
velocity_free_rings(vptr);
vptr->flags &= (~VELOCITY_FLAGS_OPENED);
@@ -2056,9 +2097,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&vptr->lock, flags);
- index = vptr->td_curr[qnum];
- td_ptr = &(vptr->td_rings[qnum][index]);
- tdinfo = &(vptr->td_infos[qnum][index]);
+ index = vptr->tx.curr[qnum];
+ td_ptr = &(vptr->tx.rings[qnum][index]);
+ tdinfo = &(vptr->tx.infos[qnum][index]);
td_ptr->tdesc1.TCR = TCR0_TIC;
td_ptr->td_buf[0].size &= ~TD_QUEUE;
@@ -2071,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma;
td_ptr->tdesc0.len = len;
- td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
- td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
+ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+ td_ptr->td_buf[0].pa_high = 0;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
} else {
int i = 0;
@@ -2084,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
td_ptr->tdesc0.len = len;
/* FIXME: support 48bit DMA later */
- td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
- td_ptr->td_buf[i].pa_high = 0;
- td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
+ td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
+ td_ptr->td_buf[i].pa_high = 0;
+ td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2094,9 +2135,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
- td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
- td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+ td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+ td_ptr->td_buf[i + 1].pa_high = 0;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
}
tdinfo->nskb_dma = i - 1;
}
@@ -2142,13 +2183,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
if (prev < 0)
prev = vptr->options.numtx - 1;
td_ptr->tdesc0.len |= OWNED_BY_NIC;
- vptr->td_used[qnum]++;
- vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
+ vptr->tx.used[qnum]++;
+ vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
if (AVAIL_TD(vptr, qnum) < 1)
netif_stop_queue(dev);
- td_ptr = &(vptr->td_rings[qnum][prev]);
+ td_ptr = &(vptr->tx.rings[qnum][prev]);
td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
}
@@ -3405,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev)
velocity_tx_srv(vptr, 0);
- for (i = 0; i < vptr->num_txq; i++) {
- if (vptr->td_used[i]) {
+ for (i = 0; i < vptr->tx.numq; i++) {
+ if (vptr->tx.used[i]) {
mac_tx_queue_wake(vptr->mac_regs, i);
}
}
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 86446147284..1b95b04c925 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1494,6 +1494,10 @@ struct velocity_opt {
u32 flags;
};
+#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)]))
+
+#define GET_RD_BY_IDX(vptr, idx) (vptr->rx.ring[idx])
+
struct velocity_info {
struct list_head list;
@@ -1501,9 +1505,6 @@ struct velocity_info {
struct net_device *dev;
struct net_device_stats stats;
- dma_addr_t rd_pool_dma;
- dma_addr_t td_pool_dma[TX_QUEUE_NO];
-
struct vlan_group *vlgrp;
u8 ip_addr[4];
enum chip_type chip_id;
@@ -1512,25 +1513,29 @@ struct velocity_info {
unsigned long memaddr;
unsigned long ioaddr;
- u8 rev_id;
-
-#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)]))
+ struct tx_info {
+ int numq;
+
+ /* FIXME: the locality of the data seems rather poor. */
+ int used[TX_QUEUE_NO];
+ int curr[TX_QUEUE_NO];
+ int tail[TX_QUEUE_NO];
+ struct tx_desc *rings[TX_QUEUE_NO];
+ struct velocity_td_info *infos[TX_QUEUE_NO];
+ dma_addr_t pool_dma[TX_QUEUE_NO];
+ } tx;
+
+ struct rx_info {
+ int buf_sz;
+
+ int dirty;
+ int curr;
+ u32 filled;
+ struct rx_desc *ring;
+ struct velocity_rd_info *info; /* It's an array */
+ dma_addr_t pool_dma;
+ } rx;
- int num_txq;
-
- volatile int td_used[TX_QUEUE_NO];
- int td_curr[TX_QUEUE_NO];
- int td_tail[TX_QUEUE_NO];
- struct tx_desc *td_rings[TX_QUEUE_NO];
- struct velocity_td_info *td_infos[TX_QUEUE_NO];
-
- int rd_curr;
- int rd_dirty;
- u32 rd_filled;
- struct rx_desc *rd_ring;
- struct velocity_rd_info *rd_info; /* It's an array */
-
-#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
u32 mib_counter[MAX_HW_MIB_COUNTER];
struct velocity_opt options;
@@ -1538,7 +1543,6 @@ struct velocity_info {
u32 flags;
- int rx_buf_sz;
u32 mii_status;
u32 phy_id;
int multicast_limit;
@@ -1554,8 +1558,8 @@ struct velocity_info {
struct velocity_context context;
u32 ticks;
- u32 rx_bytes;
+ u8 rev_id;
};
/**
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 846be60e782..2ae2ec40015 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
# There is no way to detect a comtrol sv11 - force it modular for now.
config HOSTESS_SV11
tristate "Comtrol Hostess SV-11 support"
- depends on ISA && m && ISA_DMA_API && INET
+ depends on ISA && m && ISA_DMA_API && INET && HDLC
help
Driver for Comtrol Hostess SV-11 network card which
operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
# The COSA/SRP driver has not been tested as non-modular yet.
config COSA
tristate "COSA/SRP sync serial boards support"
- depends on ISA && m && ISA_DMA_API
+ depends on ISA && m && ISA_DMA_API && HDLC
---help---
Driver for COSA and SRP synchronous serial boards.
@@ -61,7 +61,7 @@ config COSA
#
config LANMEDIA
tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
- depends on PCI && VIRT_TO_BUS
+ depends on PCI && VIRT_TO_BUS && HDLC
---help---
Driver for the following Lan Media family of serial boards:
@@ -78,9 +78,8 @@ config LANMEDIA
- LMC 5245 board connects directly to a T3 circuit saving the
additional external hardware.
- To change setting such as syncPPP vs Cisco HDLC or clock source you
- will need lmcctl. It is available at <ftp://ftp.lanmedia.com/>
- (broken link).
+ To change settings such as the clock source, you will need lmcctl.
+ It is available at <ftp://ftp.lanmedia.com/> (broken link).
To compile this driver as a module, choose M here: the
module will be called lmc.
@@ -88,7 +87,7 @@ config LANMEDIA
# There is no way to detect a Sealevel board. Force it modular
config SEALEVEL_4021
tristate "Sealevel Systems 4021 support"
- depends on ISA && m && ISA_DMA_API && INET
+ depends on ISA && m && ISA_DMA_API && INET && HDLC
help
This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
@@ -154,8 +153,6 @@ config HDLC_PPP
help
Generic HDLC driver supporting PPP over WAN connections.
- It will be replaced by new PPP implementation in Linux 2.6.26.
-
If unsure, say N.
config HDLC_X25
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index d61fef36afc..102549605d0 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -21,12 +21,11 @@ pc300-y := pc300_drv.o
pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o
pc300-objs := $(pc300-y)
-obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o
-obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o
-obj-$(CONFIG_COSA) += syncppp.o cosa.o
-obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o
-obj-$(CONFIG_DSCC4) += dscc4.o
-obj-$(CONFIG_LANMEDIA) += syncppp.o
+obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
+obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
+obj-$(CONFIG_COSA) += cosa.o
+obj-$(CONFIG_FARSYNC) += farsync.o
+obj-$(CONFIG_DSCC4) += dscc4.o
obj-$(CONFIG_X25_ASY) += x25_asy.o
obj-$(CONFIG_LANMEDIA) += lmc/
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f7d3349dc3e..f14051556c8 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+ * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -54,7 +55,7 @@
*
* The Linux driver (unlike the present *BSD drivers :-) can work even
* for the COSA and SRP in one computer and allows each channel to work
- * in one of the three modes (character device, Cisco HDLC, Sync PPP).
+ * in one of the two modes (character or network device).
*
* AUTHOR
*
@@ -72,12 +73,6 @@
* The Comtrol Hostess SV11 driver by Alan Cox
* The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
*/
-/*
- * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br>
- * fixed a deadlock in cosa_sppp_open
- */
-
-/* ---------- Headers, macros, data structures ---------- */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -86,6 +81,7 @@
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/hdlc.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
@@ -93,14 +89,12 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/smp_lock.h>
-
-#undef COSA_SLOW_IO /* for testing purposes only */
-
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include <net/syncppp.h>
+#undef COSA_SLOW_IO /* for testing purposes only */
+
#include "cosa.h"
/* Maximum length of the identification string. */
@@ -112,7 +106,6 @@
/* Per-channel data structure */
struct channel_data {
- void *if_ptr; /* General purpose pointer (used by SPPP) */
int usage; /* Usage count; >0 for chrdev, -1 for netdev */
int num; /* Number of the channel */
struct cosa_data *cosa; /* Pointer to the per-card structure */
@@ -136,10 +129,9 @@ struct channel_data {
wait_queue_head_t txwaitq, rxwaitq;
int tx_status, rx_status;
- /* SPPP/HDLC device parts */
- struct ppp_device pppdev;
+ /* generic HDLC device parts */
+ struct net_device *netdev;
struct sk_buff *rx_skb, *tx_skb;
- struct net_device_stats stats;
};
/* cosa->firmware_status bits */
@@ -281,21 +273,19 @@ static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
static void cosa_kick(struct cosa_data *cosa);
static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
-/* SPPP/HDLC stuff */
-static void sppp_channel_init(struct channel_data *chan);
-static void sppp_channel_delete(struct channel_data *chan);
-static int cosa_sppp_open(struct net_device *d);
-static int cosa_sppp_close(struct net_device *d);
-static void cosa_sppp_timeout(struct net_device *d);
-static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d);
-static char *sppp_setup_rx(struct channel_data *channel, int size);
-static int sppp_rx_done(struct channel_data *channel);
-static int sppp_tx_done(struct channel_data *channel, int size);
-static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-static struct net_device_stats *cosa_net_stats(struct net_device *dev);
+/* Network device stuff */
+static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity);
+static int cosa_net_open(struct net_device *d);
+static int cosa_net_close(struct net_device *d);
+static void cosa_net_timeout(struct net_device *d);
+static int cosa_net_tx(struct sk_buff *skb, struct net_device *d);
+static char *cosa_net_setup_rx(struct channel_data *channel, int size);
+static int cosa_net_rx_done(struct channel_data *channel);
+static int cosa_net_tx_done(struct channel_data *channel, int size);
+static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Character device */
-static void chardev_channel_init(struct channel_data *chan);
static char *chrdev_setup_rx(struct channel_data *channel, int size);
static int chrdev_rx_done(struct channel_data *channel);
static int chrdev_tx_done(struct channel_data *channel, int size);
@@ -357,17 +347,17 @@ static void debug_status_in(struct cosa_data *cosa, int status);
static void debug_status_out(struct cosa_data *cosa, int status);
#endif
-
+static inline struct channel_data* dev_to_chan(struct net_device *dev)
+{
+ return (struct channel_data *)dev_to_hdlc(dev)->priv;
+}
+
/* ---------- Initialization stuff ---------- */
static int __init cosa_init(void)
{
int i, err = 0;
- printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n");
-#ifdef CONFIG_SMP
- printk(KERN_INFO "cosa: SMP found. Please mail any success/failure reports to the author.\n");
-#endif
if (cosa_major > 0) {
if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
printk(KERN_WARNING "cosa: unable to get major %d\n",
@@ -402,7 +392,7 @@ static int __init cosa_init(void)
NULL, "cosa%d", i);
err = 0;
goto out;
-
+
out_chrdev:
unregister_chrdev(cosa_major, "cosa");
out:
@@ -414,43 +404,29 @@ static void __exit cosa_exit(void)
{
struct cosa_data *cosa;
int i;
- printk(KERN_INFO "Unloading the cosa module\n");
- for (i=0; i<nr_cards; i++)
+ for (i = 0; i < nr_cards; i++)
device_destroy(cosa_class, MKDEV(cosa_major, i));
class_destroy(cosa_class);
- for (cosa=cosa_cards; nr_cards--; cosa++) {
+
+ for (cosa = cosa_cards; nr_cards--; cosa++) {
/* Clean up the per-channel data */
- for (i=0; i<cosa->nchannels; i++) {
+ for (i = 0; i < cosa->nchannels; i++) {
/* Chardev driver has no alloc'd per-channel data */
- sppp_channel_delete(cosa->chan+i);
+ unregister_hdlc_device(cosa->chan[i].netdev);
+ free_netdev(cosa->chan[i].netdev);
}
/* Clean up the per-card data */
kfree(cosa->chan);
kfree(cosa->bouncebuf);
free_irq(cosa->irq, cosa);
free_dma(cosa->dma);
- release_region(cosa->datareg,is_8bit(cosa)?2:4);
+ release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
}
unregister_chrdev(cosa_major, "cosa");
}
module_exit(cosa_exit);
-/*
- * This function should register all the net devices needed for the
- * single channel.
- */
-static __inline__ void channel_init(struct channel_data *chan)
-{
- sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num);
-
- /* Initialize the chardev data structures */
- chardev_channel_init(chan);
-
- /* Register the sppp interface */
- sppp_channel_init(chan);
-}
-
static int cosa_probe(int base, int irq, int dma)
{
struct cosa_data *cosa = cosa_cards+nr_cards;
@@ -576,13 +552,43 @@ static int cosa_probe(int base, int irq, int dma)
/* Initialize the per-channel data */
cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
if (!cosa->chan) {
- err = -ENOMEM;
+ err = -ENOMEM;
goto err_out3;
}
- for (i=0; i<cosa->nchannels; i++) {
- cosa->chan[i].cosa = cosa;
- cosa->chan[i].num = i;
- channel_init(cosa->chan+i);
+
+ for (i = 0; i < cosa->nchannels; i++) {
+ struct channel_data *chan = &cosa->chan[i];
+
+ chan->cosa = cosa;
+ chan->num = i;
+ sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
+
+ /* Initialize the chardev data structures */
+ mutex_init(&chan->rlock);
+ init_MUTEX(&chan->wsem);
+
+ /* Register the network interface */
+ if (!(chan->netdev = alloc_hdlcdev(chan))) {
+ printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n",
+ chan->name);
+ goto err_hdlcdev;
+ }
+ dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
+ dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
+ chan->netdev->open = cosa_net_open;
+ chan->netdev->stop = cosa_net_close;
+ chan->netdev->do_ioctl = cosa_net_ioctl;
+ chan->netdev->tx_timeout = cosa_net_timeout;
+ chan->netdev->watchdog_timeo = TX_TIMEOUT;
+ chan->netdev->base_addr = chan->cosa->datareg;
+ chan->netdev->irq = chan->cosa->irq;
+ chan->netdev->dma = chan->cosa->dma;
+ if (register_hdlc_device(chan->netdev)) {
+ printk(KERN_WARNING "%s: register_hdlc_device()"
+ " failed.\n", chan->netdev->name);
+ free_netdev(chan->netdev);
+ goto err_hdlcdev;
+ }
}
printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
@@ -590,13 +596,20 @@ static int cosa_probe(int base, int irq, int dma)
cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
return nr_cards++;
+
+err_hdlcdev:
+ while (i-- > 0) {
+ unregister_hdlc_device(cosa->chan[i].netdev);
+ free_netdev(cosa->chan[i].netdev);
+ }
+ kfree(cosa->chan);
err_out3:
kfree(cosa->bouncebuf);
err_out2:
free_dma(cosa->dma);
err_out1:
free_irq(cosa->irq, cosa);
-err_out:
+err_out:
release_region(cosa->datareg,is_8bit(cosa)?2:4);
printk(KERN_NOTICE "cosa%d: allocating resources failed\n",
cosa->num);
@@ -604,54 +617,19 @@ err_out:
}
-/*---------- SPPP/HDLC netdevice ---------- */
+/*---------- network device ---------- */
-static void cosa_setup(struct net_device *d)
+static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
{
- d->open = cosa_sppp_open;
- d->stop = cosa_sppp_close;
- d->hard_start_xmit = cosa_sppp_tx;
- d->do_ioctl = cosa_sppp_ioctl;
- d->get_stats = cosa_net_stats;
- d->tx_timeout = cosa_sppp_timeout;
- d->watchdog_timeo = TX_TIMEOUT;
-}
-
-static void sppp_channel_init(struct channel_data *chan)
-{
- struct net_device *d;
- chan->if_ptr = &chan->pppdev;
- d = alloc_netdev(0, chan->name, cosa_setup);
- if (!d) {
- printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name);
- return;
- }
- chan->pppdev.dev = d;
- d->base_addr = chan->cosa->datareg;
- d->irq = chan->cosa->irq;
- d->dma = chan->cosa->dma;
- d->ml_priv = chan;
- sppp_attach(&chan->pppdev);
- if (register_netdev(d)) {
- printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
- sppp_detach(d);
- free_netdev(d);
- chan->pppdev.dev = NULL;
- return;
- }
-}
-
-static void sppp_channel_delete(struct channel_data *chan)
-{
- unregister_netdev(chan->pppdev.dev);
- sppp_detach(chan->pppdev.dev);
- free_netdev(chan->pppdev.dev);
- chan->pppdev.dev = NULL;
+ if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+ return 0;
+ return -EINVAL;
}
-static int cosa_sppp_open(struct net_device *d)
+static int cosa_net_open(struct net_device *dev)
{
- struct channel_data *chan = d->ml_priv;
+ struct channel_data *chan = dev_to_chan(dev);
int err;
unsigned long flags;
@@ -662,36 +640,35 @@ static int cosa_sppp_open(struct net_device *d)
}
spin_lock_irqsave(&chan->cosa->lock, flags);
if (chan->usage != 0) {
- printk(KERN_WARNING "%s: sppp_open called with usage count %d\n",
- chan->name, chan->usage);
+ printk(KERN_WARNING "%s: cosa_net_open called with usage count"
+ " %d\n", chan->name, chan->usage);
spin_unlock_irqrestore(&chan->cosa->lock, flags);
return -EBUSY;
}
- chan->setup_rx = sppp_setup_rx;
- chan->tx_done = sppp_tx_done;
- chan->rx_done = sppp_rx_done;
- chan->usage=-1;
+ chan->setup_rx = cosa_net_setup_rx;
+ chan->tx_done = cosa_net_tx_done;
+ chan->rx_done = cosa_net_rx_done;
+ chan->usage = -1;
chan->cosa->usage++;
spin_unlock_irqrestore(&chan->cosa->lock, flags);
- err = sppp_open(d);
+ err = hdlc_open(dev);
if (err) {
spin_lock_irqsave(&chan->cosa->lock, flags);
- chan->usage=0;
+ chan->usage = 0;
chan->cosa->usage--;
-
spin_unlock_irqrestore(&chan->cosa->lock, flags);
return err;
}
- netif_start_queue(d);
+ netif_start_queue(dev);
cosa_enable_rx(chan);
return 0;
}
-static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
+static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct channel_data *chan = dev->ml_priv;
+ struct channel_data *chan = dev_to_chan(dev);
netif_stop_queue(dev);
@@ -700,16 +677,16 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static void cosa_sppp_timeout(struct net_device *dev)
+static void cosa_net_timeout(struct net_device *dev)
{
- struct channel_data *chan = dev->ml_priv;
+ struct channel_data *chan = dev_to_chan(dev);
if (test_bit(RXBIT, &chan->cosa->rxtx)) {
- chan->stats.rx_errors++;
- chan->stats.rx_missed_errors++;
+ chan->netdev->stats.rx_errors++;
+ chan->netdev->stats.rx_missed_errors++;
} else {
- chan->stats.tx_errors++;
- chan->stats.tx_aborted_errors++;
+ chan->netdev->stats.tx_errors++;
+ chan->netdev->stats.tx_aborted_errors++;
}
cosa_kick(chan->cosa);
if (chan->tx_skb) {
@@ -719,13 +696,13 @@ static void cosa_sppp_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int cosa_sppp_close(struct net_device *d)
+static int cosa_net_close(struct net_device *dev)
{
- struct channel_data *chan = d->ml_priv;
+ struct channel_data *chan = dev_to_chan(dev);
unsigned long flags;
- netif_stop_queue(d);
- sppp_close(d);
+ netif_stop_queue(dev);
+ hdlc_close(dev);
cosa_disable_rx(chan);
spin_lock_irqsave(&chan->cosa->lock, flags);
if (chan->rx_skb) {
@@ -736,13 +713,13 @@ static int cosa_sppp_close(struct net_device *d)
kfree_skb(chan->tx_skb);
chan->tx_skb = NULL;
}
- chan->usage=0;
+ chan->usage = 0;
chan->cosa->usage--;
spin_unlock_irqrestore(&chan->cosa->lock, flags);
return 0;
}
-static char *sppp_setup_rx(struct channel_data *chan, int size)
+static char *cosa_net_setup_rx(struct channel_data *chan, int size)
{
/*
* We can safely fall back to non-dma-able memory, because we have
@@ -754,66 +731,53 @@ static char *sppp_setup_rx(struct channel_data *chan, int size)
if (chan->rx_skb == NULL) {
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n",
chan->name);
- chan->stats.rx_dropped++;
+ chan->netdev->stats.rx_dropped++;
return NULL;
}
- chan->pppdev.dev->trans_start = jiffies;
+ chan->netdev->trans_start = jiffies;
return skb_put(chan->rx_skb, size);
}
-static int sppp_rx_done(struct channel_data *chan)
+static int cosa_net_rx_done(struct channel_data *chan)
{
if (!chan->rx_skb) {
printk(KERN_WARNING "%s: rx_done with empty skb!\n",
chan->name);
- chan->stats.rx_errors++;
- chan->stats.rx_frame_errors++;
+ chan->netdev->stats.rx_errors++;
+ chan->netdev->stats.rx_frame_errors++;
return 0;
}
- chan->rx_skb->protocol = htons(ETH_P_WAN_PPP);
- chan->rx_skb->dev = chan->pppdev.dev;
+ chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
+ chan->rx_skb->dev = chan->netdev;
skb_reset_mac_header(chan->rx_skb);
- chan->stats.rx_packets++;
- chan->stats.rx_bytes += chan->cosa->rxsize;
+ chan->netdev->stats.rx_packets++;
+ chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
netif_rx(chan->rx_skb);
chan->rx_skb = NULL;
- chan->pppdev.dev->last_rx = jiffies;
+ chan->netdev->last_rx = jiffies;
return 0;
}
/* ARGSUSED */
-static int sppp_tx_done(struct channel_data *chan, int size)
+static int cosa_net_tx_done(struct channel_data *chan, int size)
{
if (!chan->tx_skb) {
printk(KERN_WARNING "%s: tx_done with empty skb!\n",
chan->name);
- chan->stats.tx_errors++;
- chan->stats.tx_aborted_errors++;
+ chan->netdev->stats.tx_errors++;
+ chan->netdev->stats.tx_aborted_errors++;
return 1;
}
dev_kfree_skb_irq(chan->tx_skb);
chan->tx_skb = NULL;
- chan->stats.tx_packets++;
- chan->stats.tx_bytes += size;
- netif_wake_queue(chan->pppdev.dev);
+ chan->netdev->stats.tx_packets++;
+ chan->netdev->stats.tx_bytes += size;
+ netif_wake_queue(chan->netdev);
return 1;
}
-static struct net_device_stats *cosa_net_stats(struct net_device *dev)
-{
- struct channel_data *chan = dev->ml_priv;
- return &chan->stats;
-}
-
-
/*---------- Character device ---------- */
-static void chardev_channel_init(struct channel_data *chan)
-{
- mutex_init(&chan->rlock);
- init_MUTEX(&chan->wsem);
-}
-
static ssize_t cosa_read(struct file *file,
char __user *buf, size_t count, loff_t *ppos)
{
@@ -1223,16 +1187,15 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
return -ENOIOCTLCMD;
}
-static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
+static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int rv;
- struct channel_data *chan = dev->ml_priv;
- rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
- if (rv == -ENOIOCTLCMD) {
- return sppp_do_ioctl(dev, ifr, cmd);
- }
- return rv;
+ struct channel_data *chan = dev_to_chan(dev);
+ rv = cosa_ioctl_common(chan->cosa, chan, cmd,
+ (unsigned long)ifr->ifr_data);
+ if (rv != -ENOIOCTLCMD)
+ return rv;
+ return hdlc_ioctl(dev, ifr, cmd);
}
static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
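
The cosa changes above are one instance of the conversion this series applies to every syncppp user: allocate the net_device with alloc_hdlcdev(), keep the driver state reachable through dev_to_hdlc(dev)->priv, supply attach/xmit via the hdlc_device, and register with register_hdlc_device(). A minimal sketch of that pattern follows, using the same 2.6.26-era net_device fields the patch itself touches; struct foo_port and the foo_* handlers are hypothetical placeholders, not part of the patch.

#include <linux/hdlc.h>
#include <linux/netdevice.h>

struct foo_port {
	struct net_device *netdev;
	/* ... hardware state ... */
};

/* generic HDLC keeps the driver's private pointer in hdlc_device->priv */
static inline struct foo_port *dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

/* accept only what the hardware can frame: NRZ with 16-bit CCITT CRC */
static int foo_attach(struct net_device *dev, unsigned short encoding,
		      unsigned short parity)
{
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static int foo_open(struct net_device *dev)
{
	int err = hdlc_open(dev);	/* bring up the attached protocol */

	if (!err)
		netif_start_queue(dev);
	return err;
}

static int foo_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	hdlc_close(dev);
	return 0;
}

static int foo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	/* driver-specific ioctls would go first; fall back to generic HDLC */
	return hdlc_ioctl(dev, ifr, cmd);
}

static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* a real driver would queue the frame to hardware here */
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int foo_register(struct foo_port *port)
{
	struct net_device *dev = alloc_hdlcdev(port);

	if (!dev)
		return -ENOMEM;
	dev_to_hdlc(dev)->attach = foo_attach;
	dev_to_hdlc(dev)->xmit = foo_xmit;
	dev->open = foo_open;
	dev->stop = foo_close;
	dev->do_ioctl = foo_ioctl;
	port->netdev = dev;
	if (register_hdlc_device(dev)) {
		free_netdev(dev);
		return -EIO;
	}
	return 0;
}

Teardown is the mirror image seen in cosa_exit() above: unregister_hdlc_device() followed by free_netdev().
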
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 50ef5b4efd6..f5d55ad0226 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -103,7 +103,6 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <net/syncppp.h>
#include <linux/hdlc.h>
#include <linux/mutex.h>
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 754f00809e3..9557ad078ab 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -47,10 +47,7 @@ MODULE_LICENSE("GPL");
/* Default parameters for the link
*/
#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
- * useful, the syncppp module forces
- * this down assuming a slower line I
- * guess.
- */
+ * useful */
#define FST_TXQ_DEPTH 16 /* This one is for the buffering
* of frames on the way down to the card
* so that we can keep the card busy
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index d871dafa87a..6b27e7c3d44 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -54,9 +54,6 @@
/* Ioctl call command values
- *
- * The first three private ioctls are used by the sync-PPP module,
- * allowing a little room for expansion we start our numbering at 10.
*/
#define FSTWRITE (SIOCDEVPRIVATE+10)
#define FSTCPURESET (SIOCDEVPRIVATE+11)
@@ -202,9 +199,6 @@ struct fstioc_info {
#define J1 7
/* "proto" */
-#define FST_HDLC 1 /* Cisco compatible HDLC */
-#define FST_PPP 2 /* Sync PPP */
-#define FST_MONITOR 3 /* Monitor only (raw packet reception) */
#define FST_RAW 4 /* Two way raw packets */
#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index e3a536477c7..1f2a140c9f7 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -22,20 +22,19 @@
* - proto->start() and stop() are called with spin_lock_irq held.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/errno.h>
+#include <linux/hdlc.h>
#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
+#include <linux/poll.h>
#include <linux/rtnetlink.h>
-#include <linux/notifier.h>
-#include <linux/hdlc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <net/net_namespace.h>
@@ -109,7 +108,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
if (dev->get_stats != hdlc_get_stats)
return NOTIFY_DONE; /* not an HDLC device */
-
+
if (event != NETDEV_CHANGE)
return NOTIFY_DONE; /* Only interrested in carrier changes */
@@ -357,7 +356,7 @@ static struct packet_type hdlc_packet_type = {
static struct notifier_block hdlc_notifier = {
- .notifier_call = hdlc_device_event,
+ .notifier_call = hdlc_device_event,
};
@@ -367,8 +366,8 @@ static int __init hdlc_module_init(void)
printk(KERN_INFO "%s\n", version);
if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
- return result;
- dev_add_pack(&hdlc_packet_type);
+ return result;
+ dev_add_pack(&hdlc_packet_type);
return 0;
}
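
The hdlc.c hunks also show the shape of the netdevice notifier generic HDLC uses to track carrier changes: a notifier_block registered with register_netdevice_notifier(), whose callback ignores every event except NETDEV_CHANGE. A bare-bones sketch under the same 2.6-era API (where the notifier data pointer is the struct net_device itself) looks like the following; the foo_* names are placeholders, not kernel symbols.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_device_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;	/* only interested in carrier changes */
	printk(KERN_DEBUG "%s: carrier is %s\n", dev->name,
	       netif_carrier_ok(dev) ? "up" : "down");
	return NOTIFY_DONE;
}

static struct notifier_block foo_notifier = {
	.notifier_call = foo_device_event,
};

static int __init foo_init(void)
{
	return register_netdevice_notifier(&foo_notifier);
}

static void __exit foo_exit(void)
{
	unregister_netdevice_notifier(&foo_notifier);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
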
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 849819c2552..44e64b15dbd 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -9,19 +9,18 @@
* as published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/errno.h>
+#include <linux/hdlc.h>
#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
+#include <linux/poll.h>
#include <linux/rtnetlink.h>
-#include <linux/hdlc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
#undef DEBUG_HARD_HEADER
@@ -68,9 +67,9 @@ struct cisco_state {
static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
-static inline struct cisco_state * state(hdlc_device *hdlc)
+static inline struct cisco_state* state(hdlc_device *hdlc)
{
- return(struct cisco_state *)(hdlc->state);
+ return (struct cisco_state *)hdlc->state;
}
@@ -172,7 +171,7 @@ static int cisco_rx(struct sk_buff *skb)
data->address != CISCO_UNICAST)
goto rx_error;
- switch(ntohs(data->protocol)) {
+ switch (ntohs(data->protocol)) {
case CISCO_SYS_INFO:
/* Packet is not needed, drop it. */
dev_kfree_skb_any(skb);
@@ -336,7 +335,7 @@ static struct hdlc_proto proto = {
static const struct header_ops cisco_header_ops = {
.create = cisco_hard_header,
};
-
+
static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
{
cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
@@ -359,10 +358,10 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return 0;
case IF_PROTO_CISCO:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(dev->flags & IFF_UP)
+ if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&new_settings, cisco_s, size))
@@ -372,7 +371,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
new_settings.timeout < 2)
return -EINVAL;
- result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
if (result)
return result;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 62e93dac6b1..d3d5055741a 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -33,20 +33,19 @@
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/hdlc.h>
#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
+#include <linux/poll.h>
#include <linux/rtnetlink.h>
-#include <linux/etherdevice.h>
-#include <linux/hdlc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
#undef DEBUG_PKT
#undef DEBUG_ECN
@@ -96,7 +95,7 @@ typedef struct {
unsigned ea1: 1;
unsigned cr: 1;
unsigned dlcih: 6;
-
+
unsigned ea2: 1;
unsigned de: 1;
unsigned becn: 1;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 00308337928..4efe9e6d32d 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -9,19 +9,18 @@
* as published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/errno.h>
+#include <linux/hdlc.h>
#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
+#include <linux/poll.h>
#include <linux/rtnetlink.h>
-#include <linux/hdlc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <net/syncppp.h>
struct ppp_state {
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index bbbb819d764..8612311748f 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -9,19 +9,18 @@
* as published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/errno.h>
+#include <linux/hdlc.h>
#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
+#include <linux/poll.h>
#include <linux/rtnetlink.h>
-#include <linux/hdlc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 26dee600506..a13fc320752 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -9,20 +9,19 @@
* as published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/hdlc.h>
#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
+#include <linux/poll.h>
#include <linux/rtnetlink.h>
-#include <linux/etherdevice.h>
-#include <linux/hdlc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index e808720030e..8b7e5d2e2ac 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -9,20 +9,19 @@
* as published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/errno.h>
+#include <linux/hdlc.h>
#include <linux/if_arp.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/pkt_sched.h>
#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/lapb.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
#include <linux/rtnetlink.h>
-#include <linux/hdlc.h>
-
+#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <net/x25device.h>
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index f3065d3473f..e299313f828 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -16,6 +16,8 @@
* touching control registers.
*
* Port B isnt wired (why - beats me)
+ *
+ * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*/
#include <linux/module.h>
@@ -26,6 +28,7 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
+#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <net/arp.h>
@@ -33,34 +36,31 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include <net/syncppp.h>
#include "z85230.h"
static int dma;
-struct sv11_device
-{
- void *if_ptr; /* General purpose pointer (used by SPPP) */
- struct z8530_dev sync;
- struct ppp_device netdev;
-};
-
/*
* Network driver support routines
*/
+static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
+{
+ return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
+}
+
/*
- * Frame receive. Simple for our card as we do sync ppp and there
+ * Frame receive. Simple for our card as we do HDLC and there
* is no funny garbage involved
*/
-
+
static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
{
/* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len-2);
- skb->protocol=__constant_htons(ETH_P_WAN_PPP);
+ skb_trim(skb, skb->len - 2);
+ skb->protocol = hdlc_type_trans(skb, c->netdevice);
skb_reset_mac_header(skb);
- skb->dev=c->netdevice;
+ skb->dev = c->netdevice;
/*
* Send it to the PPP layer. We don't have time to process
* it right now.
@@ -68,56 +68,51 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
netif_rx(skb);
c->netdevice->last_rx = jiffies;
}
-
+
/*
* We've been placed in the UP state
- */
-
+ */
+
static int hostess_open(struct net_device *d)
{
- struct sv11_device *sv11=d->ml_priv;
+ struct z8530_dev *sv11 = dev_to_sv(d);
int err = -1;
-
+
/*
* Link layer up
*/
- switch(dma)
- {
+ switch (dma) {
case 0:
- err=z8530_sync_open(d, &sv11->sync.chanA);
+ err = z8530_sync_open(d, &sv11->chanA);
break;
case 1:
- err=z8530_sync_dma_open(d, &sv11->sync.chanA);
+ err = z8530_sync_dma_open(d, &sv11->chanA);
break;
case 2:
- err=z8530_sync_txdma_open(d, &sv11->sync.chanA);
+ err = z8530_sync_txdma_open(d, &sv11->chanA);
break;
}
-
- if(err)
+
+ if (err)
return err;
- /*
- * Begin PPP
- */
- err=sppp_open(d);
- if(err)
- {
- switch(dma)
- {
+
+ err = hdlc_open(d);
+ if (err) {
+ switch (dma) {
case 0:
- z8530_sync_close(d, &sv11->sync.chanA);
+ z8530_sync_close(d, &sv11->chanA);
break;
case 1:
- z8530_sync_dma_close(d, &sv11->sync.chanA);
+ z8530_sync_dma_close(d, &sv11->chanA);
break;
case 2:
- z8530_sync_txdma_close(d, &sv11->sync.chanA);
+ z8530_sync_txdma_close(d, &sv11->chanA);
break;
- }
+ }
return err;
}
- sv11->sync.chanA.rx_function=hostess_input;
-
+ sv11->chanA.rx_function = hostess_input;
+
/*
* Go go go
*/
@@ -128,30 +123,24 @@ static int hostess_open(struct net_device *d)
static int hostess_close(struct net_device *d)
{
- struct sv11_device *sv11=d->ml_priv;
+ struct z8530_dev *sv11 = dev_to_sv(d);
/*
* Discard new frames
*/
- sv11->sync.chanA.rx_function=z8530_null_rx;
- /*
- * PPP off
- */
- sppp_close(d);
- /*
- * Link layer down
- */
+ sv11->chanA.rx_function = z8530_null_rx;
+
+ hdlc_close(d);
netif_stop_queue(d);
-
- switch(dma)
- {
+
+ switch (dma) {
case 0:
- z8530_sync_close(d, &sv11->sync.chanA);
+ z8530_sync_close(d, &sv11->chanA);
break;
case 1:
- z8530_sync_dma_close(d, &sv11->sync.chanA);
+ z8530_sync_dma_close(d, &sv11->chanA);
break;
case 2:
- z8530_sync_txdma_close(d, &sv11->sync.chanA);
+ z8530_sync_txdma_close(d, &sv11->chanA);
break;
}
return 0;
@@ -159,232 +148,174 @@ static int hostess_close(struct net_device *d)
static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
- /* struct sv11_device *sv11=d->ml_priv;
- z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */
- return sppp_do_ioctl(d, ifr,cmd);
-}
-
-static struct net_device_stats *hostess_get_stats(struct net_device *d)
-{
- struct sv11_device *sv11=d->ml_priv;
- if(sv11)
- return z8530_get_stats(&sv11->sync.chanA);
- else
- return NULL;
+ /* struct z8530_dev *sv11=dev_to_sv(d);
+ z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
+ return hdlc_ioctl(d, ifr, cmd);
}
/*
- * Passed PPP frames, fire them downwind.
+ * Passed network frames, fire them downwind.
*/
-
+
static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
{
- struct sv11_device *sv11=d->ml_priv;
- return z8530_queue_xmit(&sv11->sync.chanA, skb);
+ return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
}
-static int hostess_neigh_setup(struct neighbour *n)
+static int hostess_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
{
- if (n->nud_state == NUD_NONE) {
- n->ops = &arp_broken_ops;
- n->output = n->ops->output;
- }
- return 0;
-}
-
-static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
-{
- if (p->tbl->family == AF_INET) {
- p->neigh_setup = hostess_neigh_setup;
- p->ucast_probes = 0;
- p->mcast_probes = 0;
- }
- return 0;
-}
-
-static void sv11_setup(struct net_device *dev)
-{
- dev->open = hostess_open;
- dev->stop = hostess_close;
- dev->hard_start_xmit = hostess_queue_xmit;
- dev->get_stats = hostess_get_stats;
- dev->do_ioctl = hostess_ioctl;
- dev->neigh_setup = hostess_neigh_setup_dev;
+ if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+ return 0;
+ return -EINVAL;
}
/*
* Description block for a Comtrol Hostess SV11 card
*/
-
-static struct sv11_device *sv11_init(int iobase, int irq)
+
+static struct z8530_dev *sv11_init(int iobase, int irq)
{
- struct z8530_dev *dev;
- struct sv11_device *sv;
-
+ struct z8530_dev *sv;
+ struct net_device *netdev;
/*
* Get the needed I/O space
*/
-
- if(!request_region(iobase, 8, "Comtrol SV11"))
- {
- printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase);
+
+ if (!request_region(iobase, 8, "Comtrol SV11")) {
+ printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n",
+ iobase);
return NULL;
}
-
- sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL);
- if(!sv)
- goto fail3;
-
- sv->if_ptr=&sv->netdev;
-
- sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
- if(!sv->netdev.dev)
- goto fail2;
-
- dev=&sv->sync;
-
+
+ sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
+ if (!sv)
+ goto err_kzalloc;
+
/*
* Stuff in the I/O addressing
*/
-
- dev->active = 0;
-
- dev->chanA.ctrlio=iobase+1;
- dev->chanA.dataio=iobase+3;
- dev->chanB.ctrlio=-1;
- dev->chanB.dataio=-1;
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
-
- outb(0, iobase+4); /* DMA off */
-
+
+ sv->active = 0;
+
+ sv->chanA.ctrlio = iobase + 1;
+ sv->chanA.dataio = iobase + 3;
+ sv->chanB.ctrlio = -1;
+ sv->chanB.dataio = -1;
+ sv->chanA.irqs = &z8530_nop;
+ sv->chanB.irqs = &z8530_nop;
+
+ outb(0, iobase + 4); /* DMA off */
+
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
-
- if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0)
- {
+
+ if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
+ "Hostess SV11", sv) < 0) {
printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
- goto fail1;
+ goto err_irq;
}
-
- dev->irq=irq;
- dev->chanA.private=sv;
- dev->chanA.netdevice=sv->netdev.dev;
- dev->chanA.dev=dev;
- dev->chanB.dev=dev;
-
- if(dma)
- {
+
+ sv->irq = irq;
+ sv->chanA.private = sv;
+ sv->chanA.dev = sv;
+ sv->chanB.dev = sv;
+
+ if (dma) {
/*
* You can have DMA off or 1 and 3 thats the lot
* on the Comtrol.
*/
- dev->chanA.txdma=3;
- dev->chanA.rxdma=1;
- outb(0x03|0x08, iobase+4); /* DMA on */
- if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0)
- goto fail;
-
- if(dma==1)
- {
- if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0)
- goto dmafail;
- }
+ sv->chanA.txdma = 3;
+ sv->chanA.rxdma = 1;
+ outb(0x03 | 0x08, iobase + 4); /* DMA on */
+ if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
+ goto err_txdma;
+
+ if (dma == 1)
+ if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
+ goto err_rxdma;
}
/* Kill our private IRQ line the hostess can end up chattering
until the configuration is set */
disable_irq(irq);
-
+
/*
* Begin normal initialise
*/
-
- if(z8530_init(dev)!=0)
- {
+
+ if (z8530_init(sv)) {
printk(KERN_ERR "Z8530 series device not found.\n");
enable_irq(irq);
- goto dmafail2;
+ goto free_dma;
}
- z8530_channel_load(&dev->chanB, z8530_dead_port);
- if(dev->type==Z85C30)
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
+ z8530_channel_load(&sv->chanB, z8530_dead_port);
+ if (sv->type == Z85C30)
+ z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
else
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
-
+ z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
+
enable_irq(irq);
-
/*
* Now we can take the IRQ
*/
- if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0)
- {
- struct net_device *d=dev->chanA.netdevice;
- /*
- * Initialise the PPP components
- */
- d->ml_priv = sv;
- sppp_attach(&sv->netdev);
-
- /*
- * Local fields
- */
-
- d->base_addr = iobase;
- d->irq = irq;
-
- if(register_netdev(d))
- {
- printk(KERN_ERR "%s: unable to register device.\n",
- d->name);
- sppp_detach(d);
- goto dmafail2;
- }
+ sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
+ if (!netdev)
+ goto free_dma;
- z8530_describe(dev, "I/O", iobase);
- dev->active=1;
- return sv;
+ dev_to_hdlc(netdev)->attach = hostess_attach;
+ dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
+ netdev->open = hostess_open;
+ netdev->stop = hostess_close;
+ netdev->do_ioctl = hostess_ioctl;
+ netdev->base_addr = iobase;
+ netdev->irq = irq;
+
+ if (register_hdlc_device(netdev)) {
+ printk(KERN_ERR "hostess: unable to register HDLC device.\n");
+ free_netdev(netdev);
+ goto free_dma;
}
-dmafail2:
- if(dma==1)
- free_dma(dev->chanA.rxdma);
-dmafail:
- if(dma)
- free_dma(dev->chanA.txdma);
-fail:
- free_irq(irq, dev);
-fail1:
- free_netdev(sv->netdev.dev);
-fail2:
+
+ z8530_describe(sv, "I/O", iobase);
+ sv->active = 1;
+ return sv;
+
+free_dma:
+ if (dma == 1)
+ free_dma(sv->chanA.rxdma);
+err_rxdma:
+ if (dma)
+ free_dma(sv->chanA.txdma);
+err_txdma:
+ free_irq(irq, sv);
+err_irq:
kfree(sv);
-fail3:
- release_region(iobase,8);
+err_kzalloc:
+ release_region(iobase, 8);
return NULL;
}
-static void sv11_shutdown(struct sv11_device *dev)
+static void sv11_shutdown(struct z8530_dev *dev)
{
- sppp_detach(dev->netdev.dev);
- unregister_netdev(dev->netdev.dev);
- z8530_shutdown(&dev->sync);
- free_irq(dev->sync.irq, dev);
- if(dma)
- {
- if(dma==1)
- free_dma(dev->sync.chanA.rxdma);
- free_dma(dev->sync.chanA.txdma);
+ unregister_hdlc_device(dev->chanA.netdevice);
+ z8530_shutdown(dev);
+ free_irq(dev->irq, dev);
+ if (dma) {
+ if (dma == 1)
+ free_dma(dev->chanA.rxdma);
+ free_dma(dev->chanA.txdma);
}
- release_region(dev->sync.chanA.ctrlio-1, 8);
- free_netdev(dev->netdev.dev);
+ release_region(dev->chanA.ctrlio - 1, 8);
+ free_netdev(dev->chanA.netdevice);
kfree(dev);
}
-#ifdef MODULE
-
-static int io=0x200;
-static int irq=9;
+static int io = 0x200;
+static int irq = 9;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
@@ -397,22 +328,17 @@ MODULE_AUTHOR("Alan Cox");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
-static struct sv11_device *sv11_unit;
+static struct z8530_dev *sv11_unit;
int init_module(void)
{
- printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n");
- printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n");
- if((sv11_unit=sv11_init(io,irq))==NULL)
+ if ((sv11_unit = sv11_init(io, irq)) == NULL)
return -ENODEV;
return 0;
}
void cleanup_module(void)
{
- if(sv11_unit)
+ if (sv11_unit)
sv11_shutdown(sv11_unit);
}
-
-#endif
-
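
On the receive side, both the cosa and hostess conversions above replace the hard-coded ETH_P_WAN_PPP protocol with hdlc_type_trans(), which lets whichever protocol is attached (Cisco HDLC, PPP, Frame Relay, ...) claim the frame. A minimal sketch of that handoff, assuming a hypothetical foo_rx() called with one complete received frame, is:

#include <linux/hdlc.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* drop the trailing CRC, as hostess_input() does above */
	skb_trim(skb, skb->len - 2);

	/* let generic HDLC stamp the protocol for the attached proto driver */
	skb->protocol = hdlc_type_trans(skb, dev);
	skb_reset_mac_header(skb);
	skb->dev = dev;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	netif_rx(skb);			/* hand off; processed in softirq context */
	dev->last_rx = jiffies;
}

The counters now live in net_device->stats rather than in a per-driver copy, which is what lets cosa drop its cosa_net_stats() helper and hostess drop hostess_get_stats().
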
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 882e58c1bfd..4ced7ac16c2 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -11,12 +11,12 @@ unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
devaddr, unsigned regno);
void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
unsigned regno, unsigned data);
-void lmc_led_on(lmc_softc_t * const, u_int32_t);
-void lmc_led_off(lmc_softc_t * const, u_int32_t);
+void lmc_led_on(lmc_softc_t * const, u32);
+void lmc_led_off(lmc_softc_t * const, u32);
unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits);
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits);
+void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
+void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -26,8 +26,7 @@ extern lmc_media_t lmc_t1_media;
extern lmc_media_t lmc_hssi_media;
#ifdef _DBG_EVENTLOG
-static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 );
+static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
#endif
#endif
-
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index 3b94352b0d0..15049d711f4 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -1,4 +1,3 @@
-
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
@@ -48,10 +47,10 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
#endif
#ifdef DEBUG
-u_int32_t lmcEventLogIndex = 0;
-u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
+u32 lmcEventLogIndex;
+u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3)
+void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
{
lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
lmcEventLogBuf[lmcEventLogIndex++] = arg2;
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
index cf3563859bf..2d46f121549 100644
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -38,15 +38,15 @@
#ifdef DEBUG
-extern u_int32_t lmcEventLogIndex;
-extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
+extern u32 lmcEventLogIndex;
+extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
#else
#define LMC_EVENT_LOG(x,y,z)
#endif /* end ifdef _DBG_EVENTLOG */
void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
-void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3);
+void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
void lmc_trace(struct net_device *dev, char *msg);
#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
index 57dd861cd3d..72fb113a44c 100644
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ b/drivers/net/wan/lmc/lmc_ioctl.h
@@ -61,7 +61,7 @@
/*
* IFTYPE defines
*/
-#define LMC_PPP 1 /* use sppp interface */
+#define LMC_PPP 1 /* use generic HDLC interface */
#define LMC_NET 2 /* use direct net interface */
#define LMC_RAW 3 /* use direct net interface */
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 62133cee446..f80640f5a74 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 1997-2000 LAN Media Corporation (LMC)
* All rights reserved. www.lanmedia.com
+ * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This code is written by:
* Andrew Stanley-Jones (asj@cban.com)
@@ -36,8 +37,6 @@
*
*/
-/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
@@ -49,6 +48,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/if_arp.h>
@@ -57,9 +57,6 @@
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>
-
-#include <net/syncppp.h>
-
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/dma.h>
@@ -78,8 +75,6 @@
#include "lmc_debug.h"
#include "lmc_proto.h"
-static int lmc_first_load = 0;
-
static int LMC_PKT_BUF_SZ = 1542;
static struct pci_device_id lmc_pci_tbl[] = {
@@ -91,11 +86,10 @@ static struct pci_device_id lmc_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int lmc_rx (struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
@@ -114,20 +108,14 @@ static void lmc_driver_timeout(struct net_device *dev);
* linux reserves 16 device specific IOCTLs. We call them
* LMCIOC* to control various bits of our world.
*/
-int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
+int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
- lmc_softc_t *sc;
+ lmc_softc_t *sc = dev_to_sc(dev);
lmc_ctl_t ctl;
- int ret;
- u_int16_t regVal;
+ int ret = -EOPNOTSUPP;
+ u16 regVal;
unsigned long flags;
- struct sppp *sp;
-
- ret = -EOPNOTSUPP;
-
- sc = dev->priv;
-
lmc_trace(dev, "lmc_ioctl in");
/*
@@ -149,7 +137,6 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
case LMCIOCSINFO: /*fold01*/
- sp = &((struct ppp_device *) dev)->sppp;
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
@@ -175,25 +162,20 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
}
- if (ctl.keepalive_onoff == LMC_CTL_OFF)
- sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */
- else
- sp->pp_flags |= PP_KEEPALIVE; /* Turn on */
-
ret = 0;
break;
case LMCIOCIFTYPE: /*fold01*/
{
- u_int16_t old_type = sc->if_type;
- u_int16_t new_type;
+ u16 old_type = sc->if_type;
+ u16 new_type;
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
- if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) {
+ if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
ret = -EFAULT;
break;
}
@@ -206,15 +188,11 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
}
lmc_proto_close(sc);
- lmc_proto_detach(sc);
sc->if_type = new_type;
-// lmc_proto_init(sc);
lmc_proto_attach(sc);
- lmc_proto_open(sc);
-
- ret = 0 ;
- break ;
+ ret = lmc_proto_open(sc);
+ break;
}
case LMCIOCGETXINFO: /*fold01*/
@@ -241,51 +219,53 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
- case LMCIOCGETLMCSTATS: /*fold01*/
- if (sc->lmc_cardtype == LMC_CARDTYPE_T1){
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB);
- sc->stats.framingBitErrorCount +=
- lmc_mii_readreg (sc, 0, 18) & 0xff;
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB);
- sc->stats.framingBitErrorCount +=
- (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB);
- sc->stats.lineCodeViolationCount +=
- lmc_mii_readreg (sc, 0, 18) & 0xff;
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB);
- sc->stats.lineCodeViolationCount +=
- (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR);
- regVal = lmc_mii_readreg (sc, 0, 18) & 0xff;
-
- sc->stats.lossOfFrameCount +=
- (regVal & T1FRAMER_LOF_MASK) >> 4;
- sc->stats.changeOfFrameAlignmentCount +=
- (regVal & T1FRAMER_COFA_MASK) >> 2;
- sc->stats.severelyErroredFrameCount +=
- regVal & T1FRAMER_SEF_MASK;
- }
-
- if (copy_to_user(ifr->ifr_data, &sc->stats,
- sizeof (struct lmc_statistics)))
- ret = -EFAULT;
- else
- ret = 0;
- break;
+ case LMCIOCGETLMCSTATS:
+ if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
+ lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
+ sc->extra_stats.framingBitErrorCount +=
+ lmc_mii_readreg(sc, 0, 18) & 0xff;
+ lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
+ sc->extra_stats.framingBitErrorCount +=
+ (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
+ lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
+ sc->extra_stats.lineCodeViolationCount +=
+ lmc_mii_readreg(sc, 0, 18) & 0xff;
+ lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
+ sc->extra_stats.lineCodeViolationCount +=
+ (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
+ lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
+ regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
+
+ sc->extra_stats.lossOfFrameCount +=
+ (regVal & T1FRAMER_LOF_MASK) >> 4;
+ sc->extra_stats.changeOfFrameAlignmentCount +=
+ (regVal & T1FRAMER_COFA_MASK) >> 2;
+ sc->extra_stats.severelyErroredFrameCount +=
+ regVal & T1FRAMER_SEF_MASK;
+ }
+ if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
+ sizeof(sc->lmc_device->stats)) ||
+ copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
+ &sc->extra_stats, sizeof(sc->extra_stats)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
- case LMCIOCCLEARLMCSTATS: /*fold01*/
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
+ case LMCIOCCLEARLMCSTATS:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
- memset (&sc->stats, 0, sizeof (struct lmc_statistics));
- sc->stats.check = STATCHECK;
- sc->stats.version_size = (DRIVER_VERSION << 16) +
- sizeof (struct lmc_statistics);
- sc->stats.lmc_cardtype = sc->lmc_cardtype;
- ret = 0;
- break;
+ memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
+ memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
+ sc->extra_stats.check = STATCHECK;
+ sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
+ sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
+ sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
+ ret = 0;
+ break;
case LMCIOCSETCIRCUIT: /*fold01*/
if (!capable(CAP_NET_ADMIN)){
@@ -330,7 +310,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
ret = -EFAULT;
break;
}
- if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf)))
+ if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
+ sizeof(lmcEventLogBuf)))
ret = -EFAULT;
else
ret = 0;
@@ -641,14 +622,12 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
/* the watchdog process that cruises around */
static void lmc_watchdog (unsigned long data) /*fold00*/
{
- struct net_device *dev = (struct net_device *) data;
- lmc_softc_t *sc;
+ struct net_device *dev = (struct net_device *)data;
+ lmc_softc_t *sc = dev_to_sc(dev);
int link_status;
- u_int32_t ticks;
+ u32 ticks;
unsigned long flags;
- sc = dev->priv;
-
lmc_trace(dev, "lmc_watchdog in");
spin_lock_irqsave(&sc->lmc_lock, flags);
@@ -677,22 +656,22 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
* check for a transmit interrupt timeout
* Has the packet xmt vs xmt serviced threshold been exceeded */
if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd == 0)
+ sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
+ sc->tx_TimeoutInd == 0)
{
/* wait for the watchdog to come around again */
sc->tx_TimeoutInd = 1;
}
else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd)
+ sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
+ sc->tx_TimeoutInd)
{
LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
sc->tx_TimeoutDisplay = 1;
- sc->stats.tx_TimeoutCnt++;
+ sc->extra_stats.tx_TimeoutCnt++;
/* DEC chip is stuck, hit it with a RESET!!!! */
lmc_running_reset (dev);
@@ -712,13 +691,11 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
/* reset the transmit timeout detection flag */
sc->tx_TimeoutInd = 0;
sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->stats.tx_packets;
- }
- else
- {
+ sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
+ } else {
sc->tx_TimeoutInd = 0;
sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->stats.tx_packets;
+ sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
}
/* --- end time out check ----------------------------------- */
@@ -748,19 +725,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
sc->last_link_status = 1;
/* lmc_reset (sc); Again why reset??? */
- /* Inform the world that link protocol is back up. */
netif_carrier_on(dev);
-
- /* Now we have to tell the syncppp that we had an outage
- * and that it should deal. Calling sppp_reopen here
- * should do the trick, but we may have to call sppp_close
- * when the link goes down, and call sppp_open here.
- * Subject to more testing.
- * --bbraun
- */
-
- lmc_proto_reopen(sc);
-
}
/* Call media specific watchdog functions */
@@ -816,114 +781,93 @@ kick_timer:
}
-static void lmc_setup(struct net_device * const dev) /*fold00*/
+static int lmc_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
{
- lmc_trace(dev, "lmc_setup in");
-
- dev->type = ARPHRD_HDLC;
- dev->hard_start_xmit = lmc_start_xmit;
- dev->open = lmc_open;
- dev->stop = lmc_close;
- dev->get_stats = lmc_get_stats;
- dev->do_ioctl = lmc_ioctl;
- dev->tx_timeout = lmc_driver_timeout;
- dev->watchdog_timeo = (HZ); /* 1 second */
-
- lmc_trace(dev, "lmc_setup out");
+ if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+ return 0;
+ return -EINVAL;
}
-
static int __devinit lmc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- struct net_device *dev;
- lmc_softc_t *sc;
- u16 subdevice;
- u_int16_t AdapModelNum;
- int err = -ENOMEM;
- static int cards_found;
-#ifndef GCOM
- /* We name by type not by vendor */
- static const char lmcname[] = "hdlc%d";
-#else
- /*
- * GCOM uses LMC vendor name so that clients can know which card
- * to attach to.
- */
- static const char lmcname[] = "lmc%d";
-#endif
-
-
- /*
- * Allocate our own device structure
- */
- dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
- if (!dev) {
- printk (KERN_ERR "lmc:alloc_netdev for device failed\n");
- goto out1;
- }
-
- lmc_trace(dev, "lmc_init_one in");
-
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "lmc: pci enable failed:%d\n", err);
- goto out2;
- }
-
- if (pci_request_regions(pdev, "lmc")) {
- printk(KERN_ERR "lmc: pci_request_region failed\n");
- err = -EIO;
- goto out3;
- }
-
- pci_set_drvdata(pdev, dev);
-
- if(lmc_first_load == 0){
- printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
- DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION);
- lmc_first_load = 1;
- }
-
- sc = dev->priv;
- sc->lmc_device = dev;
- sc->name = dev->name;
-
- /* Initialize the sppp layer */
- /* An ioctl can cause a subsequent detach for raw frame interface */
- dev->ml_priv = sc;
- sc->if_type = LMC_PPP;
- sc->check = 0xBEAFCAFE;
- dev->base_addr = pci_resource_start(pdev, 0);
- dev->irq = pdev->irq;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- /*
- * This will get the protocol layer ready and do any 1 time init's
- * Must have a valid sc and dev structure
- */
- lmc_proto_init(sc);
-
- lmc_proto_attach(sc);
+ lmc_softc_t *sc;
+ struct net_device *dev;
+ u16 subdevice;
+ u16 AdapModelNum;
+ int err;
+ static int cards_found;
+
+ /* lmc_trace(dev, "lmc_init_one in"); */
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
+ return err;
+ }
- /*
- * Why were we changing this???
- dev->tx_queue_len = 100;
- */
+ err = pci_request_regions(pdev, "lmc");
+ if (err) {
+ printk(KERN_ERR "lmc: pci_request_region failed\n");
+ goto err_req_io;
+ }
- /* Init the spin lock so can call it latter */
+ /*
+ * Allocate our own device structure
+ */
+ sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
+ if (!sc) {
+ err = -ENOMEM;
+ goto err_kzalloc;
+ }
- spin_lock_init(&sc->lmc_lock);
- pci_set_master(pdev);
+ dev = alloc_hdlcdev(sc);
+ if (!dev) {
+ printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
+ goto err_hdlcdev;
+ }
- printk ("%s: detected at %lx, irq %d\n", dev->name,
- dev->base_addr, dev->irq);
- if (register_netdev (dev) != 0) {
- printk (KERN_ERR "%s: register_netdev failed.\n", dev->name);
- goto out4;
- }
+ dev->type = ARPHRD_HDLC;
+ dev_to_hdlc(dev)->xmit = lmc_start_xmit;
+ dev_to_hdlc(dev)->attach = lmc_attach;
+ dev->open = lmc_open;
+ dev->stop = lmc_close;
+ dev->get_stats = lmc_get_stats;
+ dev->do_ioctl = lmc_ioctl;
+ dev->tx_timeout = lmc_driver_timeout;
+ dev->watchdog_timeo = HZ; /* 1 second */
+ dev->tx_queue_len = 100;
+ sc->lmc_device = dev;
+ sc->name = dev->name;
+ sc->if_type = LMC_PPP;
+ sc->check = 0xBEAFCAFE;
+ dev->base_addr = pci_resource_start(pdev, 0);
+ dev->irq = pdev->irq;
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /*
+ * This will get the protocol layer ready and do any 1 time init's
+ * Must have a valid sc and dev structure
+ */
+ lmc_proto_attach(sc);
+
+ /* Init the spin lock so can call it latter */
+
+ spin_lock_init(&sc->lmc_lock);
+ pci_set_master(pdev);
+
+ printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
+ dev->base_addr, dev->irq);
+
+ err = register_hdlc_device(dev);
+ if (err) {
+ printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
+ free_netdev(dev);
+ goto err_hdlcdev;
+ }
sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
@@ -939,27 +883,27 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
switch (subdevice) {
case PCI_DEVICE_ID_LMC_HSSI:
- printk ("%s: LMC HSSI\n", dev->name);
+ printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
sc->lmc_media = &lmc_hssi_media;
break;
case PCI_DEVICE_ID_LMC_DS3:
- printk ("%s: LMC DS3\n", dev->name);
+ printk(KERN_INFO "%s: LMC DS3\n", dev->name);
sc->lmc_cardtype = LMC_CARDTYPE_DS3;
sc->lmc_media = &lmc_ds3_media;
break;
case PCI_DEVICE_ID_LMC_SSI:
- printk ("%s: LMC SSI\n", dev->name);
+ printk(KERN_INFO "%s: LMC SSI\n", dev->name);
sc->lmc_cardtype = LMC_CARDTYPE_SSI;
sc->lmc_media = &lmc_ssi_media;
break;
case PCI_DEVICE_ID_LMC_T1:
- printk ("%s: LMC T1\n", dev->name);
+ printk(KERN_INFO "%s: LMC T1\n", dev->name);
sc->lmc_cardtype = LMC_CARDTYPE_T1;
sc->lmc_media = &lmc_t1_media;
break;
default:
- printk (KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name);
+ printk(KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name);
break;
}
@@ -977,32 +921,28 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
*/
AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
- if ((AdapModelNum == LMC_ADAP_T1
- && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */
- (AdapModelNum == LMC_ADAP_SSI
- && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */
- (AdapModelNum == LMC_ADAP_DS3
- && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */
- (AdapModelNum == LMC_ADAP_HSSI
- && subdevice == PCI_DEVICE_ID_LMC_HSSI))
- { /* detect LMC5200 */
+ if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
+ subdevice != PCI_DEVICE_ID_LMC_T1) &&
+ (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
+ subdevice != PCI_DEVICE_ID_LMC_SSI) &&
+ (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
+ subdevice != PCI_DEVICE_ID_LMC_DS3) &&
+ (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
+ subdevice != PCI_DEVICE_ID_LMC_HSSI))
+ printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
+ " Subsystem ID = 0x%04x\n",
+ dev->name, AdapModelNum, subdevice);
- }
- else {
- printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
- dev->name, AdapModelNum, subdevice);
-// return (NULL);
- }
/*
* reset clock
*/
LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
sc->board_idx = cards_found++;
- sc->stats.check = STATCHECK;
- sc->stats.version_size = (DRIVER_VERSION << 16) +
- sizeof (struct lmc_statistics);
- sc->stats.lmc_cardtype = sc->lmc_cardtype;
+ sc->extra_stats.check = STATCHECK;
+ sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
+ sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
+ sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
sc->lmc_ok = 0;
sc->last_link_status = 0;
@@ -1010,58 +950,51 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
lmc_trace(dev, "lmc_init_one out");
return 0;
- out4:
- lmc_proto_detach(sc);
- out3:
- if (pdev) {
- pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
- }
- out2:
- free_netdev(dev);
- out1:
- return err;
+err_hdlcdev:
+ pci_set_drvdata(pdev, NULL);
+ kfree(sc);
+err_kzalloc:
+ pci_release_regions(pdev);
+err_req_io:
+ pci_disable_device(pdev);
+ return err;
}
/*
* Called from pci when removing module.
*/
-static void __devexit lmc_remove_one (struct pci_dev *pdev)
+static void __devexit lmc_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- lmc_softc_t *sc = dev->priv;
-
- printk("%s: removing...\n", dev->name);
- lmc_proto_detach(sc);
- unregister_netdev(dev);
- free_netdev(dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- }
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ printk(KERN_DEBUG "%s: removing...\n", dev->name);
+ unregister_hdlc_device(dev);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
}
/* After this is called, packets can be sent.
* Does not initialize the addresses
*/
-static int lmc_open (struct net_device *dev) /*fold00*/
+static int lmc_open(struct net_device *dev)
{
- lmc_softc_t *sc = dev->priv;
+ lmc_softc_t *sc = dev_to_sc(dev);
+ int err;
lmc_trace(dev, "lmc_open in");
lmc_led_on(sc, LMC_DS3_LED0);
- lmc_dec_reset (sc);
- lmc_reset (sc);
-
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2,
- lmc_mii_readreg (sc, 0, 16),
- lmc_mii_readreg (sc, 0, 17));
+ lmc_dec_reset(sc);
+ lmc_reset(sc);
+ LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
+ LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
+ lmc_mii_readreg(sc, 0, 17));
if (sc->lmc_ok){
lmc_trace(dev, "lmc_open lmc_ok out");
@@ -1106,14 +1039,14 @@ static int lmc_open (struct net_device *dev) /*fold00*/
/* dev->flags |= IFF_UP; */
- lmc_proto_open(sc);
+ if ((err = lmc_proto_open(sc)) != 0)
+ return err;
dev->do_ioctl = lmc_ioctl;
netif_start_queue(dev);
-
- sc->stats.tx_tbusy0++ ;
+ sc->extra_stats.tx_tbusy0++;
/*
* select what interrupts we want to get
@@ -1165,8 +1098,7 @@ static int lmc_open (struct net_device *dev) /*fold00*/
static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
-
- lmc_softc_t *sc = (lmc_softc_t *) dev->priv;
+ lmc_softc_t *sc = dev_to_sc(dev);
lmc_trace(dev, "lmc_runnig_reset in");
@@ -1184,7 +1116,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
netif_wake_queue(dev);
sc->lmc_txfull = 0;
- sc->stats.tx_tbusy0++ ;
+ sc->extra_stats.tx_tbusy0++;
sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
@@ -1200,14 +1132,13 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
* This disables the timer for the watchdog and keepalives,
* and disables the irq for dev.
*/
-static int lmc_close (struct net_device *dev) /*fold00*/
+static int lmc_close(struct net_device *dev)
{
/* not calling release_region() as we should */
- lmc_softc_t *sc;
+ lmc_softc_t *sc = dev_to_sc(dev);
lmc_trace(dev, "lmc_close in");
-
- sc = dev->priv;
+
sc->lmc_ok = 0;
sc->lmc_media->set_link_status (sc, 0);
del_timer (&sc->timer);
@@ -1215,7 +1146,7 @@ static int lmc_close (struct net_device *dev) /*fold00*/
lmc_ifdown (dev);
lmc_trace(dev, "lmc_close out");
-
+
return 0;
}
@@ -1223,16 +1154,16 @@ static int lmc_close (struct net_device *dev) /*fold00*/
/* When the interface goes down, this is called */
static int lmc_ifdown (struct net_device *dev) /*fold00*/
{
- lmc_softc_t *sc = dev->priv;
+ lmc_softc_t *sc = dev_to_sc(dev);
u32 csr6;
int i;
lmc_trace(dev, "lmc_ifdown in");
-
+
/* Don't let anything else go on right now */
// dev->start = 0;
netif_stop_queue(dev);
- sc->stats.tx_tbusy1++ ;
+ sc->extra_stats.tx_tbusy1++;
/* stop interrupts */
/* Clear the interrupt mask */
@@ -1244,8 +1175,8 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
LMC_CSR_WRITE (sc, csr_command, csr6);
- sc->stats.rx_missed_errors +=
- LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
+ sc->lmc_device->stats.rx_missed_errors +=
+ LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
/* release the interrupt */
if(sc->got_irq == 1){
@@ -1276,7 +1207,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
lmc_led_off (sc, LMC_MII16_LED_ALL);
netif_wake_queue(dev);
- sc->stats.tx_tbusy0++ ;
+ sc->extra_stats.tx_tbusy0++;
lmc_trace(dev, "lmc_ifdown out");
@@ -1289,7 +1220,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
{
struct net_device *dev = (struct net_device *) dev_instance;
- lmc_softc_t *sc;
+ lmc_softc_t *sc = dev_to_sc(dev);
u32 csr;
int i;
s32 stat;
@@ -1300,8 +1231,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
lmc_trace(dev, "lmc_interrupt in");
- sc = dev->priv;
-
spin_lock(&sc->lmc_lock);
/*
@@ -1354,7 +1283,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
int n_compl = 0 ;
/* reset the transmit timeout detection flag -baz */
- sc->stats.tx_NoCompleteCnt = 0;
+ sc->extra_stats.tx_NoCompleteCnt = 0;
badtx = sc->lmc_taint_tx;
i = badtx % LMC_TXDESCS;
@@ -1378,27 +1307,25 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
if (sc->lmc_txq[i] == NULL)
continue;
- /*
- * Check the total error summary to look for any errors
- */
- if (stat & 0x8000) {
- sc->stats.tx_errors++;
- if (stat & 0x4104)
- sc->stats.tx_aborted_errors++;
- if (stat & 0x0C00)
- sc->stats.tx_carrier_errors++;
- if (stat & 0x0200)
- sc->stats.tx_window_errors++;
- if (stat & 0x0002)
- sc->stats.tx_fifo_errors++;
- }
- else {
-
- sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
-
- sc->stats.tx_packets++;
+ /*
+ * Check the total error summary to look for any errors
+ */
+ if (stat & 0x8000) {
+ sc->lmc_device->stats.tx_errors++;
+ if (stat & 0x4104)
+ sc->lmc_device->stats.tx_aborted_errors++;
+ if (stat & 0x0C00)
+ sc->lmc_device->stats.tx_carrier_errors++;
+ if (stat & 0x0200)
+ sc->lmc_device->stats.tx_window_errors++;
+ if (stat & 0x0002)
+ sc->lmc_device->stats.tx_fifo_errors++;
+ } else {
+ sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
+
+ sc->lmc_device->stats.tx_packets++;
}
-
+
// dev_kfree_skb(sc->lmc_txq[i]);
dev_kfree_skb_irq(sc->lmc_txq[i]);
sc->lmc_txq[i] = NULL;
@@ -1415,13 +1342,13 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
sc->lmc_txfull = 0;
netif_wake_queue(dev);
- sc->stats.tx_tbusy0++ ;
+ sc->extra_stats.tx_tbusy0++;
#ifdef DEBUG
- sc->stats.dirtyTx = badtx;
- sc->stats.lmc_next_tx = sc->lmc_next_tx;
- sc->stats.lmc_txfull = sc->lmc_txfull;
+ sc->extra_stats.dirtyTx = badtx;
+ sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
+ sc->extra_stats.lmc_txfull = sc->lmc_txfull;
#endif
sc->lmc_taint_tx = badtx;
@@ -1476,9 +1403,9 @@ lmc_int_fail_out:
return IRQ_RETVAL(handled);
}
-static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/
+static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- lmc_softc_t *sc;
+ lmc_softc_t *sc = dev_to_sc(dev);
u32 flag;
int entry;
int ret = 0;
@@ -1486,8 +1413,6 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
lmc_trace(dev, "lmc_start_xmit in");
- sc = dev->priv;
-
spin_lock_irqsave(&sc->lmc_lock, flags);
/* normal path, tbusy known to be zero */
@@ -1532,8 +1457,8 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
{ /* ring full, go busy */
sc->lmc_txfull = 1;
- netif_stop_queue(dev);
- sc->stats.tx_tbusy1++ ;
+ netif_stop_queue(dev);
+ sc->extra_stats.tx_tbusy1++;
LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
}
#endif
@@ -1550,7 +1475,7 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
* the watchdog timer handler. -baz
*/
- sc->stats.tx_NoCompleteCnt++;
+ sc->extra_stats.tx_NoCompleteCnt++;
sc->lmc_next_tx++;
/* give ownership to the chip */
@@ -1569,9 +1494,9 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
}
-static int lmc_rx (struct net_device *dev) /*fold00*/
+static int lmc_rx(struct net_device *dev)
{
- lmc_softc_t *sc;
+ lmc_softc_t *sc = dev_to_sc(dev);
int i;
int rx_work_limit = LMC_RXDESCS;
unsigned int next_rx;
@@ -1583,8 +1508,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
lmc_trace(dev, "lmc_rx in");
- sc = dev->priv;
-
lmc_led_on(sc, LMC_DS3_LED3);
rxIntLoopCnt = 0; /* debug -baz */
@@ -1597,39 +1520,38 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
rxIntLoopCnt++; /* debug -baz */
len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
- if ((stat & 0x0000ffff) != 0x7fff) {
- /* Oversized frame */
- sc->stats.rx_length_errors++;
- goto skip_packet;
- }
- }
-
- if(stat & 0x00000008){ /* Catch a dribbling bit error */
- sc->stats.rx_errors++;
- sc->stats.rx_frame_errors++;
- goto skip_packet;
- }
+ if ((stat & 0x0000ffff) != 0x7fff) {
+ /* Oversized frame */
+ sc->lmc_device->stats.rx_length_errors++;
+ goto skip_packet;
+ }
+ }
+ if (stat & 0x00000008) { /* Catch a dribbling bit error */
+ sc->lmc_device->stats.rx_errors++;
+ sc->lmc_device->stats.rx_frame_errors++;
+ goto skip_packet;
+ }
- if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */
- sc->stats.rx_errors++;
- sc->stats.rx_crc_errors++;
- goto skip_packet;
- }
+ if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
+ sc->lmc_device->stats.rx_errors++;
+ sc->lmc_device->stats.rx_crc_errors++;
+ goto skip_packet;
+ }
- if (len > LMC_PKT_BUF_SZ){
- sc->stats.rx_length_errors++;
- localLengthErrCnt++;
- goto skip_packet;
- }
+ if (len > LMC_PKT_BUF_SZ) {
+ sc->lmc_device->stats.rx_length_errors++;
+ localLengthErrCnt++;
+ goto skip_packet;
+ }
- if (len < sc->lmc_crcSize + 2) {
- sc->stats.rx_length_errors++;
- sc->stats.rx_SmallPktCnt++;
- localLengthErrCnt++;
- goto skip_packet;
- }
+ if (len < sc->lmc_crcSize + 2) {
+ sc->lmc_device->stats.rx_length_errors++;
+ sc->extra_stats.rx_SmallPktCnt++;
+ localLengthErrCnt++;
+ goto skip_packet;
+ }
if(stat & 0x00004000){
printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
@@ -1656,8 +1578,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
}
dev->last_rx = jiffies;
- sc->stats.rx_packets++;
- sc->stats.rx_bytes += len;
+ sc->lmc_device->stats.rx_packets++;
+ sc->lmc_device->stats.rx_bytes += len;
LMC_CONSOLE_LOG("recv", skb->data, len);
@@ -1679,7 +1601,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
skb_put (skb, len);
skb->protocol = lmc_proto_type(sc, skb);
- skb->protocol = htons(ETH_P_WAN_PPP);
skb_reset_mac_header(skb);
/* skb_reset_network_header(skb); */
skb->dev = dev;
@@ -1704,7 +1625,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
* in which care we'll try to allocate the buffer
* again. (once a second)
*/
- sc->stats.rx_BuffAllocErr++;
+ sc->extra_stats.rx_BuffAllocErr++;
LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
sc->failed_recv_alloc = 1;
goto skip_out_of_mem;
@@ -1739,16 +1660,14 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
* descriptors with bogus packets
*
if (localLengthErrCnt > LMC_RXDESCS - 3) {
- sc->stats.rx_BadPktSurgeCnt++;
- LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE,
- localLengthErrCnt,
- sc->stats.rx_BadPktSurgeCnt);
+ sc->extra_stats.rx_BadPktSurgeCnt++;
+ LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
+ sc->extra_stats.rx_BadPktSurgeCnt);
} */
/* save max count of receive descriptors serviced */
- if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) {
- sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
- }
+ if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
+ sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
#ifdef DEBUG
if (rxIntLoopCnt == 0)
@@ -1775,23 +1694,22 @@ skip_out_of_mem:
return 0;
}
-static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/
+static struct net_device_stats *lmc_get_stats(struct net_device *dev)
{
- lmc_softc_t *sc = dev->priv;
+ lmc_softc_t *sc = dev_to_sc(dev);
unsigned long flags;
lmc_trace(dev, "lmc_get_stats in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
+ sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_get_stats out");
- return (struct net_device_stats *) &sc->stats;
+ return &sc->lmc_device->stats;
}
static struct pci_driver lmc_driver = {
@@ -1970,7 +1888,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
if (sc->lmc_txq[i] != NULL){ /* have buffer */
dev_kfree_skb(sc->lmc_txq[i]); /* free it */
- sc->stats.tx_dropped++; /* We just dropped a packet */
+ sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
}
sc->lmc_txq[i] = NULL;
sc->lmc_txring[i].status = 0x00000000;
@@ -1982,7 +1900,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
lmc_trace(sc->lmc_device, "lmc_softreset out");
}
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
+void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
sc->lmc_gpio_io &= ~bits;
@@ -1990,7 +1908,7 @@ void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
}
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
+void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
sc->lmc_gpio_io |= bits;
@@ -1998,7 +1916,7 @@ void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
}
-void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
+void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
lmc_trace(sc->lmc_device, "lmc_led_on in");
if((~sc->lmc_miireg16) & led){ /* Already on! */
@@ -2011,7 +1929,7 @@ void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
lmc_trace(sc->lmc_device, "lmc_led_on out");
}
-void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
+void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
lmc_trace(sc->lmc_device, "lmc_led_off in");
if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
@@ -2061,13 +1979,13 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
*/
sc->lmc_media->init(sc);
- sc->stats.resetCount++;
+ sc->extra_stats.resetCount++;
lmc_trace(sc->lmc_device, "lmc_reset out");
}
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
- u_int32_t val;
+ u32 val;
lmc_trace(sc->lmc_device, "lmc_dec_reset in");
/*
@@ -2151,23 +2069,21 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
-static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
- lmc_softc_t *sc;
+static void lmc_driver_timeout(struct net_device *dev)
+{
+ lmc_softc_t *sc = dev_to_sc(dev);
u32 csr6;
unsigned long flags;
lmc_trace(dev, "lmc_driver_timeout in");
- sc = dev->priv;
-
spin_lock_irqsave(&sc->lmc_lock, flags);
printk("%s: Xmitter busy|\n", dev->name);
- sc->stats.tx_tbusy_calls++ ;
- if (jiffies - dev->trans_start < TX_TIMEOUT) {
- goto bug_out;
- }
+ sc->extra_stats.tx_tbusy_calls++;
+ if (jiffies - dev->trans_start < TX_TIMEOUT)
+ goto bug_out;
/*
* Chip seems to have locked up
@@ -2178,7 +2094,7 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
LMC_CSR_READ (sc, csr_status),
- sc->stats.tx_ProcTimeout);
+ sc->extra_stats.tx_ProcTimeout);
lmc_running_reset (dev);
@@ -2195,8 +2111,8 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
/* immediate transmit */
LMC_CSR_WRITE (sc, csr_txpoll, 0);
- sc->stats.tx_errors++;
- sc->stats.tx_ProcTimeout++; /* -baz */
+ sc->lmc_device->stats.tx_errors++;
+ sc->extra_stats.tx_ProcTimeout++; /* -baz */
dev->trans_start = jiffies;
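
The lmc_main.c hunks above split the old catch-all sc->stats in two: standard interface counters now land in the net_device's own stats, and driver-only diagnostics move into the new sc->extra_stats. The following is a condensed, illustrative sketch of that split rather than code from the patch; the helper name lmc_account_tx is made up, while the fields and mask values are the ones the tx-completion hunk touches.

static void lmc_account_tx(lmc_softc_t *sc, u32 stat, u32 len)
{
	struct net_device_stats *stats = &sc->lmc_device->stats;

	if (stat & 0x8000) {			/* Tulip "error summary" bit */
		stats->tx_errors++;
		if (stat & 0x0002)
			stats->tx_fifo_errors++;
	} else {
		stats->tx_bytes += len & 0x7ff;	/* descriptor length field */
		stats->tx_packets++;
	}
}

Driver-private counters such as tx_tbusy0, tx_NoCompleteCnt and rx_BuffAllocErr keep their meaning but now live in sc->extra_stats, so lmc_get_stats() only has to fold the hardware missed-frames register into the net_device counters and return &sc->lmc_device->stats, exactly as its hunk above shows.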
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 8aa461c941c..f327674fc93 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -16,8 +16,6 @@
#include <linux/inet.h>
#include <linux/bitops.h>
-#include <net/syncppp.h>
-
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/dma.h>
@@ -95,8 +93,7 @@ static void lmc_dummy_set_1 (lmc_softc_t * const, int);
static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
static inline void write_av9110_bit (lmc_softc_t *, int);
-static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t,
- u_int32_t, u_int32_t);
+static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
lmc_media_t lmc_ds3_media = {
lmc_ds3_init, /* special media init stuff */
@@ -427,7 +424,7 @@ lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
static int
lmc_ds3_get_link_status (lmc_softc_t * const sc)
{
- u_int16_t link_status, link_status_11;
+ u16 link_status, link_status_11;
int ret = 1;
lmc_mii_writereg (sc, 0, 17, 7);
@@ -449,7 +446,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
(link_status & LMC_FRAMER_REG0_OOFS)){
ret = 0;
if(sc->last_led_err[3] != 1){
- u16 r1;
+ u16 r1;
lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
r1 = lmc_mii_readreg (sc, 0, 18);
r1 &= 0xfe;
@@ -462,7 +459,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
else {
lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */
if(sc->last_led_err[3] == 1){
- u16 r1;
+ u16 r1;
lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
r1 = lmc_mii_readreg (sc, 0, 18);
r1 |= 0x01;
@@ -540,20 +537,19 @@ lmc_ds3_watchdog (lmc_softc_t * const sc)
* SSI methods
*/
-static void
-lmc_ssi_init (lmc_softc_t * const sc)
+static void lmc_ssi_init(lmc_softc_t * const sc)
{
- u_int16_t mii17;
- int cable;
+ u16 mii17;
+ int cable;
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
+ sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
- mii17 = lmc_mii_readreg (sc, 0, 17);
+ mii17 = lmc_mii_readreg(sc, 0, 17);
- cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
- sc->ictl.cable_type = cable;
+ cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
+ sc->ictl.cable_type = cable;
- lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
+ lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
}
static void
@@ -681,11 +677,11 @@ lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
static int
lmc_ssi_get_link_status (lmc_softc_t * const sc)
{
- u_int16_t link_status;
- u_int32_t ticks;
+ u16 link_status;
+ u32 ticks;
int ret = 1;
int hw_hdsk = 1;
-
+
/*
* missing CTS? Hmm. If we require CTS on, we may never get the
* link to come up, so omit it in this test.
@@ -720,9 +716,9 @@ lmc_ssi_get_link_status (lmc_softc_t * const sc)
}
else if (ticks == 0 ) { /* no clock found ? */
ret = 0;
- if(sc->last_led_err[3] != 1){
- sc->stats.tx_lossOfClockCnt++;
- printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
+ if (sc->last_led_err[3] != 1) {
+ sc->extra_stats.tx_lossOfClockCnt++;
+ printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
}
sc->last_led_err[3] = 1;
lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
@@ -838,9 +834,7 @@ write_av9110_bit (lmc_softc_t * sc, int c)
LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
}
-static void
-write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
- u_int32_t x, u_int32_t r)
+static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
{
int i;
@@ -887,19 +881,13 @@ write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
| LMC_GEP_SSI_GENERATOR));
}
-static void
-lmc_ssi_watchdog (lmc_softc_t * const sc)
+static void lmc_ssi_watchdog(lmc_softc_t * const sc)
{
- u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17);
- if (((mii17 >> 3) & 7) == 7)
- {
- lmc_led_off (sc, LMC_MII16_LED2);
- }
- else
- {
- lmc_led_on (sc, LMC_MII16_LED2);
- }
-
+ u16 mii17 = lmc_mii_readreg(sc, 0, 17);
+ if (((mii17 >> 3) & 7) == 7)
+ lmc_led_off(sc, LMC_MII16_LED2);
+ else
+ lmc_led_on(sc, LMC_MII16_LED2);
}
/*
@@ -929,7 +917,7 @@ lmc_t1_read (lmc_softc_t * const sc, int a)
static void
lmc_t1_init (lmc_softc_t * const sc)
{
- u_int16_t mii16;
+ u16 mii16;
int i;
sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
@@ -1028,7 +1016,7 @@ lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
*/ static int
lmc_t1_get_link_status (lmc_softc_t * const sc)
{
- u_int16_t link_status;
+ u16 link_status;
int ret = 1;
/* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 85315758198..be9877ff551 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -36,9 +36,6 @@
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
-
-#include <net/syncppp.h>
-
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/dma.h>
@@ -50,48 +47,6 @@
#include "lmc_ioctl.h"
#include "lmc_proto.h"
-/*
- * The compile-time variable SPPPSTUP causes the module to be
- * compiled without referencing any of the sync ppp routines.
- */
-#ifdef SPPPSTUB
-#define SPPP_detach(d) (void)0
-#define SPPP_open(d) 0
-#define SPPP_reopen(d) (void)0
-#define SPPP_close(d) (void)0
-#define SPPP_attach(d) (void)0
-#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
-#else
-#define SPPP_attach(x) sppp_attach((x)->pd)
-#define SPPP_detach(x) sppp_detach((x)->pd->dev)
-#define SPPP_open(x) sppp_open((x)->pd->dev)
-#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)
-#define SPPP_close(x) sppp_close((x)->pd->dev)
-#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))
-#endif
-
-// init
-void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
-{
- lmc_trace(sc->lmc_device, "lmc_proto_init in");
- switch(sc->if_type){
- case LMC_PPP:
- sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
- if (!sc->pd) {
- printk("lmc_proto_init(): kmalloc failure!\n");
- return;
- }
- sc->pd->dev = sc->lmc_device;
- sc->if_ptr = sc->pd;
- break;
- case LMC_RAW:
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_init out");
-}
-
// attach
void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
{
@@ -100,7 +55,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
case LMC_PPP:
{
struct net_device *dev = sc->lmc_device;
- SPPP_attach(sc);
dev->do_ioctl = lmc_ioctl;
}
break;
@@ -108,7 +62,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
{
struct net_device *dev = sc->lmc_device;
/*
- * They set a few basics because they don't use sync_ppp
+ * They set a few basics because they don't use HDLC
*/
dev->flags |= IFF_POINTOPOINT;
@@ -124,88 +78,39 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
lmc_trace(sc->lmc_device, "lmc_proto_attach out");
}
-// detach
-void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/
+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
{
- switch(sc->if_type){
- case LMC_PPP:
- SPPP_detach(sc);
- break;
- case LMC_RAW: /* Tell someone we're detaching? */
- break;
- default:
- break;
- }
-
+ lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
+ if (sc->if_type == LMC_PPP)
+ return hdlc_ioctl(sc->lmc_device, ifr, cmd);
+ return -EOPNOTSUPP;
}
-// reopen
-void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/
+int lmc_proto_open(lmc_softc_t *sc)
{
- lmc_trace(sc->lmc_device, "lmc_proto_reopen in");
- switch(sc->if_type){
- case LMC_PPP:
- SPPP_reopen(sc);
- break;
- case LMC_RAW: /* Reset the interface after being down, prerape to receive packets again */
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_reopen out");
-}
+ int ret = 0;
+ lmc_trace(sc->lmc_device, "lmc_proto_open in");
-// ioctl
-int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/
-{
- lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
- switch(sc->if_type){
- case LMC_PPP:
- return SPPP_do_ioctl (sc, ifr, cmd);
- break;
- default:
- return -EOPNOTSUPP;
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
+ if (sc->if_type == LMC_PPP) {
+ ret = hdlc_open(sc->lmc_device);
+ if (ret < 0)
+ printk(KERN_WARNING "%s: HDLC open failed: %d\n",
+ sc->name, ret);
+ }
+
+ lmc_trace(sc->lmc_device, "lmc_proto_open out");
+ return ret;
}
-// open
-void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/
+void lmc_proto_close(lmc_softc_t *sc)
{
- int ret;
+ lmc_trace(sc->lmc_device, "lmc_proto_close in");
- lmc_trace(sc->lmc_device, "lmc_proto_open in");
- switch(sc->if_type){
- case LMC_PPP:
- ret = SPPP_open(sc);
- if(ret < 0)
- printk("%s: syncPPP open failed: %d\n", sc->name, ret);
- break;
- case LMC_RAW: /* We're about to start getting packets! */
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_open out");
-}
-
-// close
+ if (sc->if_type == LMC_PPP)
+ hdlc_close(sc->lmc_device);
-void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/
-{
- lmc_trace(sc->lmc_device, "lmc_proto_close in");
- switch(sc->if_type){
- case LMC_PPP:
- SPPP_close(sc);
- break;
- case LMC_RAW: /* Interface going down */
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_close out");
+ lmc_trace(sc->lmc_device, "lmc_proto_close out");
}
__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
@@ -213,8 +118,8 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
lmc_trace(sc->lmc_device, "lmc_proto_type in");
switch(sc->if_type){
case LMC_PPP:
- return htons(ETH_P_WAN_PPP);
- break;
+ return hdlc_type_trans(skb, sc->lmc_device);
+ break;
case LMC_NET:
return htons(ETH_P_802_2);
break;
@@ -245,4 +150,3 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
}
lmc_trace(sc->lmc_device, "lmc_proto_netif out");
}
-
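
With the syncppp stub macros gone, lmc_proto.c is reduced to thin wrappers around the generic HDLC layer: hdlc_open()/hdlc_close() bring the attached protocol up and down, hdlc_ioctl() handles protocol configuration, and hdlc_type_trans() replaces the hard-coded htons(ETH_P_WAN_PPP) on receive. Below is a rough sketch of how a WAN driver's own netdev callbacks chain into those entry points; the my_wan_* names are hypothetical, the hdlc_* calls are the real <linux/hdlc.h> API the patch switches to.

#include <linux/hdlc.h>
#include <linux/netdevice.h>

/* Hypothetical callbacks, only meant to show the call order around the
 * generic HDLC layer. */
static int my_wan_open(struct net_device *dev)
{
	int err = hdlc_open(dev);	/* bring the attached HDLC protocol up */

	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int my_wan_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	hdlc_close(dev);		/* and tear it back down */
	return 0;
}

lmc_open() in the lmc_main.c hunks follows the same shape: it now checks the lmc_proto_open() return value and fails the open if the HDLC layer refuses, instead of ignoring errors as the old syncppp path did.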
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index ccaa69e8b3c..662148c5464 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -1,16 +1,18 @@
#ifndef _LMC_PROTO_H_
#define _LMC_PROTO_H_
-void lmc_proto_init(lmc_softc_t *sc);
+#include <linux/hdlc.h>
+
void lmc_proto_attach(lmc_softc_t *sc);
-void lmc_proto_detach(lmc_softc_t *sc);
-void lmc_proto_reopen(lmc_softc_t *sc);
int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
-void lmc_proto_open(lmc_softc_t *sc);
+int lmc_proto_open(lmc_softc_t *sc);
void lmc_proto_close(lmc_softc_t *sc);
__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
-int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
-#endif
+static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
+{
+ return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
+}
+#endif
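
The new dev_to_sc() inline is what lets the dev->priv casts above disappear: generic HDLC devices carry a driver-private pointer in dev_to_hdlc(dev)->priv. The hunks here do not show where that pointer gets set; presumably the probe path allocates the netdev with alloc_hdlcdev(sc), which is what stores it. A minimal sketch of that pairing, under that assumption: my_lmc_alloc is made up, while alloc_hdlcdev() and dev_to_hdlc() are the real helpers from <linux/hdlc.h>.

#include <linux/hdlc.h>
#include <linux/slab.h>

/* Hypothetical allocation helper showing how dev_to_sc() gets its data. */
static struct net_device *my_lmc_alloc(lmc_softc_t **scp)
{
	lmc_softc_t *sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	struct net_device *dev;

	if (!sc)
		return NULL;

	dev = alloc_hdlcdev(sc);	/* stores sc in dev_to_hdlc(dev)->priv */
	if (!dev) {
		kfree(sc);
		return NULL;
	}

	sc->lmc_device = dev;		/* back-pointer the driver keeps to its netdev */
	*scp = sc;
	return dev;
}

From then on, any code that only holds the struct net_device can recover the softc with dev_to_sc(dev), which is exactly what the converted lmc_interrupt(), lmc_start_xmit() and friends do.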
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
index 6d003a39bfa..65d01978e78 100644
--- a/drivers/net/wan/lmc/lmc_var.h
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -1,8 +1,6 @@
#ifndef _LMC_VAR_H_
#define _LMC_VAR_H_
-/* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */
-
/*
* Copyright (c) 1997-2000 LAN Media Corporation (LMC)
* All rights reserved. www.lanmedia.com
@@ -19,23 +17,6 @@
#include <linux/timer.h>
-#ifndef __KERNEL__
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-
-#define BITS_PER_LONG 32
-
-#endif
-
/*
* basic definitions used in lmc include files
*/
@@ -45,9 +26,6 @@ typedef struct lmc___media lmc_media_t;
typedef struct lmc___ctl lmc_ctl_t;
#define lmc_csrptr_t unsigned long
-#define u_int16_t u16
-#define u_int8_t u8
-#define tulip_uint32_t u32
#define LMC_REG_RANGE 0x80
@@ -122,45 +100,45 @@ struct lmc_regfile_t {
* used to define bits in the second tulip_desc_t field (length)
* for the transmit descriptor -baz */
-#define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF))
-#define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800))
-#define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000))
-#define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000))
-#define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000))
-#define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000))
-#define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000))
-#define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000))
-#define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000))
-#define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000))
-#define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000))
-#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000))
+#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF))
+#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800))
+#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000))
+#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000))
+#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000))
+#define LMC_TDES_END_OF_RING ((u32)(0x02000000))
+#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000))
+#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000))
+#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000))
+#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000))
+#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000))
+#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
#define TDES_COLLISION_COUNT_BIT_NUMBER 3
/* Constants for the RCV descriptor RDES */
-#define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001))
-#define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002))
-#define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004))
-#define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008))
-#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010))
-#define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020))
-#define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040))
-#define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080))
-#define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100))
-#define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200))
-#define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400))
-#define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800))
-#define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000))
-#define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000))
-#define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000))
-#define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000))
-#define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000))
+#define LMC_RDES_OVERFLOW ((u32)(0x00000001))
+#define LMC_RDES_CRC_ERROR ((u32)(0x00000002))
+#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004))
+#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008))
+#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
+#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020))
+#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040))
+#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080))
+#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100))
+#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200))
+#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400))
+#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800))
+#define LMC_RDES_DATA_TYPE ((u32)(0x00003000))
+#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000))
+#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000))
+#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000))
+#define LMC_RDES_OWN_BIT ((u32)(0x80000000))
#define RDES_FRAME_LENGTH_BIT_NUMBER 16
-#define LMC_RDES_ERROR_MASK ( (u_int32_t)( \
+#define LMC_RDES_ERROR_MASK ( (u32)( \
LMC_RDES_OVERFLOW \
| LMC_RDES_DRIBBLING_BIT \
| LMC_RDES_REPORT_ON_MII_ERR \
@@ -172,32 +150,32 @@ struct lmc_regfile_t {
*/
typedef struct {
- u_int32_t n;
- u_int32_t m;
- u_int32_t v;
- u_int32_t x;
- u_int32_t r;
- u_int32_t f;
- u_int32_t exact;
+ u32 n;
+ u32 m;
+ u32 v;
+ u32 x;
+ u32 r;
+ u32 f;
+ u32 exact;
} lmc_av9110_t;
/*
* Common structure passed to the ioctl code.
*/
struct lmc___ctl {
- u_int32_t cardtype;
- u_int32_t clock_source; /* HSSI, T1 */
- u_int32_t clock_rate; /* T1 */
- u_int32_t crc_length;
- u_int32_t cable_length; /* DS3 */
- u_int32_t scrambler_onoff; /* DS3 */
- u_int32_t cable_type; /* T1 */
- u_int32_t keepalive_onoff; /* protocol */
- u_int32_t ticks; /* ticks/sec */
+ u32 cardtype;
+ u32 clock_source; /* HSSI, T1 */
+ u32 clock_rate; /* T1 */
+ u32 crc_length;
+ u32 cable_length; /* DS3 */
+ u32 scrambler_onoff; /* DS3 */
+ u32 cable_type; /* T1 */
+ u32 keepalive_onoff; /* protocol */
+ u32 ticks; /* ticks/sec */
union {
lmc_av9110_t ssi;
} cardspec;
- u_int32_t circuit_type; /* T1 or E1 */
+ u32 circuit_type; /* T1 or E1 */
};
@@ -244,108 +222,69 @@ struct lmc___media {
#define STATCHECK 0xBEEFCAFE
-/* Included in this structure are first
- * - standard net_device_stats
- * - some other counters used for debug and driver performance
- * evaluation -baz
- */
-struct lmc_statistics
+struct lmc_extra_statistics
{
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes;
- unsigned long tx_bytes;
-
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* no space in linux buffers */
- unsigned long tx_dropped; /* no space available in linux */
- unsigned long multicast; /* multicast packets received */
- unsigned long collisions;
-
- /* detailed rx_errors: */
- unsigned long rx_length_errors;
- unsigned long rx_over_errors; /* receiver ring buff overflow */
- unsigned long rx_crc_errors; /* recved pkt with crc error */
- unsigned long rx_frame_errors; /* recv'd frame alignment error */
- unsigned long rx_fifo_errors; /* recv'r fifo overrun */
- unsigned long rx_missed_errors; /* receiver missed packet */
-
- /* detailed tx_errors */
- unsigned long tx_aborted_errors;
- unsigned long tx_carrier_errors;
- unsigned long tx_fifo_errors;
- unsigned long tx_heartbeat_errors;
- unsigned long tx_window_errors;
-
- /* for cslip etc */
- unsigned long rx_compressed;
- unsigned long tx_compressed;
-
- /* -------------------------------------
- * Custom stats & counters follow -baz */
- u_int32_t version_size;
- u_int32_t lmc_cardtype;
-
- u_int32_t tx_ProcTimeout;
- u_int32_t tx_IntTimeout;
- u_int32_t tx_NoCompleteCnt;
- u_int32_t tx_MaxXmtsB4Int;
- u_int32_t tx_TimeoutCnt;
- u_int32_t tx_OutOfSyncPtr;
- u_int32_t tx_tbusy0;
- u_int32_t tx_tbusy1;
- u_int32_t tx_tbusy_calls;
- u_int32_t resetCount;
- u_int32_t lmc_txfull;
- u_int32_t tbusy;
- u_int32_t dirtyTx;
- u_int32_t lmc_next_tx;
- u_int32_t otherTypeCnt;
- u_int32_t lastType;
- u_int32_t lastTypeOK;
- u_int32_t txLoopCnt;
- u_int32_t usedXmtDescripCnt;
- u_int32_t txIndexCnt;
- u_int32_t rxIntLoopCnt;
-
- u_int32_t rx_SmallPktCnt;
- u_int32_t rx_BadPktSurgeCnt;
- u_int32_t rx_BuffAllocErr;
- u_int32_t tx_lossOfClockCnt;
-
- /* T1 error counters */
- u_int32_t framingBitErrorCount;
- u_int32_t lineCodeViolationCount;
-
- u_int32_t lossOfFrameCount;
- u_int32_t changeOfFrameAlignmentCount;
- u_int32_t severelyErroredFrameCount;
-
- u_int32_t check;
+ u32 version_size;
+ u32 lmc_cardtype;
+
+ u32 tx_ProcTimeout;
+ u32 tx_IntTimeout;
+ u32 tx_NoCompleteCnt;
+ u32 tx_MaxXmtsB4Int;
+ u32 tx_TimeoutCnt;
+ u32 tx_OutOfSyncPtr;
+ u32 tx_tbusy0;
+ u32 tx_tbusy1;
+ u32 tx_tbusy_calls;
+ u32 resetCount;
+ u32 lmc_txfull;
+ u32 tbusy;
+ u32 dirtyTx;
+ u32 lmc_next_tx;
+ u32 otherTypeCnt;
+ u32 lastType;
+ u32 lastTypeOK;
+ u32 txLoopCnt;
+ u32 usedXmtDescripCnt;
+ u32 txIndexCnt;
+ u32 rxIntLoopCnt;
+
+ u32 rx_SmallPktCnt;
+ u32 rx_BadPktSurgeCnt;
+ u32 rx_BuffAllocErr;
+ u32 tx_lossOfClockCnt;
+
+ /* T1 error counters */
+ u32 framingBitErrorCount;
+ u32 lineCodeViolationCount;
+
+ u32 lossOfFrameCount;
+ u32 changeOfFrameAlignmentCount;
+ u32 severelyErroredFrameCount;
+
+ u32 check;
};
-
typedef struct lmc_xinfo {
- u_int32_t Magic0; /* BEEFCAFE */
+ u32 Magic0; /* BEEFCAFE */
- u_int32_t PciCardType;
- u_int32_t PciSlotNumber; /* PCI slot number */
+ u32 PciCardType;
+ u32 PciSlotNumber; /* PCI slot number */
- u_int16_t DriverMajorVersion;
- u_int16_t DriverMinorVersion;
- u_int16_t DriverSubVersion;
+ u16 DriverMajorVersion;
+ u16 DriverMinorVersion;
+ u16 DriverSubVersion;
- u_int16_t XilinxRevisionNumber;
- u_int16_t MaxFrameSize;
+ u16 XilinxRevisionNumber;
+ u16 MaxFrameSize;
- u_int16_t t1_alarm1_status;
- u_int16_t t1_alarm2_status;
+ u16 t1_alarm1_status;
+ u16 t1_alarm2_status;
- int link_status;
- u_int32_t mii_reg16;
+ int link_status;
+ u32 mii_reg16;
- u_int32_t Magic1; /* DEADBEEF */
+ u32 Magic1; /* DEADBEEF */
} LMC_XINFO;
@@ -353,23 +292,22 @@ typedef struct lmc_xinfo {
* forward decl
*/
struct lmc___softc {
- void *if_ptr; /* General purpose pointer (used by SPPP) */
char *name;
u8 board_idx;
- struct lmc_statistics stats;
- struct net_device *lmc_device;
+ struct lmc_extra_statistics extra_stats;
+ struct net_device *lmc_device;
int hang, rxdesc, bad_packet, some_counter;
- u_int32_t txgo;
+ u32 txgo;
struct lmc_regfile_t lmc_csrs;
- volatile u_int32_t lmc_txtick;
- volatile u_int32_t lmc_rxtick;
- u_int32_t lmc_flags;
- u_int32_t lmc_intrmask; /* our copy of csr_intr */
- u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */
- u_int32_t lmc_busmode; /* our copy of csr_busmode */
- u_int32_t lmc_gpio_io; /* state of in/out settings */
- u_int32_t lmc_gpio; /* state of outputs */
+ volatile u32 lmc_txtick;
+ volatile u32 lmc_rxtick;
+ u32 lmc_flags;
+ u32 lmc_intrmask; /* our copy of csr_intr */
+ u32 lmc_cmdmode; /* our copy of csr_cmdmode */
+ u32 lmc_busmode; /* our copy of csr_busmode */
+ u32 lmc_gpio_io; /* state of in/out settings */
+ u32 lmc_gpio; /* state of outputs */
struct sk_buff* lmc_txq[LMC_TXDESCS];
struct sk_buff* lmc_rxq[LMC_RXDESCS];
volatile
@@ -381,42 +319,41 @@ struct lmc___softc {
unsigned int lmc_taint_tx, lmc_taint_rx;
int lmc_tx_start, lmc_txfull;
int lmc_txbusy;
- u_int16_t lmc_miireg16;
+ u16 lmc_miireg16;
int lmc_ok;
int last_link_status;
int lmc_cardtype;
- u_int32_t last_frameerr;
+ u32 last_frameerr;
lmc_media_t *lmc_media;
struct timer_list timer;
lmc_ctl_t ictl;
- u_int32_t TxDescriptControlInit;
+ u32 TxDescriptControlInit;
int tx_TimeoutInd; /* additional driver state */
int tx_TimeoutDisplay;
unsigned int lastlmc_taint_tx;
int lasttx_packets;
- u_int32_t tx_clockState;
- u_int32_t lmc_crcSize;
- LMC_XINFO lmc_xinfo;
+ u32 tx_clockState;
+ u32 lmc_crcSize;
+ LMC_XINFO lmc_xinfo;
char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
- char lmc_timing; /* for HSSI and SSI */
- int got_irq;
+ char lmc_timing; /* for HSSI and SSI */
+ int got_irq;
- char last_led_err[4];
+ char last_led_err[4];
- u32 last_int;
- u32 num_int;
+ u32 last_int;
+ u32 num_int;
spinlock_t lmc_lock;
- u_int16_t if_type; /* PPP or NET */
- struct ppp_device *pd;
+ u16 if_type; /* HDLC/PPP or NET */
- /* Failure cases */
- u8 failed_ring;
- u8 failed_recv_alloc;
+ /* Failure cases */
+ u8 failed_ring;
+ u8 failed_recv_alloc;
- /* Structure check */
- u32 check;
+ /* Structure check */
+ u32 check;
};
#define LMC_PCI_TIME 1
@@ -512,8 +449,8 @@ struct lmc___softc {
| TULIP_STS_TXUNDERFLOW\
| TULIP_STS_RXSTOPPED )
-#define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000))
-#define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000))
+#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000))
+#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000))
#ifndef TULIP_CMD_RECEIVEALL
#define TULIP_CMD_RECEIVEALL 0x40000000L
@@ -525,46 +462,9 @@ struct lmc___softc {
#define LMC_ADAP_SSI 4
#define LMC_ADAP_T1 5
-#define HDLC_HDR_LEN 4
-#define HDLC_ADDR_LEN 1
-#define HDLC_SLARP 0x8035
#define LMC_MTU 1500
-#define SLARP_LINECHECK 2
#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
#define LMC_CRC_LEN_32 4
-#ifdef LMC_HDLC
-/* definition of an hdlc header. */
-struct hdlc_hdr
-{
- u8 address;
- u8 control;
- u16 type;
-};
-
-/* definition of a slarp header. */
-struct slarp
-{
- long code;
- union sl
- {
- struct
- {
- ulong address;
- ulong mask;
- ushort unused;
- } add;
- struct
- {
- ulong mysequence;
- ulong yoursequence;
- ushort reliability;
- ulong time;
- } chk;
- } t;
-};
-#endif /* LMC_HDLC */
-
-
#endif /* _LMC_VAR_H_ */
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
index 63e9fcf31fb..2e4f84f6cad 100644
--- a/drivers/net/wan/pc300.h
+++ b/drivers/net/wan/pc300.h
@@ -100,31 +100,14 @@
#define _PC300_H
#include <linux/hdlc.h>
-#include <net/syncppp.h>
#include "hd64572.h"
#include "pc300-falc-lh.h"
-#ifndef CY_TYPES
-#define CY_TYPES
-typedef __u64 ucdouble; /* 64 bits, unsigned */
-typedef __u32 uclong; /* 32 bits, unsigned */
-typedef __u16 ucshort; /* 16 bits, unsigned */
-typedef __u8 ucchar; /* 8 bits, unsigned */
-#endif /* CY_TYPES */
+#define PC300_PROTO_MLPPP 1
-#define PC300_PROTO_MLPPP 1
-
-#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */
-
-#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */
-#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */
-
-#define PC300_MAXCARDS 4 /* Max number of cards per system */
#define PC300_MAXCHAN 2 /* Number of channels per card */
-#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */
#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */
-#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */
#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */
#define PC300_OSC_CLOCK 24576000
@@ -160,26 +143,14 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
* Memory access functions/macros *
* (required to support Alpha systems) *
***************************************/
-#ifdef __KERNEL__
-#define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();}
+#define cpc_writeb(port,val) {writeb((u8)(val),(port)); mb();}
#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();}
-#define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();}
+#define cpc_writel(port,val) {writel((u32)(val),(port)); mb();}
#define cpc_readb(port) readb(port)
#define cpc_readw(port) readw(port)
#define cpc_readl(port) readl(port)
-#else /* __KERNEL__ */
-#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val))
-#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val))
-#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val))
-
-#define cpc_readb(port) (*(volatile ucchar *)(port))
-#define cpc_readw(port) (*(volatile ucshort *)(port))
-#define cpc_readl(port) (*(volatile uclong *)(port))
-
-#endif /* __KERNEL__ */
-
/****** Data Structures *****************************************************/
/*
@@ -188,15 +159,15 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
* (memory mapped).
*/
struct RUNTIME_9050 {
- uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
- uclong loc_rom_range; /* 10h : Local ROM Range */
- uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
- uclong loc_rom_base; /* 24h : Local ROM Base */
- uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
- uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */
- uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
- uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
- uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
+ u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
+ u32 loc_rom_range; /* 10h : Local ROM Range */
+ u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
+ u32 loc_rom_base; /* 24h : Local ROM Base */
+ u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
+ u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
+ u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
+ u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
+ u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
};
#define PLX_9050_LINT1_ENABLE 0x01
@@ -240,66 +211,66 @@ struct RUNTIME_9050 {
#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */
typedef struct falc {
- ucchar sync; /* If true FALC is synchronized */
- ucchar active; /* if TRUE then already active */
- ucchar loop_active; /* if TRUE a line loopback UP was received */
- ucchar loop_gen; /* if TRUE a line loopback UP was issued */
+ u8 sync; /* If true FALC is synchronized */
+ u8 active; /* if TRUE then already active */
+ u8 loop_active; /* if TRUE a line loopback UP was received */
+ u8 loop_gen; /* if TRUE a line loopback UP was issued */
- ucchar num_channels;
- ucchar offset; /* 1 for T1, 0 for E1 */
- ucchar full_bandwidth;
+ u8 num_channels;
+ u8 offset; /* 1 for T1, 0 for E1 */
+ u8 full_bandwidth;
- ucchar xmb_cause;
- ucchar multiframe_mode;
+ u8 xmb_cause;
+ u8 multiframe_mode;
/* Statistics */
- ucshort pden; /* Pulse Density violation count */
- ucshort los; /* Loss of Signal count */
- ucshort losr; /* Loss of Signal recovery count */
- ucshort lfa; /* Loss of frame alignment count */
- ucshort farec; /* Frame Alignment Recovery count */
- ucshort lmfa; /* Loss of multiframe alignment count */
- ucshort ais; /* Remote Alarm indication Signal count */
- ucshort sec; /* One-second timer */
- ucshort es; /* Errored second */
- ucshort rai; /* remote alarm received */
- ucshort bec;
- ucshort fec;
- ucshort cvc;
- ucshort cec;
- ucshort ebc;
+ u16 pden; /* Pulse Density violation count */
+ u16 los; /* Loss of Signal count */
+ u16 losr; /* Loss of Signal recovery count */
+ u16 lfa; /* Loss of frame alignment count */
+ u16 farec; /* Frame Alignment Recovery count */
+ u16 lmfa; /* Loss of multiframe alignment count */
+ u16 ais; /* Remote Alarm indication Signal count */
+ u16 sec; /* One-second timer */
+ u16 es; /* Errored second */
+ u16 rai; /* remote alarm received */
+ u16 bec;
+ u16 fec;
+ u16 cvc;
+ u16 cec;
+ u16 ebc;
/* Status */
- ucchar red_alarm;
- ucchar blue_alarm;
- ucchar loss_fa;
- ucchar yellow_alarm;
- ucchar loss_mfa;
- ucchar prbs;
+ u8 red_alarm;
+ u8 blue_alarm;
+ u8 loss_fa;
+ u8 yellow_alarm;
+ u8 loss_mfa;
+ u8 prbs;
} falc_t;
typedef struct falc_status {
- ucchar sync; /* If true FALC is synchronized */
- ucchar red_alarm;
- ucchar blue_alarm;
- ucchar loss_fa;
- ucchar yellow_alarm;
- ucchar loss_mfa;
- ucchar prbs;
+ u8 sync; /* If true FALC is synchronized */
+ u8 red_alarm;
+ u8 blue_alarm;
+ u8 loss_fa;
+ u8 yellow_alarm;
+ u8 loss_mfa;
+ u8 prbs;
} falc_status_t;
typedef struct rsv_x21_status {
- ucchar dcd;
- ucchar dsr;
- ucchar cts;
- ucchar rts;
- ucchar dtr;
+ u8 dcd;
+ u8 dsr;
+ u8 cts;
+ u8 rts;
+ u8 dtr;
} rsv_x21_status_t;
typedef struct pc300stats {
int hw_type;
- uclong line_on;
- uclong line_off;
+ u32 line_on;
+ u32 line_off;
struct net_device_stats gen_stats;
falc_t te_stats;
} pc300stats_t;
@@ -317,28 +288,19 @@ typedef struct pc300loopback {
typedef struct pc300patterntst {
char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */
- ucshort num_errors;
+ u16 num_errors;
} pc300patterntst_t;
typedef struct pc300dev {
- void *if_ptr; /* General purpose pointer */
struct pc300ch *chan;
- ucchar trace_on;
- uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */
- uclong line_off;
-#ifdef __KERNEL__
+ u8 trace_on;
+ u32 line_on; /* DCD(X.21, RSV) / sync(TE) change counters */
+ u32 line_off;
char name[16];
struct net_device *dev;
-
- void *private;
- struct sk_buff *tx_skb;
- union { /* This union has all the protocol-specific structures */
- struct ppp_device pppdev;
- }ifu;
#ifdef CONFIG_PC300_MLPPP
void *cpc_tty; /* information to PC300 TTY driver */
#endif
-#endif /* __KERNEL__ */
}pc300dev_t;
typedef struct pc300hw {
@@ -346,43 +308,42 @@ typedef struct pc300hw {
int bus; /* Bus (PCI, PMC, etc.) */
int nchan; /* number of channels */
int irq; /* interrupt request level */
- uclong clock; /* Board clock */
- ucchar cpld_id; /* CPLD ID (TE only) */
- ucshort cpld_reg1; /* CPLD reg 1 (TE only) */
- ucshort cpld_reg2; /* CPLD reg 2 (TE only) */
- ucshort gpioc_reg; /* PLX GPIOC reg */
- ucshort intctl_reg; /* PLX Int Ctrl/Status reg */
- uclong iophys; /* PLX registers I/O base */
- uclong iosize; /* PLX registers I/O size */
- uclong plxphys; /* PLX registers MMIO base (physical) */
+ u32 clock; /* Board clock */
+ u8 cpld_id; /* CPLD ID (TE only) */
+ u16 cpld_reg1; /* CPLD reg 1 (TE only) */
+ u16 cpld_reg2; /* CPLD reg 2 (TE only) */
+ u16 gpioc_reg; /* PLX GPIOC reg */
+ u16 intctl_reg; /* PLX Int Ctrl/Status reg */
+ u32 iophys; /* PLX registers I/O base */
+ u32 iosize; /* PLX registers I/O size */
+ u32 plxphys; /* PLX registers MMIO base (physical) */
void __iomem * plxbase; /* PLX registers MMIO base (virtual) */
- uclong plxsize; /* PLX registers MMIO size */
- uclong scaphys; /* SCA registers MMIO base (physical) */
+ u32 plxsize; /* PLX registers MMIO size */
+ u32 scaphys; /* SCA registers MMIO base (physical) */
void __iomem * scabase; /* SCA registers MMIO base (virtual) */
- uclong scasize; /* SCA registers MMIO size */
- uclong ramphys; /* On-board RAM MMIO base (physical) */
+ u32 scasize; /* SCA registers MMIO size */
+ u32 ramphys; /* On-board RAM MMIO base (physical) */
void __iomem * rambase; /* On-board RAM MMIO base (virtual) */
- uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */
- uclong ramsize; /* On-board RAM MMIO size */
- uclong falcphys; /* FALC registers MMIO base (physical) */
+ u32 alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */
+ u32 ramsize; /* On-board RAM MMIO size */
+ u32 falcphys; /* FALC registers MMIO base (physical) */
void __iomem * falcbase;/* FALC registers MMIO base (virtual) */
- uclong falcsize; /* FALC registers MMIO size */
+ u32 falcsize; /* FALC registers MMIO size */
} pc300hw_t;
typedef struct pc300chconf {
- sync_serial_settings phys_settings; /* Clock type/rate (in bps),
+ sync_serial_settings phys_settings; /* Clock type/rate (in bps),
loopback mode */
raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */
- uclong media; /* HW media (RS232, V.35, etc.) */
- uclong proto; /* Protocol (PPP, X.25, etc.) */
- ucchar monitor; /* Monitor mode (0 = off, !0 = on) */
+ u32 media; /* HW media (RS232, V.35, etc.) */
+ u32 proto; /* Protocol (PPP, X.25, etc.) */
/* TE-specific parameters */
- ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */
- ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */
- ucchar lbo; /* Line Build Out */
- ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */
- uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */
+ u8 lcode; /* Line Code (AMI, B8ZS, etc.) */
+ u8 fr_mode; /* Frame Mode (ESF, D4, etc.) */
+ u8 lbo; /* Line Build Out */
+ u8 rx_sens; /* Rx Sensitivity (long- or short-haul) */
+ u32 tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */
} pc300chconf_t;
typedef struct pc300ch {
@@ -390,20 +351,18 @@ typedef struct pc300ch {
int channel;
pc300dev_t d;
pc300chconf_t conf;
- ucchar tx_first_bd; /* First TX DMA block descr. w/ data */
- ucchar tx_next_bd; /* Next free TX DMA block descriptor */
- ucchar rx_first_bd; /* First free RX DMA block descriptor */
- ucchar rx_last_bd; /* Last free RX DMA block descriptor */
- ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */
- falc_t falc; /* FALC structure (TE only) */
+ u8 tx_first_bd; /* First TX DMA block descr. w/ data */
+ u8 tx_next_bd; /* Next free TX DMA block descriptor */
+ u8 rx_first_bd; /* First free RX DMA block descriptor */
+ u8 rx_last_bd; /* Last free RX DMA block descriptor */
+ u8 nfree_tx_bd; /* Number of free TX DMA block descriptors */
+ falc_t falc; /* FALC structure (TE only) */
} pc300ch_t;
typedef struct pc300 {
pc300hw_t hw; /* hardware config. */
pc300ch_t chan[PC300_MAXCHAN];
-#ifdef __KERNEL__
spinlock_t card_lock;
-#endif /* __KERNEL__ */
} pc300_t;
typedef struct pc300conf {
@@ -471,12 +430,7 @@ enum pc300_loopback_cmds {
#define PC300_TX_QUEUE_LEN 100
#define PC300_DEF_MTU 1600
-#ifdef __KERNEL__
/* Function Prototypes */
-void tx_dma_start(pc300_t *, int);
int cpc_open(struct net_device *dev);
-int cpc_set_media(hdlc_device *, int);
-#endif /* __KERNEL__ */
#endif /* _PC300_H */
-
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 33417052775..d0a8d1e352a 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -227,8 +227,6 @@ static char rcsid[] =
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if.h>
-
-#include <net/syncppp.h>
#include <net/arp.h>
#include <asm/io.h>
@@ -285,8 +283,8 @@ static void rx_dma_buf_init(pc300_t *, int);
static void tx_dma_buf_check(pc300_t *, int);
static void rx_dma_buf_check(pc300_t *, int);
static irqreturn_t cpc_intr(int, void *);
-static int clock_rate_calc(uclong, uclong, int *);
-static uclong detect_ram(pc300_t *);
+static int clock_rate_calc(u32, u32, int *);
+static u32 detect_ram(pc300_t *);
static void plx_init(pc300_t *);
static void cpc_trace(struct net_device *, struct sk_buff *, char);
static int cpc_attach(struct net_device *, unsigned short, unsigned short);
@@ -311,10 +309,10 @@ static void tx_dma_buf_pt_init(pc300_t * card, int ch)
+ DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
- cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE +
+ cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE +
(ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t)));
- cpc_writel(&ptdescr->ptbuf,
- (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
+ cpc_writel(&ptdescr->ptbuf,
+ (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
}
}
@@ -341,10 +339,10 @@ static void rx_dma_buf_pt_init(pc300_t * card, int ch)
+ DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
- cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE +
- (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
+ cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE +
+ (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
cpc_writel(&ptdescr->ptbuf,
- (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
+ (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
}
}
@@ -367,8 +365,8 @@ static void tx_dma_buf_check(pc300_t * card, int ch)
{
volatile pcsca_bd_t __iomem *ptdescr;
int i;
- ucshort first_bd = card->chan[ch].tx_first_bd;
- ucshort next_bd = card->chan[ch].tx_next_bd;
+ u16 first_bd = card->chan[ch].tx_first_bd;
+ u16 next_bd = card->chan[ch].tx_next_bd;
printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch,
first_bd, TX_BD_ADDR(ch, first_bd),
@@ -392,9 +390,9 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
{
volatile pcsca_bd_t __iomem *ptdescr;
int i;
- ucshort first_bd = card->chan[ch].tx_first_bd;
- ucshort next_bd = card->chan[ch].tx_next_bd;
- uclong scabase = card->hw.scabase;
+ u16 first_bd = card->chan[ch].tx_first_bd;
+ u16 next_bd = card->chan[ch].tx_next_bd;
+ u32 scabase = card->hw.scabase;
printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
@@ -413,13 +411,13 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
printk("\n");
}
#endif
-
+
static void rx_dma_buf_check(pc300_t * card, int ch)
{
volatile pcsca_bd_t __iomem *ptdescr;
int i;
- ucshort first_bd = card->chan[ch].rx_first_bd;
- ucshort last_bd = card->chan[ch].rx_last_bd;
+ u16 first_bd = card->chan[ch].rx_first_bd;
+ u16 last_bd = card->chan[ch].rx_last_bd;
int ch_factor;
ch_factor = ch * N_DMA_RX_BUF;
@@ -440,9 +438,9 @@ static void rx_dma_buf_check(pc300_t * card, int ch)
static int dma_get_rx_frame_size(pc300_t * card, int ch)
{
volatile pcsca_bd_t __iomem *ptdescr;
- ucshort first_bd = card->chan[ch].rx_first_bd;
+ u16 first_bd = card->chan[ch].rx_first_bd;
int rcvd = 0;
- volatile ucchar status;
+ volatile u8 status;
ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd));
while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
@@ -462,12 +460,12 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch)
* dma_buf_write: writes a frame to the Tx DMA buffers
* NOTE: this function writes one frame at a time.
*/
-static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
+static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len)
{
int i, nchar;
volatile pcsca_bd_t __iomem *ptdescr;
int tosend = len;
- ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1;
+ u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1;
if (nbuf >= card->chan[ch].nfree_tx_bd) {
return -ENOMEM;
@@ -509,7 +507,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
volatile pcsca_bd_t __iomem *ptdescr;
int rcvd = 0;
- volatile ucchar status;
+ volatile u8 status;
ptdescr = (card->hw.rambase +
RX_BD_ADDR(ch, chan->rx_first_bd));
@@ -563,8 +561,8 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
static void tx_dma_stop(pc300_t * card, int ch)
{
void __iomem *scabase = card->hw.scabase;
- ucchar drr_ena_bit = 1 << (5 + 2 * ch);
- ucchar drr_rst_bit = 1 << (1 + 2 * ch);
+ u8 drr_ena_bit = 1 << (5 + 2 * ch);
+ u8 drr_rst_bit = 1 << (1 + 2 * ch);
/* Disable DMA */
cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -574,8 +572,8 @@ static void tx_dma_stop(pc300_t * card, int ch)
static void rx_dma_stop(pc300_t * card, int ch)
{
void __iomem *scabase = card->hw.scabase;
- ucchar drr_ena_bit = 1 << (4 + 2 * ch);
- ucchar drr_rst_bit = 1 << (2 * ch);
+ u8 drr_ena_bit = 1 << (4 + 2 * ch);
+ u8 drr_rst_bit = 1 << (2 * ch);
/* Disable DMA */
cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -607,7 +605,7 @@ static void rx_dma_start(pc300_t * card, int ch)
/*************************/
/*** FALC Routines ***/
/*************************/
-static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
+static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd)
{
void __iomem *falcbase = card->hw.falcbase;
unsigned long i = 0;
@@ -675,7 +673,7 @@ static void falc_intr_enable(pc300_t * card, int ch)
static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
{
void __iomem *falcbase = card->hw.falcbase;
- ucchar tshf = card->chan[ch].falc.offset;
+ u8 tshf = card->chan[ch].falc.offset;
cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) &
@@ -691,7 +689,7 @@ static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
{
void __iomem *falcbase = card->hw.falcbase;
- ucchar tshf = card->chan[ch].falc.offset;
+ u8 tshf = card->chan[ch].falc.offset;
cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) |
@@ -812,7 +810,7 @@ static void falc_init_t1(pc300_t * card, int ch)
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
falc_t *pfalc = (falc_t *) & chan->falc;
void __iomem *falcbase = card->hw.falcbase;
- ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
+ u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
/* Switch to T1 mode (PCM 24) */
cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD);
@@ -981,7 +979,7 @@ static void falc_init_e1(pc300_t * card, int ch)
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
falc_t *pfalc = (falc_t *) & chan->falc;
void __iomem *falcbase = card->hw.falcbase;
- ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
+ u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
/* Switch to E1 mode (PCM 30) */
cpc_writeb(falcbase + F_REG(FMR1, ch),
@@ -1187,7 +1185,7 @@ static void te_config(pc300_t * card, int ch)
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
falc_t *pfalc = (falc_t *) & chan->falc;
void __iomem *falcbase = card->hw.falcbase;
- ucchar dummy;
+ u8 dummy;
unsigned long flags;
memset(pfalc, 0, sizeof(falc_t));
@@ -1403,7 +1401,7 @@ static void falc_update_stats(pc300_t * card, int ch)
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
falc_t *pfalc = (falc_t *) & chan->falc;
void __iomem *falcbase = card->hw.falcbase;
- ucshort counter;
+ u16 counter;
counter = cpc_readb(falcbase + F_REG(FECL, ch));
counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8;
@@ -1729,7 +1727,7 @@ static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
* Description: This routine returns the bit error counter value
*----------------------------------------------------------------------------
*/
-static ucshort falc_pattern_test_error(pc300_t * card, int ch)
+static u16 falc_pattern_test_error(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1776,7 +1774,7 @@ static void cpc_tx_timeout(struct net_device *dev)
pc300_t *card = (pc300_t *) chan->card;
int ch = chan->channel;
unsigned long flags;
- ucchar ilar;
+ u8 ilar;
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
@@ -1807,11 +1805,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
#endif
- if (chan->conf.monitor) {
- /* In monitor mode no Tx is done: ignore packet */
- dev_kfree_skb(skb);
- return 0;
- } else if (!netif_carrier_ok(dev)) {
+ if (!netif_carrier_ok(dev)) {
/* DCD must be OFF: drop packet */
dev_kfree_skb(skb);
dev->stats.tx_errors++;
@@ -1836,7 +1830,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Write buffer to DMA buffers */
- if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) {
+ if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) {
// printk("%s: write error. Dropping TX packet.\n", dev->name);
netif_stop_queue(dev);
dev_kfree_skb(skb);
@@ -2001,7 +1995,7 @@ static void sca_tx_intr(pc300dev_t *dev)
static void sca_intr(pc300_t * card)
{
void __iomem *scabase = card->hw.scabase;
- volatile uclong status;
+ volatile u32 status;
int ch;
int intr_count = 0;
unsigned char dsr_rx;
@@ -2016,7 +2010,7 @@ static void sca_intr(pc300_t * card)
/**** Reception ****/
if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) {
- ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch));
+ u8 drx_stat = cpc_readb(scabase + DSR_RX(ch));
/* Clear RX interrupts */
cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);
@@ -2090,7 +2084,7 @@ static void sca_intr(pc300_t * card)
/**** Transmission ****/
if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) {
- ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch));
+ u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch));
/* Clear TX interrupts */
cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE);
@@ -2134,7 +2128,7 @@ static void sca_intr(pc300_t * card)
/**** MSCI ****/
if (status & IR0_M(IR0_RXINTA, ch)) {
- ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch));
+ u8 st1 = cpc_readb(scabase + M_REG(ST1, ch));
/* Clear MSCI interrupts */
cpc_writeb(scabase + M_REG(ST1, ch), st1);
@@ -2176,7 +2170,7 @@ static void sca_intr(pc300_t * card)
}
}
-static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1)
+static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2201,7 +2195,7 @@ static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1)
}
}
-static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp)
+static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2231,8 +2225,8 @@ static void falc_t1_intr(pc300_t * card, int ch)
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
void __iomem *falcbase = card->hw.falcbase;
- ucchar isr0, isr3, gis;
- ucchar dummy;
+ u8 isr0, isr3, gis;
+ u8 dummy;
while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
if (gis & GIS_ISR0) {
@@ -2278,8 +2272,8 @@ static void falc_e1_intr(pc300_t * card, int ch)
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
void __iomem *falcbase = card->hw.falcbase;
- ucchar isr1, isr2, isr3, gis, rsp;
- ucchar dummy;
+ u8 isr1, isr2, isr3, gis, rsp;
+ u8 dummy;
while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
rsp = cpc_readb(falcbase + F_REG(RSP, ch));
@@ -2361,7 +2355,7 @@ static void falc_intr(pc300_t * card)
static irqreturn_t cpc_intr(int irq, void *dev_id)
{
pc300_t *card = dev_id;
- volatile ucchar plx_status;
+ volatile u8 plx_status;
if (!card) {
#ifdef PC300_DEBUG_INTR
@@ -2400,7 +2394,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id)
static void cpc_sca_status(pc300_t * card, int ch)
{
- ucchar ilar;
+ u8 ilar;
void __iomem *scabase = card->hw.scabase;
unsigned long flags;
@@ -2818,7 +2812,7 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
+static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
{
int br, tc;
int br_pwr, error;
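
clock_rate_calc() keeps its logic here; only the argument types move from uclong to u32. As a rough illustration of what a baud-rate helper of this shape computes, below is a small standalone sketch that searches for a divisor power (br) and time constant (tc) such that clock / (tc * 2^br) approximates the requested rate. The 1..255 tc range, the 0..9 br range and the sample clock value are assumptions for the sketch only, not the pc300 hardware limits or its exact formula.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Illustrative only: pick a rate-divisor power (br) and time constant (tc)
 * so that clock / (tc * 2^br) approximates the requested rate.  The ranges
 * used here are assumptions for the sketch, not the pc300 register limits.
 */
static int demo_clock_rate_calc(uint32_t rate, uint32_t clock, int *br_io)
{
	int br, best_br = -1, best_tc = 0;
	long best_err = -1;

	for (br = 0; br <= 9; br++) {
		uint32_t div = rate << br;	/* rate * 2^br */
		uint32_t tc = div ? clock / div : 0;
		long err;

		if (tc < 1 || tc > 255)
			continue;
		err = labs((long)(clock / (tc << br)) - (long)rate);
		if (best_err < 0 || err < best_err) {
			best_err = err;
			best_br = br;
			best_tc = tc;
		}
	}
	if (best_br < 0)
		return -1;	/* rate not reachable */
	*br_io = best_br;
	return best_tc;
}

int main(void)
{
	int br, tc = demo_clock_rate_calc(64000, 24576000, &br);

	printf("tc=%d br=%d\n", tc, br);	/* tc=192 br=1 for this sample */
	return 0;
}
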
@@ -2855,12 +2849,12 @@ static int ch_config(pc300dev_t * d)
void __iomem *scabase = card->hw.scabase;
void __iomem *plxbase = card->hw.plxbase;
int ch = chan->channel;
- uclong clkrate = chan->conf.phys_settings.clock_rate;
- uclong clktype = chan->conf.phys_settings.clock_type;
- ucshort encoding = chan->conf.proto_settings.encoding;
- ucshort parity = chan->conf.proto_settings.parity;
- ucchar md0, md2;
-
+ u32 clkrate = chan->conf.phys_settings.clock_rate;
+ u32 clktype = chan->conf.phys_settings.clock_type;
+ u16 encoding = chan->conf.proto_settings.encoding;
+ u16 parity = chan->conf.proto_settings.parity;
+ u8 md0, md2;
+
/* Reset the channel */
cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST);
@@ -3152,19 +3146,10 @@ int cpc_open(struct net_device *dev)
printk("pc300: cpc_open");
#endif
-#ifdef FIXME
- if (hdlc->proto.id == IF_PROTO_PPP) {
- d->if_ptr = &hdlc->state.ppp.pppdev;
- }
-#endif
-
result = hdlc_open(dev);
- if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) {
- dev->priv = d;
- }
- if (result) {
+
+ if (result)
return result;
- }
sprintf(ifr.ifr_name, "%s", dev->name);
result = cpc_opench(d);
@@ -3197,9 +3182,7 @@ static int cpc_close(struct net_device *dev)
CPC_UNLOCK(card, flags);
hdlc_close(dev);
- if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) {
- d->if_ptr = NULL;
- }
+
#ifdef CONFIG_PC300_MLPPP
if (chan->conf.proto == PC300_PROTO_MLPPP) {
cpc_tty_unregister_service(d);
@@ -3210,16 +3193,16 @@ static int cpc_close(struct net_device *dev)
return 0;
}
-static uclong detect_ram(pc300_t * card)
+static u32 detect_ram(pc300_t * card)
{
- uclong i;
- ucchar data;
+ u32 i;
+ u8 data;
void __iomem *rambase = card->hw.rambase;
card->hw.ramsize = PC300_RAMSIZE;
/* Let's find out how much RAM is present on this board */
for (i = 0; i < card->hw.ramsize; i++) {
- data = (ucchar) (i & 0xff);
+ data = (u8)(i & 0xff);
cpc_writeb(rambase + i, data);
if (cpc_readb(rambase + i) != data) {
break;
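
detect_ram() (now returning u32) probes the on-board window by writing the low byte of each offset and reading it back, stopping at the first mismatch. Below is a standalone simulation of that walk; demo_writeb()/demo_readb() stand in for cpc_writeb()/cpc_readb(), and the window size, populated size and the 0xff floating-bus value are made-up assumptions for the illustration.

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE	(64 * 1024)	/* assumed address window, illustration only */
#define REAL_RAM	(16 * 1024)	/* pretend only 16K is actually populated */

static uint8_t ram[REAL_RAM];

/* Mimic the register accessors: writes beyond the populated area are lost,
 * reads beyond it return 0xff (floating bus), so the probe loop stops. */
static void demo_writeb(uint32_t off, uint8_t val)
{
	if (off < REAL_RAM)
		ram[off] = val;
}

static uint8_t demo_readb(uint32_t off)
{
	return off < REAL_RAM ? ram[off] : 0xff;
}

int main(void)
{
	uint32_t i, ramsize = WINDOW_SIZE;

	/* Same walk as detect_ram(): write a known pattern, read it back,
	 * and stop at the first offset that does not hold its value. */
	for (i = 0; i < ramsize; i++) {
		uint8_t data = (uint8_t)(i & 0xff);

		demo_writeb(i, data);
		if (demo_readb(i) != data) {
			ramsize = i;
			break;
		}
	}
	printf("detected %u bytes of RAM\n", ramsize);
	return 0;
}
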
@@ -3296,7 +3279,7 @@ static void cpc_init_card(pc300_t * card)
cpc_writeb(card->hw.scabase + DMER, 0x80);
if (card->hw.type == PC300_TE) {
- ucchar reg1;
+ u8 reg1;
/* Check CPLD version */
reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1);
@@ -3360,7 +3343,6 @@ static void cpc_init_card(pc300_t * card)
chan->nfree_tx_bd = N_DMA_TX_BUF;
d->chan = chan;
- d->tx_skb = NULL;
d->trace_on = 0;
d->line_on = 0;
d->line_off = 0;
@@ -3431,7 +3413,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int first_time = 1;
int err, eeprom_outdated = 0;
- ucshort device_id;
+ u16 device_id;
pc300_t *card;
if (first_time) {
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 44a89df1b8b..c0235844a4d 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -8,6 +8,7 @@
*
* (c) Copyright 1999, 2001 Alan Cox
* (c) Copyright 2001 Red Hat Inc.
+ * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
*/
@@ -19,6 +20,7 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
+#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <net/arp.h>
@@ -27,22 +29,19 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include <net/syncppp.h>
#include "z85230.h"
struct slvl_device
{
- void *if_ptr; /* General purpose pointer (used by SPPP) */
struct z8530_channel *chan;
- struct ppp_device pppdev;
int channel;
};
struct slvl_board
{
- struct slvl_device *dev[2];
+ struct slvl_device dev[2];
struct z8530_dev board;
int iobase;
};
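
With struct slvl_board now embedding struct slvl_device dev[2] instead of holding two pointers, a single kzalloc() of the board covers both channel structures, which is why the per-channel slvl_alloc() calls and their unwind labels disappear later in this patch. A minimal userspace sketch of the allocation difference follows; the field names are made up for the illustration and are not the driver's.

#include <stdio.h>
#include <stdlib.h>

/* Field names here are illustrative, not the driver's. */
struct demo_chan { int channel; };

struct demo_board_old { struct demo_chan *dev[2]; };	/* needs two extra allocations */
struct demo_board_new { struct demo_chan dev[2]; };	/* one allocation covers everything */

int main(void)
{
	/* New layout: a single zeroed allocation holds the board and both
	 * channel structures, so there is nothing extra to unwind on error. */
	struct demo_board_new *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->dev[0].channel = 0;
	b->dev[1].channel = 1;
	printf("board is %zu bytes, channels at %p and %p\n",
	       sizeof(*b), (void *)&b->dev[0], (void *)&b->dev[1]);
	free(b);
	return 0;
}
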
@@ -51,72 +50,69 @@ struct slvl_board
* Network driver support routines
*/
+static inline struct slvl_device* dev_to_chan(struct net_device *dev)
+{
+ return (struct slvl_device *)dev_to_hdlc(dev)->priv;
+}
+
/*
- * Frame receive. Simple for our card as we do sync ppp and there
+ * Frame receive. Simple for our card as we do HDLC and there
* is no funny garbage involved
*/
-
+
static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
{
/* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len-2);
- skb->protocol=htons(ETH_P_WAN_PPP);
+ skb_trim(skb, skb->len - 2);
+ skb->protocol = hdlc_type_trans(skb, c->netdevice);
skb_reset_mac_header(skb);
- skb->dev=c->netdevice;
- /*
- * Send it to the PPP layer. We don't have time to process
- * it right now.
- */
+ skb->dev = c->netdevice;
netif_rx(skb);
c->netdevice->last_rx = jiffies;
}
-
+
/*
* We've been placed in the UP state
- */
-
+ */
+
static int sealevel_open(struct net_device *d)
{
- struct slvl_device *slvl=d->priv;
+ struct slvl_device *slvl = dev_to_chan(d);
int err = -1;
int unit = slvl->channel;
-
+
/*
- * Link layer up.
+ * Link layer up.
*/
- switch(unit)
+ switch (unit)
{
case 0:
- err=z8530_sync_dma_open(d, slvl->chan);
+ err = z8530_sync_dma_open(d, slvl->chan);
break;
case 1:
- err=z8530_sync_open(d, slvl->chan);
+ err = z8530_sync_open(d, slvl->chan);
break;
}
-
- if(err)
+
+ if (err)
return err;
- /*
- * Begin PPP
- */
- err=sppp_open(d);
- if(err)
- {
- switch(unit)
- {
+
+ err = hdlc_open(d);
+ if (err) {
+ switch (unit) {
case 0:
z8530_sync_dma_close(d, slvl->chan);
break;
case 1:
z8530_sync_close(d, slvl->chan);
break;
- }
+ }
return err;
}
-
- slvl->chan->rx_function=sealevel_input;
-
+
+ slvl->chan->rx_function = sealevel_input;
+
/*
* Go go go
*/
@@ -126,26 +122,19 @@ static int sealevel_open(struct net_device *d)
static int sealevel_close(struct net_device *d)
{
- struct slvl_device *slvl=d->priv;
+ struct slvl_device *slvl = dev_to_chan(d);
int unit = slvl->channel;
-
+
/*
* Discard new frames
*/
-
- slvl->chan->rx_function=z8530_null_rx;
-
- /*
- * PPP off
- */
- sppp_close(d);
- /*
- * Link layer down
- */
+ slvl->chan->rx_function = z8530_null_rx;
+
+ hdlc_close(d);
netif_stop_queue(d);
-
- switch(unit)
+
+ switch (unit)
{
case 0:
z8530_sync_dma_close(d, slvl->chan);
@@ -159,210 +148,153 @@ static int sealevel_close(struct net_device *d)
static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
- /* struct slvl_device *slvl=d->priv;
+ /* struct slvl_device *slvl=dev_to_chan(d);
z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
- return sppp_do_ioctl(d, ifr,cmd);
-}
-
-static struct net_device_stats *sealevel_get_stats(struct net_device *d)
-{
- struct slvl_device *slvl=d->priv;
- if(slvl)
- return z8530_get_stats(slvl->chan);
- else
- return NULL;
+ return hdlc_ioctl(d, ifr, cmd);
}
/*
- * Passed PPP frames, fire them downwind.
+ * Passed network frames, fire them downwind.
*/
-
+
static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
{
- struct slvl_device *slvl=d->priv;
- return z8530_queue_xmit(slvl->chan, skb);
+ return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
}
-static int sealevel_neigh_setup(struct neighbour *n)
+static int sealevel_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
{
- if (n->nud_state == NUD_NONE) {
- n->ops = &arp_broken_ops;
- n->output = n->ops->output;
- }
- return 0;
+ if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+ return 0;
+ return -EINVAL;
}
-static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
+static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
{
- if (p->tbl->family == AF_INET) {
- p->neigh_setup = sealevel_neigh_setup;
- p->ucast_probes = 0;
- p->mcast_probes = 0;
+ struct net_device *dev = alloc_hdlcdev(sv);
+ if (!dev)
+ return -1;
+
+ dev_to_hdlc(dev)->attach = sealevel_attach;
+ dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
+ dev->open = sealevel_open;
+ dev->stop = sealevel_close;
+ dev->do_ioctl = sealevel_ioctl;
+ dev->base_addr = iobase;
+ dev->irq = irq;
+
+ if (register_hdlc_device(dev)) {
+ printk(KERN_ERR "sealevel: unable to register HDLC device\n");
+ free_netdev(dev);
+ return -1;
}
- return 0;
-}
-static int sealevel_attach(struct net_device *dev)
-{
- struct slvl_device *sv = dev->priv;
- sppp_attach(&sv->pppdev);
+ sv->chan->netdevice = dev;
return 0;
}
-static void sealevel_detach(struct net_device *dev)
-{
- sppp_detach(dev);
-}
-
-static void slvl_setup(struct net_device *d)
-{
- d->open = sealevel_open;
- d->stop = sealevel_close;
- d->init = sealevel_attach;
- d->uninit = sealevel_detach;
- d->hard_start_xmit = sealevel_queue_xmit;
- d->get_stats = sealevel_get_stats;
- d->set_multicast_list = NULL;
- d->do_ioctl = sealevel_ioctl;
- d->neigh_setup = sealevel_neigh_setup_dev;
- d->set_mac_address = NULL;
-
-}
-
-static inline struct slvl_device *slvl_alloc(int iobase, int irq)
-{
- struct net_device *d;
- struct slvl_device *sv;
-
- d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",
- slvl_setup);
-
- if (!d)
- return NULL;
-
- sv = d->priv;
- d->ml_priv = sv;
- sv->if_ptr = &sv->pppdev;
- sv->pppdev.dev = d;
- d->base_addr = iobase;
- d->irq = irq;
-
- return sv;
-}
-
/*
* Allocate and setup Sealevel board.
*/
-
-static __init struct slvl_board *slvl_init(int iobase, int irq,
+
+static __init struct slvl_board *slvl_init(int iobase, int irq,
int txdma, int rxdma, int slow)
{
struct z8530_dev *dev;
struct slvl_board *b;
-
+
/*
* Get the needed I/O space
*/
- if(!request_region(iobase, 8, "Sealevel 4021"))
- {
- printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase);
+ if (!request_region(iobase, 8, "Sealevel 4021")) {
+ printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",
+ iobase);
return NULL;
}
-
- b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
- if(!b)
- goto fail3;
- if (!(b->dev[0]= slvl_alloc(iobase, irq)))
- goto fail2;
+ b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
+ if (!b)
+ goto err_kzalloc;
- b->dev[0]->chan = &b->board.chanA;
- b->dev[0]->channel = 0;
-
- if (!(b->dev[1] = slvl_alloc(iobase, irq)))
- goto fail1_0;
+ b->dev[0].chan = &b->board.chanA;
+ b->dev[0].channel = 0;
- b->dev[1]->chan = &b->board.chanB;
- b->dev[1]->channel = 1;
+ b->dev[1].chan = &b->board.chanB;
+ b->dev[1].channel = 1;
dev = &b->board;
-
+
/*
* Stuff in the I/O addressing
*/
-
+
dev->active = 0;
b->iobase = iobase;
-
+
/*
* Select 8530 delays for the old board
*/
-
- if(slow)
+
+ if (slow)
iobase |= Z8530_PORT_SLEEP;
-
- dev->chanA.ctrlio=iobase+1;
- dev->chanA.dataio=iobase;
- dev->chanB.ctrlio=iobase+3;
- dev->chanB.dataio=iobase+2;
-
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
-
+
+ dev->chanA.ctrlio = iobase + 1;
+ dev->chanA.dataio = iobase;
+ dev->chanB.ctrlio = iobase + 3;
+ dev->chanB.dataio = iobase + 2;
+
+ dev->chanA.irqs = &z8530_nop;
+ dev->chanB.irqs = &z8530_nop;
+
/*
* Assert DTR enable DMA
*/
-
- outb(3|(1<<7), b->iobase+4);
-
+
+ outb(3 | (1 << 7), b->iobase + 4);
+
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
-
- if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0)
- {
+
+ if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
+ "SeaLevel", dev) < 0) {
printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
- goto fail1_1;
+ goto err_request_irq;
}
-
- dev->irq=irq;
- dev->chanA.private=&b->dev[0];
- dev->chanB.private=&b->dev[1];
- dev->chanA.netdevice=b->dev[0]->pppdev.dev;
- dev->chanB.netdevice=b->dev[1]->pppdev.dev;
- dev->chanA.dev=dev;
- dev->chanB.dev=dev;
-
- dev->chanA.txdma=3;
- dev->chanA.rxdma=1;
- if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0)
- goto fail;
-
- if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0)
- goto dmafail;
-
+
+ dev->irq = irq;
+ dev->chanA.private = &b->dev[0];
+ dev->chanB.private = &b->dev[1];
+ dev->chanA.dev = dev;
+ dev->chanB.dev = dev;
+
+ dev->chanA.txdma = 3;
+ dev->chanA.rxdma = 1;
+ if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
+ goto err_dma_tx;
+
+ if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
+ goto err_dma_rx;
+
disable_irq(irq);
-
+
/*
* Begin normal initialise
*/
-
- if(z8530_init(dev)!=0)
- {
+
+ if (z8530_init(dev) != 0) {
printk(KERN_ERR "Z8530 series device not found.\n");
enable_irq(irq);
- goto dmafail2;
+ goto free_hw;
}
- if(dev->type==Z85C30)
- {
+ if (dev->type == Z85C30) {
z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
- }
- else
- {
+ } else {
z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
}
@@ -370,36 +302,31 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
/*
* Now we can take the IRQ
*/
-
+
enable_irq(irq);
- if (register_netdev(b->dev[0]->pppdev.dev))
- goto dmafail2;
-
- if (register_netdev(b->dev[1]->pppdev.dev))
- goto fail_unit;
+ if (slvl_setup(&b->dev[0], iobase, irq))
+ goto free_hw;
+ if (slvl_setup(&b->dev[1], iobase, irq))
+ goto free_netdev0;
z8530_describe(dev, "I/O", iobase);
- dev->active=1;
+ dev->active = 1;
return b;
-fail_unit:
- unregister_netdev(b->dev[0]->pppdev.dev);
-
-dmafail2:
+free_netdev0:
+ unregister_hdlc_device(b->dev[0].chan->netdevice);
+ free_netdev(b->dev[0].chan->netdevice);
+free_hw:
free_dma(dev->chanA.rxdma);
-dmafail:
+err_dma_rx:
free_dma(dev->chanA.txdma);
-fail:
+err_dma_tx:
free_irq(irq, dev);
-fail1_1:
- free_netdev(b->dev[1]->pppdev.dev);
-fail1_0:
- free_netdev(b->dev[0]->pppdev.dev);
-fail2:
+err_request_irq:
kfree(b);
-fail3:
- release_region(iobase,8);
+err_kzalloc:
+ release_region(iobase, 8);
return NULL;
}
@@ -408,14 +335,14 @@ static void __exit slvl_shutdown(struct slvl_board *b)
int u;
z8530_shutdown(&b->board);
-
- for(u=0; u<2; u++)
+
+ for (u = 0; u < 2; u++)
{
- struct net_device *d = b->dev[u]->pppdev.dev;
- unregister_netdev(d);
+ struct net_device *d = b->dev[u].chan->netdevice;
+ unregister_hdlc_device(d);
free_netdev(d);
}
-
+
free_irq(b->board.irq, &b->board);
free_dma(b->board.chanA.rxdma);
free_dma(b->board.chanA.txdma);
@@ -451,10 +378,6 @@ static struct slvl_board *slvl_unit;
static int __init slvl_init_module(void)
{
-#ifdef MODULE
- printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");
- printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");
-#endif
slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
return slvl_unit ? 0 : -ENODEV;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 29b4b94e494..327d58589e1 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -230,13 +230,6 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb)
skb->dev=dev;
skb_reset_mac_header(skb);
- if (dev->flags & IFF_RUNNING)
- {
- /* Count received bytes, add FCS and one flag */
- sp->ibytes+= skb->len + 3;
- sp->ipkts++;
- }
-
if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
/* Too small packet, drop it. */
if (sp->pp_flags & PP_DEBUG)
@@ -832,7 +825,6 @@ static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
sppp_print_bytes ((u8*) (lh+1), len);
printk (">\n");
}
- sp->obytes += skb->len;
/* Control is high priority so it doesn't get queued behind data */
skb->priority=TC_PRIO_CONTROL;
skb->dev = dev;
@@ -875,7 +867,6 @@ static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
dev->name, ntohl (ch->type), ch->par1,
ch->par2, ch->rel, ch->time0, ch->time1);
- sp->obytes += skb->len;
skb->priority=TC_PRIO_CONTROL;
skb->dev = dev;
skb_queue_tail(&tx_queue, skb);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 98ef400908b..243bd8d918f 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -43,6 +43,7 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
+#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <asm/dma.h>
@@ -51,7 +52,6 @@
#define RT_UNLOCK
#include <linux/spinlock.h>
-#include <net/syncppp.h>
#include "z85230.h"
@@ -440,51 +440,46 @@ static void z8530_tx(struct z8530_channel *c)
* A status event occurred in PIO synchronous mode. There are several
* reasons the chip will bother us here. A transmit underrun means we
* failed to feed the chip fast enough and just broke a packet. A DCD
- * change is a line up or down. We communicate that back to the protocol
- * layer for synchronous PPP to renegotiate.
+ * change is a line up or down.
*/
static void z8530_status(struct z8530_channel *chan)
{
u8 status, altered;
- status=read_zsreg(chan, R0);
- altered=chan->status^status;
-
- chan->status=status;
-
- if(status&TxEOM)
- {
+ status = read_zsreg(chan, R0);
+ altered = chan->status ^ status;
+
+ chan->status = status;
+
+ if (status & TxEOM) {
/* printk("%s: Tx underrun.\n", chan->dev->name); */
- chan->stats.tx_fifo_errors++;
+ chan->netdevice->stats.tx_fifo_errors++;
write_zsctrl(chan, ERR_RES);
z8530_tx_done(chan);
}
-
- if(altered&chan->dcdcheck)
+
+ if (altered & chan->dcdcheck)
{
- if(status&chan->dcdcheck)
- {
+ if (status & chan->dcdcheck) {
printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
- if(chan->netdevice &&
- ((chan->netdevice->type == ARPHRD_HDLC) ||
- (chan->netdevice->type == ARPHRD_PPP)))
- sppp_reopen(chan->netdevice);
- }
- else
- {
+ write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
+ if (chan->netdevice)
+ netif_carrier_on(chan->netdevice);
+ } else {
printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
+ write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
z8530_flush_fifo(chan);
+ if (chan->netdevice)
+ netif_carrier_off(chan->netdevice);
}
-
- }
+
+ }
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
}
-struct z8530_irqhandler z8530_sync=
+struct z8530_irqhandler z8530_sync =
{
z8530_rx,
z8530_tx,
@@ -556,8 +551,7 @@ static void z8530_dma_tx(struct z8530_channel *chan)
*
* A status event occurred on the Z8530. We receive these for two reasons
* when in DMA mode. Firstly if we finished a packet transfer we get one
- * and kick the next packet out. Secondly we may see a DCD change and
- * have to poke the protocol layer.
+ * and kick the next packet out. Secondly we may see a DCD change.
*
*/
@@ -586,24 +580,21 @@ static void z8530_dma_status(struct z8530_channel *chan)
}
}
- if(altered&chan->dcdcheck)
+ if (altered & chan->dcdcheck)
{
- if(status&chan->dcdcheck)
- {
+ if (status & chan->dcdcheck) {
printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
- if(chan->netdevice &&
- ((chan->netdevice->type == ARPHRD_HDLC) ||
- (chan->netdevice->type == ARPHRD_PPP)))
- sppp_reopen(chan->netdevice);
- }
- else
- {
+ write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
+ if (chan->netdevice)
+ netif_carrier_on(chan->netdevice);
+ } else {
printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
+ write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
z8530_flush_fifo(chan);
+ if (chan->netdevice)
+ netif_carrier_off(chan->netdevice);
}
- }
+ }
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
@@ -1459,10 +1450,10 @@ static void z8530_tx_begin(struct z8530_channel *c)
/*
* Check if we crapped out.
*/
- if(get_dma_residue(c->txdma))
+ if (get_dma_residue(c->txdma))
{
- c->stats.tx_dropped++;
- c->stats.tx_fifo_errors++;
+ c->netdevice->stats.tx_dropped++;
+ c->netdevice->stats.tx_fifo_errors++;
}
release_dma_lock(flags);
}
@@ -1534,21 +1525,21 @@ static void z8530_tx_begin(struct z8530_channel *c)
* packet. This code is fairly timing sensitive.
*
* Called with the register lock held.
- */
-
+ */
+
static void z8530_tx_done(struct z8530_channel *c)
{
struct sk_buff *skb;
/* Actually this can happen.*/
- if(c->tx_skb==NULL)
+ if (c->tx_skb == NULL)
return;
- skb=c->tx_skb;
- c->tx_skb=NULL;
+ skb = c->tx_skb;
+ c->tx_skb = NULL;
z8530_tx_begin(c);
- c->stats.tx_packets++;
- c->stats.tx_bytes+=skb->len;
+ c->netdevice->stats.tx_packets++;
+ c->netdevice->stats.tx_bytes += skb->len;
dev_kfree_skb_irq(skb);
}
@@ -1558,7 +1549,7 @@ static void z8530_tx_done(struct z8530_channel *c)
* @skb: The buffer
*
* We point the receive handler at this function when idle. Instead
- * of syncppp processing the frames we get to throw them away.
+ * of processing the frames we get to throw them away.
*/
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
@@ -1635,10 +1626,11 @@ static void z8530_rx_done(struct z8530_channel *c)
else
/* Can't occur as we dont reenable the DMA irq until
after the flip is done */
- printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);
-
+ printk(KERN_WARNING "%s: DMA flip overrun!\n",
+ c->netdevice->name);
+
release_dma_lock(flags);
-
+
/*
* Shove the old buffer into an sk_buff. We can't DMA
* directly into one on a PC - it might be above the 16Mb
@@ -1646,27 +1638,23 @@ static void z8530_rx_done(struct z8530_channel *c)
* can avoid the copy. Optimisation 2 - make the memcpy
* a copychecksum.
*/
-
- skb=dev_alloc_skb(ct);
- if(skb==NULL)
- {
- c->stats.rx_dropped++;
- printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
- }
- else
- {
+
+ skb = dev_alloc_skb(ct);
+ if (skb == NULL) {
+ c->netdevice->stats.rx_dropped++;
+ printk(KERN_WARNING "%s: Memory squeeze.\n",
+ c->netdevice->name);
+ } else {
skb_put(skb, ct);
skb_copy_to_linear_data(skb, rxb, ct);
- c->stats.rx_packets++;
- c->stats.rx_bytes+=ct;
+ c->netdevice->stats.rx_packets++;
+ c->netdevice->stats.rx_bytes += ct;
}
- c->dma_ready=1;
- }
- else
- {
- RT_LOCK;
- skb=c->skb;
-
+ c->dma_ready = 1;
+ } else {
+ RT_LOCK;
+ skb = c->skb;
+
/*
* The game we play for non DMA is similar. We want to
* get the controller set up for the next packet as fast
@@ -1677,48 +1665,39 @@ static void z8530_rx_done(struct z8530_channel *c)
* if you build a system where the sync irq isnt blocked
* by the kernel IRQ disable then you need only block the
* sync IRQ for the RT_LOCK area.
- *
+ *
*/
ct=c->count;
-
+
c->skb = c->skb2;
c->count = 0;
c->max = c->mtu;
- if(c->skb)
- {
+ if (c->skb) {
c->dptr = c->skb->data;
c->max = c->mtu;
- }
- else
- {
- c->count= 0;
+ } else {
+ c->count = 0;
c->max = 0;
}
RT_UNLOCK;
c->skb2 = dev_alloc_skb(c->mtu);
- if(c->skb2==NULL)
+ if (c->skb2 == NULL)
printk(KERN_WARNING "%s: memory squeeze.\n",
- c->netdevice->name);
+ c->netdevice->name);
else
- {
- skb_put(c->skb2,c->mtu);
- }
- c->stats.rx_packets++;
- c->stats.rx_bytes+=ct;
-
+ skb_put(c->skb2, c->mtu);
+ c->netdevice->stats.rx_packets++;
+ c->netdevice->stats.rx_bytes += ct;
}
/*
* If we received a frame we must now process it.
*/
- if(skb)
- {
+ if (skb) {
skb_trim(skb, ct);
- c->rx_function(c,skb);
- }
- else
- {
- c->stats.rx_dropped++;
+ c->rx_function(c, skb);
+ } else {
+ c->netdevice->stats.rx_dropped++;
printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
}
}
@@ -1730,7 +1709,7 @@ static void z8530_rx_done(struct z8530_channel *c)
* Returns true if the buffer cross a DMA boundary on a PC. The poor
* thing can only DMA within a 64K block not across the edges of it.
*/
-
+
static inline int spans_boundary(struct sk_buff *skb)
{
unsigned long a=(unsigned long)skb->data;
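
spans_boundary() exists because classic PC DMA cannot cross a 64 KiB boundary: a buffer needs special handling when its first and last bytes fall in different 64 KiB blocks. A standalone sketch of that test is shown below; the helper name and sample addresses are made up, and the real function operates on skb->data rather than raw numbers.

#include <stdio.h>
#include <stdint.h>

/* True if [addr, addr + len) crosses a 64 KiB boundary. */
static int demo_spans_boundary(uint32_t addr, uint32_t len)
{
	return (addr >> 16) != ((addr + len - 1) >> 16);
}

int main(void)
{
	printf("%d\n", demo_spans_boundary(0x0000fff0, 0x20));	/* 1: straddles a 64K edge */
	printf("%d\n", demo_spans_boundary(0x00010000, 0x20));	/* 0: fits in one block */
	return 0;
}
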
@@ -1799,24 +1778,6 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
EXPORT_SYMBOL(z8530_queue_xmit);
-/**
- * z8530_get_stats - Get network statistics
- * @c: The channel to use
- *
- * Get the statistics block. We keep the statistics in software as
- * the chip doesn't do it for us.
- *
- * Locking is ignored here - we could lock for a copy but its
- * not likely to be that big an issue
- */
-
-struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
-{
- return &c->stats;
-}
-
-EXPORT_SYMBOL(z8530_get_stats);
-
/*
* Module support
*/
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 158aea7b8ea..4f372396c51 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -325,7 +325,6 @@ struct z8530_channel
void *private; /* For our owner */
struct net_device *netdevice; /* Network layer device */
- struct net_device_stats stats; /* Network layer statistics */
/*
* Async features
@@ -366,13 +365,13 @@ struct z8530_channel
unsigned char tx_active; /* character is being xmitted */
unsigned char tx_stopped; /* output is suspended */
- spinlock_t *lock; /* Devicr lock */
-};
+ spinlock_t *lock; /* Device lock */
+};
/*
* Each Z853x0 device.
- */
-
+ */
+
struct z8530_dev
{
char *name; /* Device instance name */
@@ -408,7 +407,6 @@ extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
extern int z8530_channel_load(struct z8530_channel *, u8 *);
extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
-extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);
extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index fa14255282a..6f9aa164374 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -337,7 +337,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ei_poll;
#endif
- NS8390_init(dev, 0);
+ NS8390p_init(dev, 0);
#if 1
/* Enable interrupt generation on softconfig cards -- M.U */
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 91fc2c765d9..9931b5ab59c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -649,6 +649,7 @@ config RTL8187
Trendnet TEW-424UB
ASUS P5B Deluxe
Toshiba Satellite Pro series of laptops
+ Asus Wireless Link
Thanks to Realtek for their support!
@@ -694,6 +695,7 @@ config MAC80211_HWSIM
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/ath5k/Kconfig"
+source "drivers/net/wireless/ath9k/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 54a4f6f1db6..59aa89ec6e8 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -62,5 +62,6 @@ obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
obj-$(CONFIG_ATH5K) += ath5k/
+obj-$(CONFIG_ATH9K) += ath9k/
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index ba35c30d203..9102eea3c8b 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -186,11 +186,13 @@ struct ath5k_srev_name {
#define AR5K_SREV_RAD_2111 0x20
#define AR5K_SREV_RAD_5112 0x30
#define AR5K_SREV_RAD_5112A 0x35
+#define AR5K_SREV_RAD_5112B 0x36
#define AR5K_SREV_RAD_2112 0x40
#define AR5K_SREV_RAD_2112A 0x45
-#define AR5K_SREV_RAD_SC0 0x56 /* Found on 2413/2414 */
-#define AR5K_SREV_RAD_SC1 0x63 /* Found on 5413/5414 */
-#define AR5K_SREV_RAD_SC2 0xa2 /* Found on 2424-5/5424 */
+#define AR5K_SREV_RAD_2112B 0x46
+#define AR5K_SREV_RAD_SC0 0x50 /* Found on 2413/2414 */
+#define AR5K_SREV_RAD_SC1 0x60 /* Found on 5413/5414 */
+#define AR5K_SREV_RAD_SC2 0xa0 /* Found on 2424-5/5424 */
#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */
/* IEEE defs */
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index ff3fad794b6..2028866f599 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -95,8 +95,6 @@ static struct pci_device_id ath5k_pci_id_table[] __devinitdata = {
{ PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */
{ PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */
{ PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/
- { PCI_VDEVICE(ATHEROS, 0x0023), .driver_data = AR5K_AR5212 }, /* 5416 */
- { PCI_VDEVICE(ATHEROS, 0x0024), .driver_data = AR5K_AR5212 }, /* 5418 */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
@@ -2170,6 +2168,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
ath5k_hw_set_intr(ah, 0);
sc->bmisscount = 0;
+ sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
if (sc->opmode == IEEE80211_IF_TYPE_STA) {
sc->imask |= AR5K_INT_BMISS;
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 41d5fa34b54..6fa6c8e04ff 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -129,7 +129,7 @@ static struct reg regs[] = {
REG_STRUCT_INIT(AR5K_CPC1),
REG_STRUCT_INIT(AR5K_CPC2),
REG_STRUCT_INIT(AR5K_CPC3),
- REG_STRUCT_INIT(AR5K_CPCORN),
+ REG_STRUCT_INIT(AR5K_CPCOVF),
REG_STRUCT_INIT(AR5K_RESET_CTL),
REG_STRUCT_INIT(AR5K_SLEEP_CTL),
REG_STRUCT_INIT(AR5K_INTPEND),
diff --git a/drivers/net/wireless/ath5k/debug.h b/drivers/net/wireless/ath5k/debug.h
index 2cf8d18b10e..ffc52939330 100644
--- a/drivers/net/wireless/ath5k/debug.h
+++ b/drivers/net/wireless/ath5k/debug.h
@@ -63,7 +63,6 @@
struct ath5k_softc;
struct ath5k_hw;
-struct ieee80211_hw_mode;
struct sk_buff;
struct ath5k_buf;
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
index 7ca87a55731..ad1a5b422c8 100644
--- a/drivers/net/wireless/ath5k/hw.c
+++ b/drivers/net/wireless/ath5k/hw.c
@@ -139,6 +139,8 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
for (c = 0; c < 2; c++) {
cur_reg = regs[c];
+
+ /* Save previous value */
init_val = ath5k_hw_reg_read(ah, cur_reg);
for (i = 0; i < 256; i++) {
@@ -170,6 +172,10 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
var_pattern = 0x003b080f;
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
}
+
+ /* Restore previous value */
+ ath5k_hw_reg_write(ah, init_val, cur_reg);
+
}
return 0;
@@ -287,67 +293,42 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
/* Identify the radio chip*/
if (ah->ah_version == AR5K_AR5210) {
ah->ah_radio = AR5K_RF5110;
+ /*
+ * Register returns 0x0/0x04 for radio revision
+ * so ath5k_hw_radio_revision doesn't parse the value
+ * correctly. For now we rely on the MAC's srev to
+ * identify the RF2425 radio.
+ */
+ } else if (srev == AR5K_SREV_VER_AR2425) {
+ ah->ah_radio = AR5K_RF2425;
+ ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
ah->ah_radio = AR5K_RF5111;
ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
-
ah->ah_radio = AR5K_RF5112;
-
- if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) {
- ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
- } else {
- ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
- }
-
+ ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
ah->ah_radio = AR5K_RF2413;
- ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
+ ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
ah->ah_radio = AR5K_RF5413;
- ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
+ ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) {
-
/* AR5424 */
if (srev >= AR5K_SREV_VER_AR5424) {
ah->ah_radio = AR5K_RF5413;
- ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5424;
+ ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
/* AR2424 */
} else {
ah->ah_radio = AR5K_RF2413; /* For testing */
- ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
+ ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
}
-
- /*
- * Register returns 0x4 for radio revision
- * so ath5k_hw_radio_revision doesn't parse the value
- * correctly. For now we are based on mac's srev to
- * identify RF2425 radio.
- */
- } else if (srev == AR5K_SREV_VER_AR2425) {
- ah->ah_radio = AR5K_RF2425;
- ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
}
-
ah->ah_phy = AR5K_PHY(0);
/*
- * Identify AR5212-based PCI-E cards
- * And write some initial settings.
- *
- * (doing a "strings" on ndis driver
- * -ar5211.sys- reveals the following
- * pci-e related functions:
- *
- * pcieClockReq
- * pcieRxErrNotify
- * pcieL1SKPEnable
- * pcieAspm
- * pcieDisableAspmOnRfWake
- * pciePowerSaveEnable
- *
- * I guess these point to ClockReq but
- * i'm not sure.)
+ * Write PCI-E power save settings
*/
if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
@@ -369,10 +350,15 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
if (ret)
goto err_free;
+ /* Write AR5K_PCICFG_UNK on 2112B and later chips */
+ if (ah->ah_radio_5ghz_revision > AR5K_SREV_RAD_2112B ||
+ srev > AR5K_SREV_VER_AR2413) {
+ ath5k_hw_reg_write(ah, AR5K_PCICFG_UNK, AR5K_PCICFG);
+ }
+
/*
* Get card capabilities, values, ...
*/
-
ret = ath5k_eeprom_init(ah);
if (ret) {
ATH5K_ERR(sc, "unable to init EEPROM\n");
@@ -843,27 +829,41 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
* Write some more initial register settings
*/
if (ah->ah_version == AR5K_AR5212) {
- ath5k_hw_reg_write(ah, 0x0002a002, AR5K_PHY(11));
+ ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
if (channel->hw_value == CHANNEL_G)
if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
ath5k_hw_reg_write(ah, 0x00f80d80,
- AR5K_PHY(83));
+ 0x994c);
else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
ath5k_hw_reg_write(ah, 0x00380140,
- AR5K_PHY(83));
+ 0x994c);
else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
ath5k_hw_reg_write(ah, 0x00fc0ec0,
- AR5K_PHY(83));
+ 0x994c);
else /* 2425 */
ath5k_hw_reg_write(ah, 0x00fc0fc0,
- AR5K_PHY(83));
+ 0x994c);
else
- ath5k_hw_reg_write(ah, 0x00000000,
- AR5K_PHY(83));
-
- ath5k_hw_reg_write(ah, 0x000009b5, 0xa228);
- ath5k_hw_reg_write(ah, 0x0000000f, 0x8060);
+ ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
+
+ /* Some bits are disabled here. We know nothing about
+ * register 0xa228 yet; most of the time it ends up
+ * with a value of 0x9b5 (haven't seen any dump with
+ * a different value). */
+ /* Got this from decompiling binary HAL */
+ data = ath5k_hw_reg_read(ah, 0xa228);
+ data &= 0xfffffdff;
+ ath5k_hw_reg_write(ah, data, 0xa228);
+
+ data = ath5k_hw_reg_read(ah, 0xa228);
+ data &= 0xfffe03ff;
+ ath5k_hw_reg_write(ah, data, 0xa228);
+ data = 0;
+
+ /* Just write 0x9b5 ? */
+ /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
+ ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
}
@@ -879,6 +879,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
else
data = 0xffb80d20;
ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
+ data = 0;
}
/*
@@ -898,7 +899,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
/*
* Write RF registers
- * TODO:Does this work on 5211 (5111) ?
*/
ret = ath5k_hw_rfregs(ah, channel, mode);
if (ret)
@@ -935,7 +935,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
return ret;
/* Set antenna mode */
- AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x44),
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
ah->ah_antenna[ee_mode][0], 0xfffffc06);
/*
@@ -965,15 +965,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
ath5k_hw_reg_write(ah,
AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
- AR5K_PHY(0x5a));
+ AR5K_PHY_NFTHRES);
- AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x11),
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
(ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
0xffffc07f);
- AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x12),
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
(ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
0xfffc0fff);
- AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x14),
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
(ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
0xffff0000);
@@ -982,13 +982,13 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
(ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
(ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
(ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
- (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY(0x0d));
+ (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
- AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x0a),
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
- AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x19),
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
(ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
- AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x49), 4, 0xffffff01);
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_CORR_ENABLE |
@@ -1063,7 +1063,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
/*
- * 5111/5112 Specific
+ * On 5211+ read activation -> rx delay
+ * and use it.
*/
if (ah->ah_version != AR5K_AR5210) {
data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
@@ -1071,40 +1072,77 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
data = (channel->hw_value & CHANNEL_CCK) ?
((data << 2) / 22) : (data / 10);
- udelay(100 + data);
+ udelay(100 + (2 * data));
+ data = 0;
} else {
mdelay(1);
}
/*
- * Enable calibration and wait until completion
+ * Perform ADC test (?)
+ */
+ data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ udelay(200);
+ }
+ ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
+ data = 0;
+
+ /*
+ * Start automatic gain calibration
+ *
+ * During AGC calibration RX path is re-routed to
+ * a signal detector so we don't receive anything.
+ *
+ * This method is used to calibrate some static offsets
+ * used together with the on-the-fly I/Q calibration (the
+ * one performed via ath5k_hw_phy_calibrate), which doesn't
+ * interrupt the rx path.
+ *
+ * If we are in a noisy environment AGC calibration may time
+ * out.
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_CAL);
+ /* At the same time start I/Q calibration for QAM constellation
+ * -no need for CCK- */
+ ah->ah_calibration = false;
+ if (!(mode == AR5K_MODE_11B)) {
+ ah->ah_calibration = true;
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_RUN);
+ }
+
+ /* Wait for gain calibration to finish (we check for I/Q calibration
+ * during ath5k_phy_calibrate) */
if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_CAL, 0, false)) {
- ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n",
+ ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
channel->center_freq);
return -EAGAIN;
}
+ /*
+ * Start noise floor calibration
+ *
+ * If we run NF calibration before AGC, it always times out.
+ * Binary HAL starts NF and AGC calibration at the same time
+ * and only waits for AGC to finish. I believe that's wrong because
+ * during NF calibration, rx path is also routed to a detector, so if
+ * it doesn't finish we won't have RX.
+ *
+ * XXX: Find an interval that's OK for all cards...
+ */
ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
if (ret)
return ret;
- ah->ah_calibration = false;
-
- /* A and G modes can use QAM modulation which requires enabling
- * I and Q calibration. Don't bother in B mode. */
- if (!(mode == AR5K_MODE_11B)) {
- ah->ah_calibration = true;
- AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
- AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
- AR5K_PHY_IQ_RUN);
- }
-
/*
* Reset queues and start beacon timers at the end of the reset routine
*/
@@ -1154,6 +1192,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
+
+ data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f;
+ data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
+ 0x00000f80 : 0x00001380;
+ ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
+ data = 0;
}
if (ah->ah_version == AR5K_AR5212) {
@@ -1226,7 +1270,7 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
bool set_chip, u16 sleep_duration)
{
unsigned int i;
- u32 staid;
+ u32 staid, data;
ATH5K_TRACE(ah->ah_sc);
staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
@@ -1238,7 +1282,8 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
case AR5K_PM_NETWORK_SLEEP:
if (set_chip)
ath5k_hw_reg_write(ah,
- AR5K_SLEEP_CTL_SLE | sleep_duration,
+ AR5K_SLEEP_CTL_SLE_ALLOW |
+ sleep_duration,
AR5K_SLEEP_CTL);
staid |= AR5K_STA_ID1_PWR_SV;
@@ -1253,13 +1298,24 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
break;
case AR5K_PM_AWAKE:
+
+ staid &= ~AR5K_STA_ID1_PWR_SV;
+
if (!set_chip)
goto commit;
- ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE,
- AR5K_SLEEP_CTL);
+ /* Preserve sleep duration */
+ data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
+ if( data & 0xffc00000 ){
+ data = 0;
+ } else {
+ data = data & 0xfffcffff;
+ }
+
+ ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
+ udelay(15);
- for (i = 5000; i > 0; i--) {
+ for (i = 50; i > 0; i--) {
/* Check if the chip did wake up */
if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
AR5K_PCICFG_SPWR_DN) == 0)
@@ -1267,15 +1323,13 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
/* Wait a bit and retry */
udelay(200);
- ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE,
- AR5K_SLEEP_CTL);
+ ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
}
/* Fail if the chip didn't wake up */
if (i <= 0)
return -EIO;
- staid &= ~AR5K_STA_ID1_PWR_SV;
break;
default:
@@ -1304,6 +1358,7 @@ void ath5k_hw_start_rx(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
+ ath5k_hw_reg_read(ah, AR5K_CR);
}
/*
@@ -1390,6 +1445,7 @@ int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue)
}
/* Start queue */
ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
+ ath5k_hw_reg_read(ah, AR5K_CR);
} else {
/* Return if queue is disabled */
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
@@ -1687,6 +1743,7 @@ enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
* (they will be re-enabled afterwards).
*/
ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
+ ath5k_hw_reg_read(ah, AR5K_IER);
old_mask = ah->ah_imr;
@@ -3363,11 +3420,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
ath5k_hw_reg_write(ah, ah->ah_turbo ?
AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
- /* Set PHY register 0x9844 (??) */
+ /* Set AR5K_PHY_SETTLING */
ath5k_hw_reg_write(ah, ah->ah_turbo ?
- (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x38 :
- (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x1C,
- AR5K_PHY(17));
+ (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
+ | 0x38 :
+ (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
+ | 0x1C,
+ AR5K_PHY_SETTLING);
/* Set Frame Control Register */
ath5k_hw_reg_write(ah, ah->ah_turbo ?
(AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
@@ -3488,7 +3547,7 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
AR5K_REG_ENABLE_BITS(ah,
AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_TXE);
+ AR5K_QCU_MISC_RDY_VEOL_POLICY);
}
if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 04c84e9da89..2806b21bf90 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -489,7 +489,7 @@ static const struct ath5k_ini ar5212_ini[] = {
{ AR5K_QUEUE_TXDP(9), 0x00000000 },
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_DCU_TXP, 0x00000000 },
- { AR5K_DCU_TX_FILTER, 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0_BASE, 0x00000000 },
/* Unknown table */
{ 0x1078, 0x00000000 },
{ 0x10b8, 0x00000000 },
@@ -679,7 +679,7 @@ static const struct ath5k_ini ar5212_ini[] = {
{ AR5K_PHY(645), 0x00106c10 },
{ AR5K_PHY(646), 0x009c4060 },
{ AR5K_PHY(647), 0x1483800a },
- /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413 */
+ /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413/2425 */
{ AR5K_PHY(648), 0x01831061 },
{ AR5K_PHY(649), 0x00000400 },
/*{ AR5K_PHY(650), 0x000001b5 },*/
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index afd8689e5c0..fa0d47faf57 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -1020,6 +1020,74 @@ static const struct ath5k_ini_rfgain rfgain_2413[] = {
{ AR5K_RF_GAIN(63), { 0x000000f9 } },
};
+/* Initial RF Gain settings for RF2425 */
+static const struct ath5k_ini_rfgain rfgain_2425[] = {
+ { AR5K_RF_GAIN(0), { 0x00000000 } },
+ { AR5K_RF_GAIN(1), { 0x00000040 } },
+ { AR5K_RF_GAIN(2), { 0x00000080 } },
+ { AR5K_RF_GAIN(3), { 0x00000181 } },
+ { AR5K_RF_GAIN(4), { 0x000001c1 } },
+ { AR5K_RF_GAIN(5), { 0x00000001 } },
+ { AR5K_RF_GAIN(6), { 0x00000041 } },
+ { AR5K_RF_GAIN(7), { 0x00000081 } },
+ { AR5K_RF_GAIN(8), { 0x00000188 } },
+ { AR5K_RF_GAIN(9), { 0x000001c8 } },
+ { AR5K_RF_GAIN(10), { 0x00000008 } },
+ { AR5K_RF_GAIN(11), { 0x00000048 } },
+ { AR5K_RF_GAIN(12), { 0x00000088 } },
+ { AR5K_RF_GAIN(13), { 0x00000189 } },
+ { AR5K_RF_GAIN(14), { 0x000001c9 } },
+ { AR5K_RF_GAIN(15), { 0x00000009 } },
+ { AR5K_RF_GAIN(16), { 0x00000049 } },
+ { AR5K_RF_GAIN(17), { 0x00000089 } },
+ { AR5K_RF_GAIN(18), { 0x000001b0 } },
+ { AR5K_RF_GAIN(19), { 0x000001f0 } },
+ { AR5K_RF_GAIN(20), { 0x00000030 } },
+ { AR5K_RF_GAIN(21), { 0x00000070 } },
+ { AR5K_RF_GAIN(22), { 0x00000171 } },
+ { AR5K_RF_GAIN(23), { 0x000001b1 } },
+ { AR5K_RF_GAIN(24), { 0x000001f1 } },
+ { AR5K_RF_GAIN(25), { 0x00000031 } },
+ { AR5K_RF_GAIN(26), { 0x00000071 } },
+ { AR5K_RF_GAIN(27), { 0x000001b8 } },
+ { AR5K_RF_GAIN(28), { 0x000001f8 } },
+ { AR5K_RF_GAIN(29), { 0x00000038 } },
+ { AR5K_RF_GAIN(30), { 0x00000078 } },
+ { AR5K_RF_GAIN(31), { 0x000000b8 } },
+ { AR5K_RF_GAIN(32), { 0x000001b9 } },
+ { AR5K_RF_GAIN(33), { 0x000001f9 } },
+ { AR5K_RF_GAIN(34), { 0x00000039 } },
+ { AR5K_RF_GAIN(35), { 0x00000079 } },
+ { AR5K_RF_GAIN(36), { 0x000000b9 } },
+ { AR5K_RF_GAIN(37), { 0x000000f9 } },
+ { AR5K_RF_GAIN(38), { 0x000000f9 } },
+ { AR5K_RF_GAIN(39), { 0x000000f9 } },
+ { AR5K_RF_GAIN(40), { 0x000000f9 } },
+ { AR5K_RF_GAIN(41), { 0x000000f9 } },
+ { AR5K_RF_GAIN(42), { 0x000000f9 } },
+ { AR5K_RF_GAIN(43), { 0x000000f9 } },
+ { AR5K_RF_GAIN(44), { 0x000000f9 } },
+ { AR5K_RF_GAIN(45), { 0x000000f9 } },
+ { AR5K_RF_GAIN(46), { 0x000000f9 } },
+ { AR5K_RF_GAIN(47), { 0x000000f9 } },
+ { AR5K_RF_GAIN(48), { 0x000000f9 } },
+ { AR5K_RF_GAIN(49), { 0x000000f9 } },
+ { AR5K_RF_GAIN(50), { 0x000000f9 } },
+ { AR5K_RF_GAIN(51), { 0x000000f9 } },
+ { AR5K_RF_GAIN(52), { 0x000000f9 } },
+ { AR5K_RF_GAIN(53), { 0x000000f9 } },
+ { AR5K_RF_GAIN(54), { 0x000000f9 } },
+ { AR5K_RF_GAIN(55), { 0x000000f9 } },
+ { AR5K_RF_GAIN(56), { 0x000000f9 } },
+ { AR5K_RF_GAIN(57), { 0x000000f9 } },
+ { AR5K_RF_GAIN(58), { 0x000000f9 } },
+ { AR5K_RF_GAIN(59), { 0x000000f9 } },
+ { AR5K_RF_GAIN(60), { 0x000000f9 } },
+ { AR5K_RF_GAIN(61), { 0x000000f9 } },
+ { AR5K_RF_GAIN(62), { 0x000000f9 } },
+ { AR5K_RF_GAIN(63), { 0x000000f9 } },
+};
+
static const struct ath5k_gain_opt rfgain_opt_5112 = {
1,
8,
@@ -1588,8 +1656,8 @@ int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq)
freq = 0; /* only 2Ghz */
break;
case AR5K_RF2425:
- ath5k_rfg = rfgain_2413;
- size = ARRAY_SIZE(rfgain_2413);
+ ath5k_rfg = rfgain_2425;
+ size = ARRAY_SIZE(rfgain_2425);
freq = 0; /* only 2Ghz */
break;
default:
@@ -1830,9 +1898,6 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
data = data0 = data1 = data2 = 0;
c = channel->center_freq;
- /*
- * Set the channel on the RF5112 or newer
- */
if (c < 4800) {
if (!((c - 2224) % 5)) {
data0 = ((2 * (c - 704)) - 3040) / 10;
@@ -1844,7 +1909,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
- } else {
+ } else if ((c - (c % 5)) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
data2 = ath5k_hw_bitswap(3, 2);
@@ -1856,6 +1921,9 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
data2 = ath5k_hw_bitswap(1, 2);
} else
return -EINVAL;
+ } else {
+ data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data2 = ath5k_hw_bitswap(0, 2);
}
data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 0x1001;
@@ -1867,6 +1935,45 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
}
/*
+ * Set the channel on the RF2425
+ */
+static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u32 data, data0, data2;
+ u16 c;
+
+ data = data0 = data2 = 0;
+ c = channel->center_freq;
+
+ if (c < 4800) {
+ data0 = ath5k_hw_bitswap((c - 2272), 8);
+ data2 = 0;
+ /* ? 5GHz ? */
+ } else if ((c - (c % 5)) != 2 || c > 5435) {
+ if (!(c % 20) && c < 5120)
+ data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
+ else if (!(c % 10))
+ data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8);
+ else if (!(c % 5))
+ data0 = ath5k_hw_bitswap((c - 4800) / 5, 8);
+ else
+ return -EINVAL;
+ data2 = ath5k_hw_bitswap(1, 2);
+ } else {
+ data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data2 = ath5k_hw_bitswap(0, 2);
+ }
+
+ data = (data0 << 4) | data2 << 2 | 0x1001;
+
+ ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER);
+ ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5);
+
+ return 0;
+}
+
+/*
* Set a channel on the radio chip
*/
int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
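
The new ath5k_hw_rf2425_channel() above builds the RF buffer word for the 2.4 GHz band from data0 = bitswap(freq - 2272, 8) plus a fixed 0x1001 base. Assuming ath5k_hw_bitswap() reverses the lowest n bits (which matches how the driver uses it elsewhere), the value programmed for channel 1 (2412 MHz) can be worked out in the standalone sketch below; the demo_ names are not driver symbols.

#include <stdio.h>
#include <stdint.h>

/* Assumed to mirror ath5k_hw_bitswap(): reverse the lowest 'bits' bits. */
static uint32_t demo_bitswap(uint32_t val, unsigned int bits)
{
	uint32_t rev = 0;
	unsigned int i;

	for (i = 0; i < bits; i++) {
		rev = (rev << 1) | (val & 1);
		val >>= 1;
	}
	return rev;
}

int main(void)
{
	uint16_t c = 2412;			/* channel 1 centre frequency */
	uint32_t data0 = demo_bitswap(c - 2272, 8);
	uint32_t data2 = 0;
	uint32_t data = (data0 << 4) | (data2 << 2) | 0x1001;

	/* The driver then writes the low byte to AR5K_RF_BUFFER and
	 * bits 8..14 to AR5K_RF_BUFFER_CONTROL_5. */
	printf("data=0x%04x -> buffer=0x%02x ctrl5=0x%02x\n",
	       (unsigned)data, (unsigned)(data & 0xff),
	       (unsigned)((data >> 8) & 0x7f));
	return 0;
}
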
@@ -1895,6 +2002,9 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
case AR5K_RF5111:
ret = ath5k_hw_rf5111_channel(ah, channel);
break;
+ case AR5K_RF2425:
+ ret = ath5k_hw_rf2425_channel(ah, channel);
+ break;
default:
ret = ath5k_hw_rf5112_channel(ah, channel);
break;
@@ -1903,6 +2013,15 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
if (ret)
return ret;
+ /* Set JAPAN setting for channel 14 */
+ if (channel->center_freq == 2484) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
+ AR5K_PHY_CCKTXCTL_JAPAN);
+ } else {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
+ AR5K_PHY_CCKTXCTL_WORLD);
+ }
+
ah->ah_current_channel.center_freq = channel->center_freq;
ah->ah_current_channel.hw_value = channel->hw_value;
ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false;
@@ -1933,6 +2052,8 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
* http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \
* &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7
*
+ * XXX: Since, according to the patent, the antennas are detached during
+ * noise floor calibration, we should stop the tx queues here.
*/
int
ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
@@ -1942,7 +2063,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
s32 noise_floor;
/*
- * Enable noise floor calibration and wait until completion
+ * Enable noise floor calibration
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_NF);
@@ -1952,7 +2073,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
if (ret) {
ATH5K_ERR(ah->ah_sc,
"noise floor calibration timeout (%uMHz)\n", freq);
- return ret;
+ return -EAGAIN;
}
/* Wait until the noise floor is calibrated and read the value */
@@ -1974,7 +2095,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
if (noise_floor > AR5K_TUNE_NOISE_FLOOR) {
ATH5K_ERR(ah->ah_sc,
"noise floor calibration failed (%uMHz)\n", freq);
- return -EIO;
+ return -EAGAIN;
}
ah->ah_noise_floor = noise_floor;
@@ -2087,38 +2208,66 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
}
/*
- * Perform a PHY calibration on RF5111/5112
+ * Perform a PHY calibration on RF5111/5112 and newer chips
*/
static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 i_pwr, q_pwr;
s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
+ int i;
ATH5K_TRACE(ah->ah_sc);
if (!ah->ah_calibration ||
- ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
+ ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
goto done;
- ah->ah_calibration = false;
+ /* Calibration has finished, get the results and re-run */
+ for (i = 0; i <= 10; i++) {
+ iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
+ i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
+ q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
+ }
- iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
- i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
- q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
- q_coffd = q_pwr >> 6;
+ q_coffd = q_pwr >> 7;
+ /* No correction */
if (i_coffd == 0 || q_coffd == 0)
goto done;
i_coff = ((-iq_corr) / i_coffd) & 0x3f;
- q_coff = (((s32)i_pwr / q_coffd) - 64) & 0x1f;
- /* Commit new IQ value */
+ /* Boundary check */
+ if (i_coff > 31)
+ i_coff = 31;
+ if (i_coff < -32)
+ i_coff = -32;
+
+ q_coff = (((s32)i_pwr / q_coffd) - 128) & 0x1f;
+
+ /* Boundary check */
+ if (q_coff > 15)
+ q_coff = 15;
+ if (q_coff < -16)
+ q_coff = -16;
+
+ /* Commit new I/Q value */
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE |
((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S));
+ /* Re-enable calibration -if we don't we'll commit
+ * the same values again and again */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
+
done:
+
+ /* TODO: Separate noise floor calibration from I/Q calibration
+ * since noise floor calibration interrupts rx path while I/Q
+ * calibration doesn't. We don't need to run noise floor calibration
+ * as often as I/Q calibration.*/
ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
/* Request RF gain */
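
The I/Q hunk above changes q_coffd from q_pwr >> 6 to q_pwr >> 7 and adds boundary checks on both coefficients. A standalone rerun of that arithmetic with made-up power and correlation readings shows the coefficients that would be committed; note that, exactly as in the patch, the & 0x3f / & 0x1f masks are applied before the clamps.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Sample register readings - made-up numbers, only to exercise the math. */
	uint32_t i_pwr = 65536, q_pwr = 61440;
	int32_t iq_corr = -2000;
	int32_t i_coff, i_coffd, q_coff, q_coffd;

	/* Same arithmetic as the patched ath5k_hw_rf511x_calibrate() */
	i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
	q_coffd = q_pwr >> 7;
	if (i_coffd == 0 || q_coffd == 0)
		return 0;			/* no correction */

	i_coff = ((-iq_corr) / i_coffd) & 0x3f;
	if (i_coff > 31)
		i_coff = 31;
	if (i_coff < -32)
		i_coff = -32;

	q_coff = (((int32_t)i_pwr / q_coffd) - 128) & 0x1f;
	if (q_coff > 15)
		q_coff = 15;
	if (q_coff < -16)
		q_coff = -16;

	printf("i_coff=%d q_coff=%d\n", i_coff, q_coff);	/* i_coff=4 q_coff=8 */
	return 0;
}
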
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 30629b3e37c..7562bf173d3 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -53,7 +53,7 @@
#define AR5K_CR_TXD0 0x00000008 /* TX Disable for queue 0 on 5210 */
#define AR5K_CR_TXD1 0x00000010 /* TX Disable for queue 1 on 5210 */
#define AR5K_CR_RXD 0x00000020 /* RX Disable */
-#define AR5K_CR_SWI 0x00000040
+#define AR5K_CR_SWI 0x00000040 /* Software Interrupt */
/*
* RX Descriptor Pointer register
@@ -65,19 +65,19 @@
*/
#define AR5K_CFG 0x0014 /* Register Address */
#define AR5K_CFG_SWTD 0x00000001 /* Byte-swap TX descriptor (for big endian archs) */
-#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer (?) */
+#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer */
#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */
-#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer (?) */
-#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register values (?) */
-#define AR5K_CFG_ADHOC 0x00000020 /* [5211+] */
+#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */
+#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */
+#define AR5K_CFG_ADHOC 0x00000020 /* AP/Adhoc indication [5211+] */
#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */
#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */
-#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (?) */
+#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */
#define AR5K_CFG_TXCNT 0x00007800 /* Tx frame count (?) [5210] */
#define AR5K_CFG_TXCNT_S 11
#define AR5K_CFG_TXFSTAT 0x00008000 /* Tx frame status (?) [5210] */
#define AR5K_CFG_TXFSTRT 0x00010000 /* [5210] */
-#define AR5K_CFG_PCI_THRES 0x00060000 /* [5211+] */
+#define AR5K_CFG_PCI_THRES 0x00060000 /* PCI Master req q threshold [5211+] */
#define AR5K_CFG_PCI_THRES_S 17
/*
@@ -162,35 +162,40 @@
/*
* Transmit configuration register
*/
-#define AR5K_TXCFG 0x0030 /* Register Address */
-#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size */
-#define AR5K_TXCFG_SDMAMR_S 0
-#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */
-#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */
-#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Triger level mask */
-#define AR5K_TXCFG_TXFULL_S 4
-#define AR5K_TXCFG_TXFULL_0B 0x00000000
-#define AR5K_TXCFG_TXFULL_64B 0x00000010
-#define AR5K_TXCFG_TXFULL_128B 0x00000020
-#define AR5K_TXCFG_TXFULL_192B 0x00000030
-#define AR5K_TXCFG_TXFULL_256B 0x00000040
-#define AR5K_TXCFG_TXCONT_EN 0x00000080
-#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */
-#define AR5K_TXCFG_JUMBO_TXE 0x00000400 /* Enable jumbo frames transmition (?) [5211+] */
-#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */
-#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */
-#define AR5K_TXCFG_RDY_DIS 0x00004000 /* [5211+] */
+#define AR5K_TXCFG 0x0030 /* Register Address */
+#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size (read) */
+#define AR5K_TXCFG_SDMAMR_S 0
+#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */
+#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */
+#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Trigger level mask */
+#define AR5K_TXCFG_TXFULL_S 4
+#define AR5K_TXCFG_TXFULL_0B 0x00000000
+#define AR5K_TXCFG_TXFULL_64B 0x00000010
+#define AR5K_TXCFG_TXFULL_128B 0x00000020
+#define AR5K_TXCFG_TXFULL_192B 0x00000030
+#define AR5K_TXCFG_TXFULL_256B 0x00000040
+#define AR5K_TXCFG_TXCONT_EN 0x00000080
+#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */
+#define AR5K_TXCFG_JUMBO_DESC_EN 0x00000400 /* Enable jumbo tx descriptors [5211+] */
+#define AR5K_TXCFG_ADHOC_BCN_ATIM 0x00000800 /* Adhoc Beacon ATIM Policy */
+#define AR5K_TXCFG_ATIM_WINDOW_DEF_DIS 0x00001000 /* Disable ATIM window defer [5211+] */
+#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */
+#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */
+#define AR5K_TXCFG_RDY_CBR_DIS 0x00004000 /* Ready time CBR disable [5211+] */
+#define AR5K_TXCFG_JUMBO_FRM_MODE 0x00008000 /* Jumbo frame mode [5211+] */
+#define AR5K_TXCFG_DCU_CACHING_DIS 0x00010000 /* Disable DCU caching */
/*
* Receive configuration register
*/
#define AR5K_RXCFG 0x0034 /* Register Address */
-#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size */
+#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size (write) */
#define AR5K_RXCFG_SDMAMW_S 0
-#define AR5K_RXCFG_DEF_ANTENNA 0x00000008 /* Default antenna */
-#define AR5K_RXCFG_ZLFDMA 0x00000010 /* Zero-length DMA */
-#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo frames reception (?) [5211+] */
-#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames (?) [5211+] */
+#define AR5K_RXCFG_ZLFDMA 0x00000008 /* Enable Zero-length frame DMA */
+#define AR5K_RXCFG_DEF_ANTENNA 0x00000010 /* Default antenna (?) */
+#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo rx descriptors [5211+] */
+#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames [5211+] */
+#define AR5K_RXCFG_SLE_ENTRY 0x00000080 /* Sleep entry policy */
/*
* Receive jumbo descriptor last address register
@@ -202,35 +207,35 @@
* MIB control register
*/
#define AR5K_MIBC 0x0040 /* Register Address */
-#define AR5K_MIBC_COW 0x00000001
-#define AR5K_MIBC_FMC 0x00000002 /* Freeze Mib Counters (?) */
-#define AR5K_MIBC_CMC 0x00000004 /* Clean Mib Counters (?) */
-#define AR5K_MIBC_MCS 0x00000008
+#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
+#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
+#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
+#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
/*
* Timeout prescale register
*/
#define AR5K_TOPS 0x0044
-#define AR5K_TOPS_M 0x0000ffff /* [5211+] (?) */
+#define AR5K_TOPS_M 0x0000ffff
/*
* Receive timeout register (no frame received)
*/
#define AR5K_RXNOFRM 0x0048
-#define AR5K_RXNOFRM_M 0x000003ff /* [5211+] (?) */
+#define AR5K_RXNOFRM_M 0x000003ff
/*
* Transmit timeout register (no frame sent)
*/
#define AR5K_TXNOFRM 0x004c
-#define AR5K_TXNOFRM_M 0x000003ff /* [5211+] (?) */
-#define AR5K_TXNOFRM_QCU 0x000ffc00 /* [5211+] (?) */
+#define AR5K_TXNOFRM_M 0x000003ff
+#define AR5K_TXNOFRM_QCU 0x000ffc00
/*
* Receive frame gap timeout register
*/
#define AR5K_RPGTO 0x0050
-#define AR5K_RPGTO_M 0x000003ff /* [5211+] (?) */
+#define AR5K_RPGTO_M 0x000003ff
/*
* Receive frame count limit register
@@ -241,6 +246,7 @@
/*
* Misc settings register
+ * (reserved0-3)
*/
#define AR5K_MISC 0x0058 /* Register Address */
#define AR5K_MISC_DMA_OBS_M 0x000001e0
@@ -256,6 +262,7 @@
/*
* QCU/DCU clock gating register (5311)
+ * (reserved4-5)
*/
#define AR5K_QCUDCU_CLKGT 0x005c /* Register Address (?) */
#define AR5K_QCUDCU_CLKGT_QCU 0x0000ffff /* Mask for QCU clock */
@@ -284,18 +291,18 @@
#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
-#define AR5K_ISR_SWI 0x00002000 /* Software interrupt (?) */
+#define AR5K_ISR_SWI 0x00002000 /* Software interrupt */
#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */
-#define AR5K_ISR_RXKCM 0x00008000
+#define AR5K_ISR_RXKCM 0x00008000 /* RX Key cache miss */
#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
#define AR5K_ISR_BRSSI 0x00020000
#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
-#define AR5K_ISR_MCABT 0x00100000 /* [5210] */
-#define AR5K_ISR_RXCHIRP 0x00200000 /* [5212+] */
-#define AR5K_ISR_SSERR 0x00200000 /* [5210] */
-#define AR5K_ISR_DPERR 0x00400000 /* [5210] */
+#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
+#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
+#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
+#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
#define AR5K_ISR_TIM 0x00800000 /* [5210] */
#define AR5K_ISR_BCNMISC 0x00800000 /* [5212+] */
#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill)*/
@@ -320,14 +327,14 @@
#define AR5K_SISR2 0x008c /* Register Address [5211+] */
#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
-#define AR5K_SISR2_MCABT 0x00100000
-#define AR5K_SISR2_SSERR 0x00200000
-#define AR5K_SISR2_DPERR 0x00400000
+#define AR5K_SISR2_MCABT 0x00100000 /* Master Cycle Abort */
+#define AR5K_SISR2_SSERR 0x00200000 /* Signaled System Error */
+#define AR5K_SISR2_DPERR 0x00400000 /* Det par Error (?) */
#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */
#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */
-#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* [5212+] */
-#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* [5212+] */
-#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* [5212+] */
+#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* DTIM sync lost [5212+] */
+#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
+#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
@@ -368,18 +375,18 @@
#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/
#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/
#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/
-#define AR5K_IMR_SWI 0x00002000
+#define AR5K_IMR_SWI 0x00002000 /* Software interrupt */
#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/
-#define AR5K_IMR_RXKCM 0x00008000
+#define AR5K_IMR_RXKCM 0x00008000 /* RX Key cache miss */
#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/
#define AR5K_IMR_BRSSI 0x00020000
#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/
#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */
-#define AR5K_IMR_MCABT 0x00100000 /* [5210] */
-#define AR5K_IMR_RXCHIRP 0x00200000 /* [5212+]*/
-#define AR5K_IMR_SSERR 0x00200000 /* [5210] */
-#define AR5K_IMR_DPERR 0x00400000 /* [5210] */
+#define AR5K_IMR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
+#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/
+#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */
+#define AR5K_IMR_DPERR 0x00400000 /* Det par Error (?) [5210] */
#define AR5K_IMR_TIM 0x00800000 /* [5211+] */
#define AR5K_IMR_BCNMISC 0x00800000 /* [5212+] */
#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/
@@ -405,14 +412,14 @@
#define AR5K_SIMR2 0x00ac /* Register Address [5211+] */
#define AR5K_SIMR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
#define AR5K_SIMR2_QCU_TXURN_S 0
-#define AR5K_SIMR2_MCABT 0x00100000
-#define AR5K_SIMR2_SSERR 0x00200000
-#define AR5K_SIMR2_DPERR 0x00400000
+#define AR5K_SIMR2_MCABT 0x00100000 /* Master Cycle Abort */
+#define AR5K_SIMR2_SSERR 0x00200000 /* Signaled System Error */
+#define AR5K_SIMR2_DPERR 0x00400000 /* Det par Error (?) */
#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */
#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */
-#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* [5212+] */
-#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* [5212+] */
-#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* [5212+] */
+#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* DTIM Sync lost [5212+] */
+#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
+#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */
#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */
@@ -425,23 +432,69 @@
#define AR5K_SIMR4_QTRIG 0x000003ff /* Mask for QTRIG */
#define AR5K_SIMR4_QTRIG_S 0
+/*
+ * DMA Debug registers 0-7
+ * 0xe0 - 0xfc
+ */
/*
* Decompression mask registers [5212+]
*/
-#define AR5K_DCM_ADDR 0x0400 /*Decompression mask address (?)*/
-#define AR5K_DCM_DATA 0x0404 /*Decompression mask data (?)*/
+#define AR5K_DCM_ADDR 0x0400 /* Decompression mask address (index) */
+#define AR5K_DCM_DATA 0x0404 /* Decompression mask data */
+
+/*
+ * Wake On Wireless pattern control register [5212+]
+ */
+#define AR5K_WOW_PCFG 0x0410 /* Register Address */
+#define AR5K_WOW_PCFG_PAT_MATCH_EN 0x00000001 /* Pattern match enable */
+#define AR5K_WOW_PCFG_LONG_FRAME_POL 0x00000002 /* Long frame policy */
+#define AR5K_WOW_PCFG_WOBMISS 0x00000004 /* Wake on bea(con) miss (?) */
+#define AR5K_WOW_PCFG_PAT_0_EN 0x00000100 /* Enable pattern 0 */
+#define AR5K_WOW_PCFG_PAT_1_EN 0x00000200 /* Enable pattern 1 */
+#define AR5K_WOW_PCFG_PAT_2_EN 0x00000400 /* Enable pattern 2 */
+#define AR5K_WOW_PCFG_PAT_3_EN 0x00000800 /* Enable pattern 3 */
+#define AR5K_WOW_PCFG_PAT_4_EN 0x00001000 /* Enable pattern 4 */
+#define AR5K_WOW_PCFG_PAT_5_EN 0x00002000 /* Enable pattern 5 */
+
+/*
+ * Wake On Wireless pattern index register (?) [5212+]
+ */
+#define AR5K_WOW_PAT_IDX 0x0414
+
+/*
+ * Wake On Wireless pattern data register [5212+]
+ */
+#define AR5K_WOW_PAT_DATA 0x0418 /* Register Address */
+#define AR5K_WOW_PAT_DATA_0_3_V 0x00000001 /* Pattern 0, 3 value */
+#define AR5K_WOW_PAT_DATA_1_4_V 0x00000100 /* Pattern 1, 4 value */
+#define AR5K_WOW_PAT_DATA_2_5_V 0x00010000 /* Pattern 2, 5 value */
+#define AR5K_WOW_PAT_DATA_0_3_M 0x01000000 /* Pattern 0, 3 mask */
+#define AR5K_WOW_PAT_DATA_1_4_M 0x04000000 /* Pattern 1, 4 mask */
+#define AR5K_WOW_PAT_DATA_2_5_M 0x10000000 /* Pattern 2, 5 mask */
/*
* Decompression configuration registers [5212+]
*/
-#define AR5K_DCCFG 0x0420
+#define AR5K_DCCFG 0x0420 /* Register Address */
+#define AR5K_DCCFG_GLOBAL_EN 0x00000001 /* Enable decompression on all queues */
+#define AR5K_DCCFG_BYPASS_EN 0x00000002 /* Bypass decompression */
+#define AR5K_DCCFG_BCAST_EN 0x00000004 /* Enable decompression for bcast frames */
+#define AR5K_DCCFG_MCAST_EN 0x00000008 /* Enable decompression for mcast frames */
/*
* Compression configuration registers [5212+]
*/
-#define AR5K_CCFG 0x0600
-#define AR5K_CCFG_CUP 0x0604
+#define AR5K_CCFG 0x0600 /* Register Address */
+#define AR5K_CCFG_WINDOW_SIZE 0x00000007 /* Compression window size */
+#define AR5K_CCFG_CPC_EN 0x00000008 /* Enable performance counters */
+
+#define AR5K_CCFG_CCU 0x0604 /* Register Address */
+#define AR5K_CCFG_CCU_CUP_EN 0x00000001 /* CCU Catchup enable */
+#define AR5K_CCFG_CCU_CREDIT 0x00000002 /* CCU Credit (field) */
+#define AR5K_CCFG_CCU_CD_THRES 0x00000080 /* CCU Cyc(lic?) debt threshold (field) */
+#define AR5K_CCFG_CCU_CUP_LCNT 0x00010000 /* CCU Catchup lit(?) count */
+#define AR5K_CCFG_CCU_INIT 0x00100200 /* Initial value during reset */
/*
* Compression performance counter registers [5212+]
@@ -450,7 +503,7 @@
#define AR5K_CPC1 0x0614 /* Compression performance counter 1*/
#define AR5K_CPC2 0x0618 /* Compression performance counter 2 */
#define AR5K_CPC3 0x061c /* Compression performance counter 3 */
-#define AR5K_CPCORN 0x0620 /* Compression performance overrun (?) */
+#define AR5K_CPCOVF 0x0620 /* Compression performance overflow */
/*
@@ -466,8 +519,6 @@
* set/clear, which contain status for all queues (we shift by 1 for each
* queue). To access these registers easily we define some macros here
* that are used inside HAL. For more infos check out *_tx_queue functs.
- *
- * TODO: Boundary checking on macros (here?)
*/
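
The comment above covers two access patterns that recur throughout this header: single registers carrying one status/enable bit per queue, and per-queue register banks reached through AR5K_QUEUE_REG. A hedged sketch of both, assuming a 4-byte stride for the banked case (which matches the other (_n) << 2 tables in this file); the macro names here are illustrative, not the driver's.

/* one bit per queue inside a shared single register */
#define QUEUE_BIT(_q)		(1U << (_q))

/* one 32-bit register per queue, 4 bytes apart above _base */
#define QUEUE_REG(_base, _q)	((_base) + ((_q) << 2))

/* e.g. QUEUE_BIT(3) selects queue 3 in a TXURN-style mask, while
 * QUEUE_REG(0x09c0, 3) would be queue 3's entry in a QCU_MISC-style bank. */
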
/*
@@ -513,7 +564,6 @@
#define AR5K_QCU_RDYTIMECFG_BASE 0x0900 /* Register Address - Queue0 RDYTIMECFG */
#define AR5K_QCU_RDYTIMECFG_INTVAL 0x00ffffff /* Ready time interval mask */
#define AR5K_QCU_RDYTIMECFG_INTVAL_S 0
-#define AR5K_QCU_RDYTIMECFG_DURATION 0x00ffffff /* Ready time duration mask */
#define AR5K_QCU_RDYTIMECFG_ENABLE 0x01000000 /* Ready time enable mask */
#define AR5K_QUEUE_RDYTIMECFG(_q) AR5K_QUEUE_REG(AR5K_QCU_RDYTIMECFG_BASE, _q)
@@ -534,19 +584,20 @@
*/
#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */
#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame sheduling mask */
-#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */
-#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */
-#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated (?) */
-#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* Time gated (?) */
+#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */
+#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */
+#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated (?) */
+#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* Time gated (?) */
#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated (?) */
#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */
#define AR5K_QCU_MISC_CBREXP 0x00000020 /* CBR expired (normal queue) */
#define AR5K_QCU_MISC_CBREXP_BCN 0x00000040 /* CBR expired (beacon queue) */
-#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Beacons enabled */
-#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR threshold enabled (?) */
-#define AR5K_QCU_MISC_TXE 0x00000200 /* TXE reset when RDYTIME enalbed (?) */
-#define AR5K_QCU_MISC_CBR 0x00000400 /* CBR threshold reset (?) */
-#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU reset (?) */
+#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Enable Beacon use */
+#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR threshold enabled */
+#define AR5K_QCU_MISC_RDY_VEOL_POLICY 0x00000200 /* TXE reset when RDYTIME enabled */
+#define AR5K_QCU_MISC_CBR_RESET_CNT 0x00000400 /* CBR threshold (counter) reset */
+#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU early termination */
+#define AR5K_QCU_MISC_DCU_CMP_EN 0x00001000 /* Enable frame compression */
#define AR5K_QUEUE_MISC(_q) AR5K_QUEUE_REG(AR5K_QCU_MISC_BASE, _q)
@@ -555,7 +606,7 @@
*/
#define AR5K_QCU_STS_BASE 0x0a00 /* Register Address - Queue0 STS */
#define AR5K_QCU_STS_FRMPENDCNT 0x00000003 /* Frames pending counter */
-#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter (?) */
+#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter */
#define AR5K_QUEUE_STATUS(_q) AR5K_QUEUE_REG(AR5K_QCU_STS_BASE, _q)
/*
@@ -569,9 +620,11 @@
*/
#define AR5K_QCU_CBB_SELECT 0x0b00
#define AR5K_QCU_CBB_ADDR 0x0b04
+#define AR5K_QCU_CBB_ADDR_S 9
/*
* QCU compression buffer configuration register [5212+]
+ * (buffer size)
*/
#define AR5K_QCU_CBCFG 0x0b08
@@ -652,80 +705,100 @@
* No lockout means there is no special handling.
*/
#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */
-#define AR5K_DCU_MISC_BACKOFF 0x000007ff /* Mask for backoff setting (?) */
+#define AR5K_DCU_MISC_BACKOFF 0x000007ff /* Mask for backoff threshold */
#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */
-#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll (?) */
-#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff (?) */
-#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch (?) */
+#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll enable */
+#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff */
+#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch */
#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */
-#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0
-#define AR5K_DCU_MISC_VIRTCOL_MODIFIED 1
-#define AR5K_DCU_MISC_VIRTCOL_IGNORE 2
-#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Beacon enable (?) */
+#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0
+#define AR5K_DCU_MISC_VIRTCOL_MODIFIED 1
+#define AR5K_DCU_MISC_VIRTCOL_IGNORE 2
+#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Enable Beacon use */
#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */
#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17
-#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */
+#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */
#define AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM 1 /* Intra-frame lockout */
#define AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL 2 /* Global lockout */
-#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000
-#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment (?) */
-#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff (?) */
-#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision policy (?) */
-#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000
+#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000 /* Ignore Arbiter lockout */
+#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment */
+#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff */
+#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision cw policy */
+#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000 /* Blown IFS policy (?) */
#define AR5K_DCU_MISC_SEQNUM_CTL 0x01000000 /* Sequence number control (?) */
#define AR5K_QUEUE_DFS_MISC(_q) AR5K_QUEUE_REG(AR5K_DCU_MISC_BASE, _q)
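
Like every other <FIELD>/<FIELD>_S pair in this header, the arbiter lockout field above is programmed with a read-modify-write: clear the masked bits, then or-in the new value shifted into place. A self-contained C sketch of that idiom follows; the fake_regs window and the helper names are assumptions for illustration only, the real driver goes through its own MMIO accessors (the AR5K_REG_WRITE_BITS call in the calibration hunk earlier uses the same pattern).

#include <stdint.h>

/* fake 64 KiB register window so the sketch is self-contained */
static uint32_t fake_regs[0x10000 / 4];

static uint32_t reg_read(uint32_t reg)
{
	return fake_regs[reg >> 2];
}

static void reg_write(uint32_t reg, uint32_t val)
{
	fake_regs[reg >> 2] = val;
}

/* read-modify-write of a masked field */
static void reg_write_bits(uint32_t reg, uint32_t mask, uint32_t shift,
			   uint32_t val)
{
	uint32_t v = reg_read(reg);

	reg_write(reg, (v & ~mask) | ((val << shift) & mask));
}

/* e.g. intra-frame arbiter lockout for queue 1, using the defines above:
 * reg_write_bits(AR5K_QUEUE_DFS_MISC(1), AR5K_DCU_MISC_ARBLOCK_CTL,
 *		  AR5K_DCU_MISC_ARBLOCK_CTL_S,
 *		  AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM); */
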
/*
* DCU frame sequence number registers
*/
-#define AR5K_DCU_SEQNUM_BASE 0x1140
-#define AR5K_DCU_SEQNUM_M 0x00000fff
+#define AR5K_DCU_SEQNUM_BASE 0x1140
+#define AR5K_DCU_SEQNUM_M 0x00000fff
#define AR5K_QUEUE_DFS_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q)
/*
- * DCU global IFS SIFS registers
+ * DCU global IFS SIFS register
*/
#define AR5K_DCU_GBL_IFS_SIFS 0x1030
#define AR5K_DCU_GBL_IFS_SIFS_M 0x0000ffff
/*
- * DCU global IFS slot interval registers
+ * DCU global IFS slot interval register
*/
#define AR5K_DCU_GBL_IFS_SLOT 0x1070
#define AR5K_DCU_GBL_IFS_SLOT_M 0x0000ffff
/*
- * DCU global IFS EIFS registers
+ * DCU global IFS EIFS register
*/
#define AR5K_DCU_GBL_IFS_EIFS 0x10b0
#define AR5K_DCU_GBL_IFS_EIFS_M 0x0000ffff
/*
- * DCU global IFS misc registers
+ * DCU global IFS misc register
+ *
+ * LFSR stands for Linear Feedback Shift Register
+ * and it's used for generating pseudo-random
+ * number sequences.
+ *
+ * (If I understand correctly, random numbers are
+ * used for idle sensing - multiplied with cwmin/max etc.)
*/
#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */
-#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007
-#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode (?) */
-#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask (?) */
-#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00
-#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000
+#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */
+#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */
+#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */
+#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */
+#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */
+#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST 0x00400000 /* SIFS cnt reset policy (?) */
+#define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST 0x00800000 /* AIFS cnt reset policy (?) */
+#define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS 0x01000000 /* Disable random LFSR slice */
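
The comment on this register notes that an LFSR supplies the pseudo-random sequence used around backoff/idle sensing. Purely to illustrate what one step of a Galois LFSR computes (the 7-bit width and the tap mask below are arbitrary choices for the example, not taken from the hardware):

#include <stdint.h>

/* toy 7-bit Galois LFSR step; taps are illustrative only */
static uint8_t lfsr7_next(uint8_t state)
{
	uint8_t lsb = state & 1;

	state >>= 1;
	if (lsb)
		state ^= 0x60;	/* feedback taps */
	return state & 0x7f;
}
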
/*
* DCU frame prefetch control register
*/
-#define AR5K_DCU_FP 0x1230
+#define AR5K_DCU_FP 0x1230 /* Register Address */
+#define AR5K_DCU_FP_NOBURST_DCU_EN 0x00000001 /* Enable non-burst prefetch on DCU (?) */
+#define AR5K_DCU_FP_NOBURST_EN 0x00000010 /* Enable non-burst prefetch (?) */
+#define AR5K_DCU_FP_BURST_DCU_EN 0x00000020 /* Enable burst prefetch on DCU (?) */
/*
* DCU transmit pause control/status register
*/
#define AR5K_DCU_TXP 0x1270 /* Register Address */
-#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask (?) */
-#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status (?) */
+#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask */
+#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status */
+
+/*
+ * DCU transmit filter table 0 (32 entries)
+ */
+#define AR5K_DCU_TX_FILTER_0_BASE 0x1038
+#define AR5K_DCU_TX_FILTER_0(_n) (AR5K_DCU_TX_FILTER_0_BASE + (_n * 64))
/*
- * DCU transmit filter register
+ * DCU transmit filter table 1 (16 entries)
*/
-#define AR5K_DCU_TX_FILTER 0x1038
+#define AR5K_DCU_TX_FILTER_1_BASE 0x103c
+#define AR5K_DCU_TX_FILTER_1(_n) (AR5K_DCU_TX_FILTER_1_BASE + ((_n - 32) * 64))
/*
* DCU clear transmit filter register
@@ -739,9 +812,6 @@
/*
* Reset control register
- *
- * 4 and 8 are not used in 5211/5212 and
- * 2 means "baseband reset" on 5211/5212.
*/
#define AR5K_RESET_CTL 0x4000 /* Register Address */
#define AR5K_RESET_CTL_PCU 0x00000001 /* Protocol Control Unit reset */
@@ -765,6 +835,7 @@
#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */
#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000
#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */
+/* more bits */
/*
* Interrupt pending register
@@ -776,13 +847,14 @@
* Sleep force register
*/
#define AR5K_SFR 0x400c
-#define AR5K_SFR_M 0x00000001
+#define AR5K_SFR_EN 0x00000001
/*
* PCI configuration register
*/
#define AR5K_PCICFG 0x4010 /* Register Address */
#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */
+#define AR5K_PCICFG_SLEEP_CLOCK_EN 0x00000002 /* Enable sleep clock (?) */
#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */
#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */
#define AR5K_PCICFG_EESIZE_S 3
@@ -798,19 +870,21 @@
#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix (?) */
#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep (?) */
#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */
-#define AR5K_PCICFG_SL_INPEN 0x00002800 /* Sleep even whith pending interrupts (?) */
+#define AR5K_PCICFG_UNK 0x00001000 /* Passed on some parts during attach (?) */
+#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even with pending interrupts (?) */
#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */
#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */
#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */
#define AR5K_PCICFG_LEDMODE_PROM 0x00020000 /* Default mode (blink on any traffic) [5211+] */
#define AR5K_PCICFG_LEDMODE_PWR 0x00040000 /* Some other blinking mode (?) [5211+] */
#define AR5K_PCICFG_LEDMODE_RAND 0x00060000 /* Random blinking (?) [5211+] */
-#define AR5K_PCICFG_LEDBLINK 0x00700000
+#define AR5K_PCICFG_LEDBLINK 0x00700000 /* Led blink rate */
#define AR5K_PCICFG_LEDBLINK_S 20
-#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slow led blink rate (?) [5211+] */
+#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slowest led blink rate [5211+] */
#define AR5K_PCICFG_LEDSTATE \
(AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \
AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW)
+#define AR5K_PCICFG_SLEEP_CLOCK_RATE 0x03000000 /* Sleep clock rate (field) */
/*
* "General Purpose Input/Output" (GPIO) control register
@@ -947,7 +1021,7 @@
#define AR5K_EEPROM_VERSION_4_4 0x4004
#define AR5K_EEPROM_VERSION_4_5 0x4005
#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
-#define AR5K_EEPROM_VERSION_4_7 0x3007
+#define AR5K_EEPROM_VERSION_4_7 0x4007
#define AR5K_EEPROM_MODE_11A 0
#define AR5K_EEPROM_MODE_11B 1
@@ -1023,10 +1097,14 @@
#define AR5K_EEPROM_STAT_WRDONE 0x00000008 /* EEPROM write successful */
/*
- * EEPROM config register (?)
+ * EEPROM config register
*/
-#define AR5K_EEPROM_CFG 0x6010
-
+#define AR5K_EEPROM_CFG 0x6010 /* Register Address */
+#define AR5K_EEPROM_CFG_SIZE_OVR 0x00000001
+#define AR5K_EEPROM_CFG_WR_WAIT_DIS 0x00000004 /* Disable write wait */
+#define AR5K_EEPROM_CFG_CLK_RATE 0x00000018 /* Clock rate */
+#define AR5K_EEPROM_CFG_PROT_KEY 0x00ffff00 /* Protection key */
+#define AR5K_EEPROM_CFG_LIND_EN 0x01000000 /* Enable length indicator (?) */
/*
@@ -1050,7 +1128,7 @@
#define AR5K_STA_ID1 0x8004 /* Register Address */
#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */
#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */
-#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting (?) */
+#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */
#define AR5K_STA_ID1_NO_KEYSRCH 0x00080000 /* No key search */
#define AR5K_STA_ID1_NO_PSPOLL 0x00100000 /* No power save polling [5210] */
#define AR5K_STA_ID1_PCF_5211 0x00100000 /* Enable PCF on [5211+] */
@@ -1059,9 +1137,13 @@
AR5K_STA_ID1_PCF_5210 : AR5K_STA_ID1_PCF_5211)
#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
-#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS (?) */
-#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS (?) */
+#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
+#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate (for ACK/CTS ?) [5211+] */
+#define AR5K_STA_ID1_SELF_GEN_SECTORE 0x04000000 /* Self generate sector (?) */
+#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
+#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Keysearch mode (?) */
+#define AR5K_STA_ID1_PRESERVE_SEQ_NUM 0x20000000 /* Preserve sequence number */
/*
* First BSSID register (MAC address, lower 32bits)
@@ -1117,7 +1199,7 @@
*
* Retry limit register for 5210 (no QCU/DCU so it's done in PCU)
*/
-#define AR5K_NODCU_RETRY_LMT 0x801c /*Register Address */
+#define AR5K_NODCU_RETRY_LMT 0x801c /* Register Address */
#define AR5K_NODCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */
#define AR5K_NODCU_RETRY_LMT_SH_RETRY_S 0
#define AR5K_NODCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry mask */
@@ -1136,9 +1218,9 @@
#define AR5K_USEC_5211 0x801c /* Register Address [5211+] */
#define AR5K_USEC (ah->ah_version == AR5K_AR5210 ? \
AR5K_USEC_5210 : AR5K_USEC_5211)
-#define AR5K_USEC_1 0x0000007f
+#define AR5K_USEC_1 0x0000007f /* clock cycles for 1us */
#define AR5K_USEC_1_S 0
-#define AR5K_USEC_32 0x00003f80
+#define AR5K_USEC_32 0x00003f80 /* clock cycles for 1us while on 32MHz clock */
#define AR5K_USEC_32_S 7
#define AR5K_USEC_TX_LATENCY_5211 0x007fc000
#define AR5K_USEC_TX_LATENCY_5211_S 14
@@ -1152,16 +1234,16 @@
/*
* PCU beacon control register
*/
-#define AR5K_BEACON_5210 0x8024
-#define AR5K_BEACON_5211 0x8020
+#define AR5K_BEACON_5210 0x8024 /* Register Address [5210] */
+#define AR5K_BEACON_5211 0x8020 /* Register Address [5211+] */
#define AR5K_BEACON (ah->ah_version == AR5K_AR5210 ? \
AR5K_BEACON_5210 : AR5K_BEACON_5211)
-#define AR5K_BEACON_PERIOD 0x0000ffff
+#define AR5K_BEACON_PERIOD 0x0000ffff /* Mask for beacon period */
#define AR5K_BEACON_PERIOD_S 0
-#define AR5K_BEACON_TIM 0x007f0000
+#define AR5K_BEACON_TIM 0x007f0000 /* Mask for TIM offset */
#define AR5K_BEACON_TIM_S 16
-#define AR5K_BEACON_ENABLE 0x00800000
-#define AR5K_BEACON_RESET_TSF 0x01000000
+#define AR5K_BEACON_ENABLE 0x00800000 /* Enable beacons */
+#define AR5K_BEACON_RESET_TSF 0x01000000 /* Force TSF reset */
/*
* CFP period register
@@ -1234,7 +1316,6 @@
/*
* Receive filter register
- * TODO: Get these out of ar5xxx.h on ath5k
*/
#define AR5K_RX_FILTER_5210 0x804c /* Register Address [5210] */
#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */
@@ -1307,11 +1388,11 @@
#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */
#define AR5K_DIAG_SW (ah->ah_version == AR5K_AR5210 ? \
AR5K_DIAG_SW_5210 : AR5K_DIAG_SW_5211)
-#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001
-#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs (?) */
-#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs (?) */
-#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption (?) */
-#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption (?) */
+#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 /* Disable ACKs if WEP key is invalid */
+#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs */
+#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs */
+#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption */
+#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption */
#define AR5K_DIAG_SW_DIS_TX 0x00000020 /* Disable transmit [5210] */
#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable recieve */
#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020
@@ -1329,13 +1410,13 @@
#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100
#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \
AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
-#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200 /* Scrambler seed (?) */
+#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200 /* Enable scrambler seed */
#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400
#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \
AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211)
#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */
#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */
-#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask (?) */
+#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask */
#define AR5K_DIAG_SW_SCRAM_SEED_S 10
#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */
#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000
@@ -1344,6 +1425,7 @@
AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
#define AR5K_DIAG_SW_OBSPT_M 0x000c0000
#define AR5K_DIAG_SW_OBSPT_S 18
+/* more bits */
/*
* TSF (clock) register (lower 32 bits)
@@ -1369,15 +1451,34 @@
/*
* ADDAC test register [5211+]
*/
-#define AR5K_ADDAC_TEST 0x8054
-#define AR5K_ADDAC_TEST_TXCONT 0x00000001
+#define AR5K_ADDAC_TEST 0x8054 /* Register Address */
+#define AR5K_ADDAC_TEST_TXCONT 0x00000001 /* Test continuous tx */
+#define AR5K_ADDAC_TEST_TST_MODE 0x00000002 /* Test mode */
+#define AR5K_ADDAC_TEST_LOOP_EN 0x00000004 /* Enable loop */
+#define AR5K_ADDAC_TEST_LOOP_LEN 0x00000008 /* Loop length (field) */
+#define AR5K_ADDAC_TEST_USE_U8 0x00004000 /* Use upper 8 bits */
+#define AR5K_ADDAC_TEST_MSB 0x00008000 /* State of MSB */
+#define AR5K_ADDAC_TEST_TRIG_SEL 0x00010000 /* Trigger select */
+#define AR5K_ADDAC_TEST_TRIG_PTY 0x00020000 /* Trigger polarity */
+#define AR5K_ADDAC_TEST_RXCONT 0x00040000 /* Continuous capture */
+#define AR5K_ADDAC_TEST_CAPTURE 0x00080000 /* Begin capture */
+#define AR5K_ADDAC_TEST_TST_ARM 0x00100000 /* Test ARM (Adaptive Radio Mode ?) */
/*
* Default antenna register [5211+]
*/
#define AR5K_DEFAULT_ANTENNA 0x8058
+/*
+ * Frame control QoS mask register (?) [5211+]
+ * (FC_QOS_MASK)
+ */
+#define AR5K_FRAME_CTL_QOSM 0x805c
+/*
+ * Seq mask register (?) [5211+]
+ */
+#define AR5K_SEQ_MASK 0x8060
/*
* Retry count register [5210]
@@ -1449,124 +1550,242 @@
/*
* XR (eXtended Range) mode register
*/
-#define AR5K_XRMODE 0x80c0
-#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f
+#define AR5K_XRMODE 0x80c0 /* Register Address */
+#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f /* Mask for Poll type (?) */
#define AR5K_XRMODE_POLL_TYPE_S 0
-#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c
+#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c /* Mask for Poll subtype (?) */
#define AR5K_XRMODE_POLL_SUBTYPE_S 2
-#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080
-#define AR5K_XRMODE_SIFS_DELAY 0x000fff00
-#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000
+#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080 /* Wait for poll */
+#define AR5K_XRMODE_SIFS_DELAY 0x000fff00 /* Mask for SIFS delay */
+#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000 /* Mask for frame hold (?) */
#define AR5K_XRMODE_FRAME_HOLD_S 20
/*
* XR delay register
*/
-#define AR5K_XRDELAY 0x80c4
-#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff
+#define AR5K_XRDELAY 0x80c4 /* Register Address */
+#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff /* Mask for slot delay */
#define AR5K_XRDELAY_SLOT_DELAY_S 0
-#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000
+#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000 /* Mask for CHIRP data delay */
#define AR5K_XRDELAY_CHIRP_DELAY_S 16
/*
* XR timeout register
*/
-#define AR5K_XRTIMEOUT 0x80c8
-#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff
+#define AR5K_XRTIMEOUT 0x80c8 /* Register Address */
+#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff /* Mask for CHIRP timeout */
#define AR5K_XRTIMEOUT_CHIRP_S 0
-#define AR5K_XRTIMEOUT_POLL_M 0xffff0000
+#define AR5K_XRTIMEOUT_POLL_M 0xffff0000 /* Mask for Poll timeout */
#define AR5K_XRTIMEOUT_POLL_S 16
/*
* XR chirp register
*/
-#define AR5K_XRCHIRP 0x80cc
-#define AR5K_XRCHIRP_SEND 0x00000001
-#define AR5K_XRCHIRP_GAP 0xffff0000
+#define AR5K_XRCHIRP 0x80cc /* Register Address */
+#define AR5K_XRCHIRP_SEND 0x00000001 /* Send CHIRP */
+#define AR5K_XRCHIRP_GAP 0xffff0000 /* Mask for CHIRP gap (?) */
/*
* XR stomp register
*/
-#define AR5K_XRSTOMP 0x80d0
-#define AR5K_XRSTOMP_TX 0x00000001
-#define AR5K_XRSTOMP_RX_ABORT 0x00000002
-#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00
+#define AR5K_XRSTOMP 0x80d0 /* Register Address */
+#define AR5K_XRSTOMP_TX 0x00000001 /* Stomp Tx (?) */
+#define AR5K_XRSTOMP_RX 0x00000002 /* Stomp Rx (?) */
+#define AR5K_XRSTOMP_TX_RSSI 0x00000004 /* Stomp Tx RSSI (?) */
+#define AR5K_XRSTOMP_TX_BSSID 0x00000008 /* Stomp Tx BSSID (?) */
+#define AR5K_XRSTOMP_DATA 0x00000010 /* Stomp data (?)*/
+#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00 /* Mask for XR RSSI threshold */
/*
* First enhanced sleep register
*/
-#define AR5K_SLEEP0 0x80d4
-#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff
+#define AR5K_SLEEP0 0x80d4 /* Register Address */
+#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff /* Mask for next DTIM (?) */
#define AR5K_SLEEP0_NEXT_DTIM_S 0
-#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000
-#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000
-#define AR5K_SLEEP0_CABTO 0xff000000
+#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 /* Assume DTIM */
+#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 /* Enable enhanced sleep control */
+#define AR5K_SLEEP0_CABTO 0xff000000 /* Mask for CAB Time Out */
#define AR5K_SLEEP0_CABTO_S 24
/*
* Second enhanced sleep register
*/
-#define AR5K_SLEEP1 0x80d8
-#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff
+#define AR5K_SLEEP1 0x80d8 /* Register Address */
+#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff /* Mask for next TIM (?) */
#define AR5K_SLEEP1_NEXT_TIM_S 0
-#define AR5K_SLEEP1_BEACON_TO 0xff000000
+#define AR5K_SLEEP1_BEACON_TO 0xff000000 /* Mask for Beacon Time Out */
#define AR5K_SLEEP1_BEACON_TO_S 24
/*
* Third enhanced sleep register
*/
-#define AR5K_SLEEP2 0x80dc
-#define AR5K_SLEEP2_TIM_PER 0x0000ffff
+#define AR5K_SLEEP2 0x80dc /* Register Address */
+#define AR5K_SLEEP2_TIM_PER 0x0000ffff /* Mask for TIM period (?) */
#define AR5K_SLEEP2_TIM_PER_S 0
-#define AR5K_SLEEP2_DTIM_PER 0xffff0000
+#define AR5K_SLEEP2_DTIM_PER 0xffff0000 /* Mask for DTIM period (?) */
#define AR5K_SLEEP2_DTIM_PER_S 16
/*
* BSSID mask registers
*/
-#define AR5K_BSS_IDM0 0x80e0
-#define AR5K_BSS_IDM1 0x80e4
+#define AR5K_BSS_IDM0 0x80e0 /* Upper bits */
+#define AR5K_BSS_IDM1 0x80e4 /* Lower bits */
/*
* TX power control (TPC) register
+ *
+ * XXX: PCDAC steps (0.5dBm) or dBm ?
+ *
+ * XXX: Mask changes for newer chips to 7f
+ * like tx power table ?
*/
-#define AR5K_TXPC 0x80e8
-#define AR5K_TXPC_ACK_M 0x0000003f
+#define AR5K_TXPC 0x80e8 /* Register Address */
+#define AR5K_TXPC_ACK_M 0x0000003f /* Mask for ACK tx power */
#define AR5K_TXPC_ACK_S 0
-#define AR5K_TXPC_CTS_M 0x00003f00
+#define AR5K_TXPC_CTS_M 0x00003f00 /* Mask for CTS tx power */
#define AR5K_TXPC_CTS_S 8
-#define AR5K_TXPC_CHIRP_M 0x003f0000
+#define AR5K_TXPC_CHIRP_M 0x003f0000 /* Mask for CHIRP tx power */
#define AR5K_TXPC_CHIRP_S 22
/*
* Profile count registers
*/
-#define AR5K_PROFCNT_TX 0x80ec
-#define AR5K_PROFCNT_RX 0x80f0
-#define AR5K_PROFCNT_RXCLR 0x80f4
-#define AR5K_PROFCNT_CYCLE 0x80f8
+#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
+#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
+#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
+#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
+
+/*
+ * Quiet (period) control registers (?)
+ */
+#define AR5K_QUIET_CTL1 0x80fc /* Register Address */
+#define AR5K_QUIET_CTL1_NEXT_QT 0x0000ffff /* Mask for next quiet (period?) (?) */
+#define AR5K_QUIET_CTL1_QT_EN 0x00010000 /* Enable quiet (period?) */
+#define AR5K_QUIET_CTL2 0x8100 /* Register Address */
+#define AR5K_QUIET_CTL2_QT_PER 0x0000ffff /* Mask for quiet period (?) */
+#define AR5K_QUIET_CTL2_QT_DUR 0xffff0000 /* Mask for quiet duration (?) */
/*
* TSF parameter register
*/
-#define AR5K_TSF_PARM 0x8104
-#define AR5K_TSF_PARM_INC_M 0x000000ff
+#define AR5K_TSF_PARM 0x8104 /* Register Address */
+#define AR5K_TSF_PARM_INC_M 0x000000ff /* Mask for TSF increment */
#define AR5K_TSF_PARM_INC_S 0
/*
+ * QoS register (?)
+ */
+#define AR5K_QOS 0x8108 /* Register Address */
+#define AR5K_QOS_NOACK_2BIT_VALUES 0x00000000 /* (field) */
+#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000020 /* (field) */
+#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000080 /* (field) */
+
+/*
* PHY error filter register
*/
#define AR5K_PHY_ERR_FIL 0x810c
-#define AR5K_PHY_ERR_FIL_RADAR 0x00000020
-#define AR5K_PHY_ERR_FIL_OFDM 0x00020000
-#define AR5K_PHY_ERR_FIL_CCK 0x02000000
+#define AR5K_PHY_ERR_FIL_RADAR 0x00000020 /* Radar signal */
+#define AR5K_PHY_ERR_FIL_OFDM 0x00020000 /* OFDM false detect (ANI) */
+#define AR5K_PHY_ERR_FIL_CCK 0x02000000 /* CCK false detect (ANI) */
+
+/*
+ * XR latency register
+ */
+#define AR5K_XRLAT_TX 0x8110
/*
- * Rate duration register
+ * ACK SIFS register
+ */
+#define AR5K_ACKSIFS 0x8114 /* Register Address */
+#define AR5K_ACKSIFS_INC 0x00000000 /* ACK SIFS Increment (field) */
+
+/*
+ * MIC QoS control register (?)
+ */
+#define AR5K_MIC_QOS_CTL 0x8118 /* Register Address */
+#define AR5K_MIC_QOS_CTL_0 0x00000001 /* MIC QoS control 0 (?) */
+#define AR5K_MIC_QOS_CTL_1 0x00000004 /* MIC QoS control 1 (?) */
+#define AR5K_MIC_QOS_CTL_2 0x00000010 /* MIC QoS control 2 (?) */
+#define AR5K_MIC_QOS_CTL_3 0x00000040 /* MIC QoS control 3 (?) */
+#define AR5K_MIC_QOS_CTL_4 0x00000100 /* MIC QoS control 4 (?) */
+#define AR5K_MIC_QOS_CTL_5 0x00000400 /* MIC QoS control 5 (?) */
+#define AR5K_MIC_QOS_CTL_6 0x00001000 /* MIC QoS control 6 (?) */
+#define AR5K_MIC_QOS_CTL_7 0x00004000 /* MIC QoS control 7 (?) */
+#define AR5K_MIC_QOS_CTL_MQ_EN 0x00010000 /* Enable MIC QoS */
+
+/*
+ * MIC QoS select register (?)
+ */
+#define AR5K_MIC_QOS_SEL 0x811c
+#define AR5K_MIC_QOS_SEL_0 0x00000001
+#define AR5K_MIC_QOS_SEL_1 0x00000010
+#define AR5K_MIC_QOS_SEL_2 0x00000100
+#define AR5K_MIC_QOS_SEL_3 0x00001000
+#define AR5K_MIC_QOS_SEL_4 0x00010000
+#define AR5K_MIC_QOS_SEL_5 0x00100000
+#define AR5K_MIC_QOS_SEL_6 0x01000000
+#define AR5K_MIC_QOS_SEL_7 0x10000000
+
+/*
+ * Misc mode control register (?)
+ */
+#define AR5K_MISC_MODE 0x8120 /* Register Address */
+#define AR5K_MISC_MODE_FBSSID_MATCH 0x00000001 /* Force BSSID match */
+#define AR5K_MISC_MODE_ACKSIFS_MEM 0x00000002 /* ACK SIFS memory (?) */
+/* more bits */
+
+/*
+ * OFDM Filter counter
+ */
+#define AR5K_OFDM_FIL_CNT 0x8124
+
+/*
+ * CCK Filter counter
+ */
+#define AR5K_CCK_FIL_CNT 0x8128
+
+/*
+ * PHY Error Counters (?)
+ */
+#define AR5K_PHYERR_CNT1 0x812c
+#define AR5K_PHYERR_CNT1_MASK 0x8130
+
+#define AR5K_PHYERR_CNT2 0x8134
+#define AR5K_PHYERR_CNT2_MASK 0x8138
+
+/*
+ * TSF Threshold register (?)
+ */
+#define AR5K_TSF_THRES 0x813c
+
+/*
+ * Rate -> ACK SIFS mapping table (32 entries)
+ */
+#define AR5K_RATE_ACKSIFS_BASE 0x8680 /* Register Address */
+#define AR5K_RATE_ACKSIFS(_n) (AR5K_RATE_ACKSIFS_BASE + ((_n) << 2))
+#define AR5K_RATE_ACKSIFS_NORMAL 0x00000001 /* Normal SIFS (field) */
+#define AR5K_RATE_ACKSIFS_TURBO 0x00000400 /* Turbo SIFS (field) */
+
+/*
+ * Rate -> duration mapping table (32 entries)
*/
#define AR5K_RATE_DUR_BASE 0x8700
#define AR5K_RATE_DUR(_n) (AR5K_RATE_DUR_BASE + ((_n) << 2))
+/*
+ * Rate -> db mapping table
+ * (8 entries, each one has 4 8bit fields)
+ */
+#define AR5K_RATE2DB_BASE 0x87c0
+#define AR5K_RATE2DB(_n) (AR5K_RATE2DB_BASE + ((_n) << 2))
+
+/*
+ * db -> Rate mapping table
+ * (8 entries, each one has 4 8bit fields)
+ */
+#define AR5K_DB2RATE_BASE 0x87e0
+#define AR5K_DB2RATE(_n) (AR5K_DB2RATE_BASE + ((_n) << 2))
+
/*===5212 end===*/
/*
@@ -1613,12 +1832,34 @@
/*===PHY REGISTERS===*/
/*
- * PHY register
+ * PHY registers start
*/
#define AR5K_PHY_BASE 0x9800
#define AR5K_PHY(_n) (AR5K_PHY_BASE + ((_n) << 2))
-#define AR5K_PHY_SHIFT_2GHZ 0x00004007
-#define AR5K_PHY_SHIFT_5GHZ 0x00000007
+
+/*
+ * TST_2 (Misc config parameters)
+ */
+#define AR5K_PHY_TST2 0x9800 /* Register Address */
+#define AR5K_PHY_TST2_TRIG_SEL 0x00000001 /* Trigger select (?) (field ?) */
+#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) (field ?) */
+#define AR5K_PHY_TST2_CBUS_MODE 0x00000100 /* Cardbus mode (?) */
+/* bit reserved */
+#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32kHz external) */
+#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */
+#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?) */
+#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */
+#define AR5K_PHY_TST2_ALT_RFDATA 0x00004000 /* Alternate RFDATA (5-2GHz switch) */
+#define AR5K_PHY_TST2_MINI_OBS_EN 0x00008000 /* Enable mini OBS (?) */
+#define AR5K_PHY_TST2_RX2_IS_RX5_INV 0x00010000 /* 2GHz rx path is the 5GHz path inverted (?) */
+#define AR5K_PHY_TST2_SLOW_CLK160 0x00020000 /* Slow CLK160 (?) */
+#define AR5K_PHY_TST2_AGC_OBS_SEL_3 0x00040000 /* AGC OBS Select 3 (?) */
+#define AR5K_PHY_TST2_BBB_OBS_SEL 0x00080000 /* BB OBS Select (field ?) */
+#define AR5K_PHY_TST2_ADC_OBS_SEL 0x00800000 /* ADC OBS Select (field ?) */
+#define AR5K_PHY_TST2_RX_CLR_SEL 0x08000000 /* RX Clear Select (?) */
+#define AR5K_PHY_TST2_FORCE_AGC_CLR 0x10000000 /* Force AGC clear (?) */
+#define AR5K_PHY_SHIFT_2GHZ 0x00004007 /* Used to access 2GHz radios */
+#define AR5K_PHY_SHIFT_5GHZ 0x00000007 /* Used to access 5GHz radios (default) */
/*
* PHY frame control register [5110] /turbo mode register [5111+]
@@ -1630,18 +1871,21 @@
* a "turbo mode register" for 5110. We treat this one as
* a frame control register for 5110 below.
*/
-#define AR5K_PHY_TURBO 0x9804
-#define AR5K_PHY_TURBO_MODE 0x00000001
-#define AR5K_PHY_TURBO_SHORT 0x00000002
+#define AR5K_PHY_TURBO 0x9804 /* Register Address */
+#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */
+#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Short mode (20MHz channels) (?) */
/*
* PHY agility command register
+ * (aka TST_1)
*/
-#define AR5K_PHY_AGC 0x9808
-#define AR5K_PHY_AGC_DISABLE 0x08000000
+#define AR5K_PHY_AGC 0x9808 /* Register Address */
+#define AR5K_PHY_TST1 0x9808
+#define AR5K_PHY_AGC_DISABLE 0x08000000 /* Disable AGC to A2 (?)*/
+#define AR5K_PHY_TST1_TXHOLD 0x00003800 /* Set tx hold (?) */
/*
- * PHY timing register [5112+]
+ * PHY timing register 3 [5112+]
*/
#define AR5K_PHY_TIMING_3 0x9814
#define AR5K_PHY_TIMING_3_DSC_MAN 0xfffe0000
@@ -1657,26 +1901,81 @@
/*
* PHY activation register
*/
-#define AR5K_PHY_ACT 0x981c
-#define AR5K_PHY_ACT_ENABLE 0x00000001
-#define AR5K_PHY_ACT_DISABLE 0x00000002
+#define AR5K_PHY_ACT 0x981c /* Register Address */
+#define AR5K_PHY_ACT_ENABLE 0x00000001 /* Activate PHY */
+#define AR5K_PHY_ACT_DISABLE 0x00000002 /* Deactivate PHY */
+
+/*
+ * PHY RF control registers
+ * (i think these are delay times,
+ * these calibration values exist
+ * in EEPROM)
+ */
+#define AR5K_PHY_RF_CTL2 0x9824 /* Register Address */
+#define AR5K_PHY_RF_CTL2_TXF2TXD_START 0x0000000f /* Mask for TX frame to TX d(esc?) start */
+
+#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */
+#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000000f /* Mask for TX end to XLNA on */
+
+#define AR5K_PHY_RF_CTL4 0x9834 /* Register Address */
+#define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON 0x00000001 /* TX frame to XPA A on (field) */
+#define AR5K_PHY_RF_CTL4_TXF2XPA_B_ON 0x00000100 /* TX frame to XPA B on (field) */
+#define AR5K_PHY_RF_CTL4_TXE2XPA_A_OFF 0x00010000 /* TX end to XPA A off (field) */
+#define AR5K_PHY_RF_CTL4_TXE2XPA_B_OFF 0x01000000 /* TX end to XPA B off (field) */
+
+/*
+ * Pre-Amplifier control register
+ * (XPA -> external pre-amplifier)
+ */
+#define AR5K_PHY_PA_CTL 0x9838 /* Register Address */
+#define AR5K_PHY_PA_CTL_XPA_A_HI 0x00000001 /* XPA A high (?) */
+#define AR5K_PHY_PA_CTL_XPA_B_HI 0x00000002 /* XPA B high (?) */
+#define AR5K_PHY_PA_CTL_XPA_A_EN 0x00000004 /* Enable XPA A */
+#define AR5K_PHY_PA_CTL_XPA_B_EN 0x00000008 /* Enable XPA B */
+
+/*
+ * PHY settling register
+ */
+#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
+#define AR5K_PHY_SETTLING_AGC 0x0000007f /* Mask for AGC settling time */
+#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Mask for Switch settling time */
+
+/*
+ * PHY Gain registers
+ */
+#define AR5K_PHY_GAIN 0x9848 /* Register Address */
+#define AR5K_PHY_GAIN_TXRX_ATTEN 0x0003f000 /* Mask for TX-RX Attenuation */
+
+#define AR5K_PHY_GAIN_OFFSET 0x984c /* Register Address */
+#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */
+
+/*
+ * Desired size register
+ * (for more info, read the ANI patent)
+ */
+#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */
+#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* Mask for ADC desired size */
+#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* Mask for PGA desired size */
+#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Mask for Total desired size (?) */
/*
* PHY signal register
+ * (for more info, read the ANI patent)
*/
-#define AR5K_PHY_SIG 0x9858
-#define AR5K_PHY_SIG_FIRSTEP 0x0003f000
+#define AR5K_PHY_SIG 0x9858 /* Register Address */
+#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 /* Mask for FIRSTEP */
#define AR5K_PHY_SIG_FIRSTEP_S 12
-#define AR5K_PHY_SIG_FIRPWR 0x03fc0000
+#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 /* Mask for FIRPWR */
#define AR5K_PHY_SIG_FIRPWR_S 18
/*
* PHY coarse agility control register
+ * (for more info, read the ANI patent)
*/
-#define AR5K_PHY_AGCCOARSE 0x985c
-#define AR5K_PHY_AGCCOARSE_LO 0x00007f80
+#define AR5K_PHY_AGCCOARSE 0x985c /* Register Address */
+#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 /* Mask for AGC Coarse low */
#define AR5K_PHY_AGCCOARSE_LO_S 7
-#define AR5K_PHY_AGCCOARSE_HI 0x003f8000
+#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 /* Mask for AGC Coarse high */
#define AR5K_PHY_AGCCOARSE_HI_S 15
/*
@@ -1689,12 +1988,13 @@
/*
* PHY noise floor status register
*/
-#define AR5K_PHY_NF 0x9864
-#define AR5K_PHY_NF_M 0x000001ff
-#define AR5K_PHY_NF_ACTIVE 0x00000100
+#define AR5K_PHY_NF 0x9864 /* Register address */
+#define AR5K_PHY_NF_M 0x000001ff /* Noise floor mask */
+#define AR5K_PHY_NF_ACTIVE 0x00000100 /* Noise floor calibration still active */
#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M)
#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1)
#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
+#define AR5K_PHY_NF_THRESH62 0x00001000 /* Thresh62 -check ANI patent- (field) */
/*
* PHY ADC saturation register [5110]
@@ -1706,6 +2006,30 @@
#define AR5K_PHY_ADCSAT_THR_S 5
/*
+ * PHY Weak ofdm signal detection threshold registers (ANI) [5212+]
+ */
+
+/* High thresholds */
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR 0x9868
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT 0x0000001f
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT_S 0
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1 0x00fe0000
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1_S 17
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2 0x7f000000
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_S 24
+
+/* Low thresholds */
+#define AR5K_PHY_WEAK_OFDM_LOW_THR 0x986c
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN 0x00000001
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT 0x00003f00
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT_S 8
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1 0x001fc000
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1_S 14
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2 0x0fe00000
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_S 21
+
+
+/*
* PHY sleep registers [5112+]
*/
#define AR5K_PHY_SCR 0x9870
@@ -1730,6 +2054,8 @@
AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212)
#define AR5K_PHY_PLL_RF5111 0x00000000
#define AR5K_PHY_PLL_RF5112 0x00000040
+#define AR5K_PHY_PLL_HALF_RATE 0x00000100
+#define AR5K_PHY_PLL_QUARTER_RATE 0x00000200
/*
* RF Buffer register
@@ -1792,23 +2118,74 @@
#define AR5K_PHY_RFSTG_DISABLE 0x00000021
/*
+ * PHY Antenna control register
+ */
+#define AR5K_PHY_ANT_CTL 0x9910 /* Register Address */
+#define AR5K_PHY_ANT_CTL_TXRX_EN 0x00000001 /* Enable TX/RX (?) */
+#define AR5K_PHY_ANT_CTL_SECTORED_ANT 0x00000004 /* Sectored Antenna */
+#define AR5K_PHY_ANT_CTL_HITUNE5 0x00000008 /* Hitune5 (?) */
+#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE 0x00000010 /* Switch table idle (?) */
+
+/*
* PHY receiver delay register [5111+]
*/
-#define AR5K_PHY_RX_DELAY 0x9914
-#define AR5K_PHY_RX_DELAY_M 0x00003fff
+#define AR5K_PHY_RX_DELAY 0x9914 /* Register Address */
+#define AR5K_PHY_RX_DELAY_M 0x00003fff /* Mask for RX activate to receive delay (/100ns) */
+
+/*
+ * PHY max rx length register (?) [5111]
+ */
+#define AR5K_PHY_MAX_RX_LEN 0x991c
/*
- * PHY timing I(nphase) Q(adrature) control register [5111+]
+ * PHY timing register 4
+ * I(nphase)/Q(adrature) calibration register [5111+]
*/
-#define AR5K_PHY_IQ 0x9920 /* Register address */
+#define AR5K_PHY_IQ 0x9920 /* Register Address */
#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */
#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */
#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5
#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */
-#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000
+#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000 /* Mask for max number of samples in log scale */
#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX_S 12
#define AR5K_PHY_IQ_RUN 0x00010000 /* Run i/q calibration */
+#define AR5K_PHY_IQ_USE_PT_DF 0x00020000 /* Use pilot track df (?) */
+#define AR5K_PHY_IQ_EARLY_TRIG_THR 0x00200000 /* Early trigger threshold (?) (field) */
+#define AR5K_PHY_IQ_PILOT_MASK_EN 0x10000000 /* Enable pilot mask (?) */
+#define AR5K_PHY_IQ_CHAN_MASK_EN 0x20000000 /* Enable channel mask (?) */
+#define AR5K_PHY_IQ_SPUR_FILT_EN 0x40000000 /* Enable spur filter */
+#define AR5K_PHY_IQ_SPUR_RSSI_EN 0x80000000 /* Enable spur rssi */
+/*
+ * PHY timing register 5
+ * OFDM Self-correlator Cyclic RSSI threshold params
+ * (Check out bb_cycpwr_thr1 on ANI patent)
+ */
+#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */
+#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */
+#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */
+#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */
+#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */
+#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */
+#define AR5K_PHY_OFDM_SELFCORR_LSCTHR_HIRSSI 0x00800000 /* Long sc threshold hi rssi (?) */
+
+/*
+ * PHY-only warm reset register
+ */
+#define AR5K_PHY_WARM_RESET 0x9928
+
+/*
+ * PHY-only control register
+ */
+#define AR5K_PHY_CTL 0x992c /* Register Address */
+#define AR5K_PHY_CTL_RX_DRAIN_RATE 0x00000001 /* RX drain rate (?) */
+#define AR5K_PHY_CTL_LATE_TX_SIG_SYM 0x00000002 /* Late tx signal symbol (?) */
+#define AR5K_PHY_CTL_GEN_SCRAMBLER 0x00000004 /* Generate scrambler */
+#define AR5K_PHY_CTL_TX_ANT_SEL 0x00000008 /* TX antenna select */
+#define AR5K_PHY_CTL_TX_ANT_STATIC 0x00000010 /* Static TX antenna */
+#define AR5K_PHY_CTL_RX_ANT_SEL 0x00000020 /* RX antenna select */
+#define AR5K_PHY_CTL_RX_ANT_STATIC 0x00000040 /* Static RX antenna */
+#define AR5K_PHY_CTL_LOW_FREQ_SLE_EN 0x00000080 /* Enable low freq sleep */
/*
* PHY PAPD probe register [5111+ (?)]
@@ -1816,9 +2193,13 @@
* Because it's always 0 in 5211 initialization code
*/
#define AR5K_PHY_PAPD_PROBE 0x9930
+#define AR5K_PHY_PAPD_PROBE_SH_HI_PAR 0x00000001
+#define AR5K_PHY_PAPD_PROBE_PCDAC_BIAS 0x00000002
+#define AR5K_PHY_PAPD_PROBE_COMP_GAIN 0x00000040
#define AR5K_PHY_PAPD_PROBE_TXPOWER 0x00007e00
#define AR5K_PHY_PAPD_PROBE_TXPOWER_S 9
#define AR5K_PHY_PAPD_PROBE_TX_NEXT 0x00008000
+#define AR5K_PHY_PAPD_PROBE_PREDIST_EN 0x00010000
#define AR5K_PHY_PAPD_PROBE_TYPE 0x01800000 /* [5112+] */
#define AR5K_PHY_PAPD_PROBE_TYPE_S 23
#define AR5K_PHY_PAPD_PROBE_TYPE_OFDM 0
@@ -1848,15 +2229,16 @@
#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \
AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211)
/*---[5111+]---*/
-#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */
#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3
+#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */
/*---[5110/5111]---*/
-#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000
-#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000
-#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* illegal rate */
-#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* illegal length */
+#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 /* PHY timing error */
+#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 /* Parity error */
+#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* Illegal rate */
+#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* Illegal length */
#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000
-#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* tx underrun */
+#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* TX underrun */
#define AR5K_PHY_FRAME_CTL_INI AR5K_PHY_FRAME_CTL_SERVICE_ERR | \
AR5K_PHY_FRAME_CTL_TXURN_ERR | \
AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \
@@ -1915,6 +2297,11 @@ after DFS is enabled */
#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964
/*
+ * PHY Noise floor threshold
+ */
+#define AR5K_PHY_NFTHRES 0x9968
+
+/*
* PHY clock sleep registers [5112+]
*/
#define AR5K_PHY_SCLOCK 0x99f0
@@ -1922,56 +2309,116 @@ after DFS is enabled */
#define AR5K_PHY_SDELAY 0x99f4
#define AR5K_PHY_SDELAY_32MHZ 0x000000ff
#define AR5K_PHY_SPENDING 0x99f8
+#define AR5K_PHY_SPENDING_14 0x00000014
+#define AR5K_PHY_SPENDING_18 0x00000018
#define AR5K_PHY_SPENDING_RF5111 0x00000018
-#define AR5K_PHY_SPENDING_RF5112 0x00000014 /* <- i 've only seen this on 2425 dumps ! */
-#define AR5K_PHY_SPENDING_RF5112A 0x0000000e /* but since i only have 5112A-based chips */
-#define AR5K_PHY_SPENDING_RF5424 0x00000012 /* to test it might be also for old 5112. */
+#define AR5K_PHY_SPENDING_RF5112 0x00000014
+/* #define AR5K_PHY_SPENDING_RF5112A 0x0000000e */
+/* #define AR5K_PHY_SPENDING_RF5424 0x00000012 */
+#define AR5K_PHY_SPENDING_RF5413 0x00000014
+#define AR5K_PHY_SPENDING_RF2413 0x00000014
+#define AR5K_PHY_SPENDING_RF2425 0x00000018
/*
* Misc PHY/radio registers [5110 - 5111]
*/
-#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */
+#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */
#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2))
-#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplrifier Gain table base address */
+#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplifier Gain table base address */
#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2))
/*
* PHY timing IQ calibration result register [5111+]
*/
-#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */
-#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */
+#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */
+#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */
#define AR5K_PHY_IQRES_CAL_CORR 0x9c18 /* I/Q Correlation */
/*
* PHY current RSSI register [5111+]
*/
-#define AR5K_PHY_CURRENT_RSSI 0x9c1c
+#define AR5K_PHY_CURRENT_RSSI 0x9c1c
+
+/*
+ * PHY RF Bus grant register (?)
+ */
+#define AR5K_PHY_RFBUS_GRANT 0x9c20
+
+/*
+ * PHY ADC test register
+ */
+#define AR5K_PHY_ADC_TEST 0x9c24
+#define AR5K_PHY_ADC_TEST_I 0x00000001
+#define AR5K_PHY_ADC_TEST_Q 0x00000200
+
+/*
+ * PHY DAC test register
+ */
+#define AR5K_PHY_DAC_TEST 0x9c28
+#define AR5K_PHY_DAC_TEST_I 0x00000001
+#define AR5K_PHY_DAC_TEST_Q 0x00000200
+
+/*
+ * PHY PTAT register (?)
+ */
+#define AR5K_PHY_PTAT 0x9c2c
+
+/*
+ * PHY Illegal TX rate register [5112+]
+ */
+#define AR5K_PHY_BAD_TX_RATE 0x9c30
+
+/*
+ * PHY SPUR Power register [5112+]
+ */
+#define AR5K_PHY_SPUR_PWR 0x9c34 /* Register Address */
+#define AR5K_PHY_SPUR_PWR_I 0x00000001 /* SPUR Power estimate for I (field) */
+#define AR5K_PHY_SPUR_PWR_Q 0x00000100 /* SPUR Power estimate for Q (field) */
+#define AR5K_PHY_SPUR_PWR_FILT 0x00010000 /* Power with SPUR removed (field) */
+
+/*
+ * PHY Channel status register [5112+] (?)
+ */
+#define AR5K_PHY_CHAN_STATUS 0x9c38
+#define AR5K_PHY_CHAN_STATUS_BT_ACT 0x00000001
+#define AR5K_PHY_CHAN_STATUS_RX_CLR_RAW 0x00000002
+#define AR5K_PHY_CHAN_STATUS_RX_CLR_MAC 0x00000004
+#define AR5K_PHY_CHAN_STATUS_RX_CLR_PAP 0x00000008
+
+/*
+ * PHY PAPD I (power?) table (?)
+ * (92! entries)
+ */
+#define AR5K_PHY_PAPD_I_BASE 0xa000
+#define AR5K_PHY_PAPD_I(_n) (AR5K_PHY_PAPD_I_BASE + ((_n) << 2))
/*
* PHY PCDAC TX power table
*/
#define AR5K_PHY_PCDAC_TXPOWER_BASE_5211 0xa180
-#define AR5K_PHY_PCDAC_TXPOWER_BASE_5413 0xa280
-#define AR5K_PHY_PCDAC_TXPOWER_BASE (ah->ah_radio >= AR5K_RF5413 ? \
- AR5K_PHY_PCDAC_TXPOWER_BASE_5413 :\
+#define AR5K_PHY_PCDAC_TXPOWER_BASE_2413 0xa280
+#define AR5K_PHY_PCDAC_TXPOWER_BASE (ah->ah_radio >= AR5K_RF2413 ? \
+ AR5K_PHY_PCDAC_TXPOWER_BASE_2413 :\
AR5K_PHY_PCDAC_TXPOWER_BASE_5211)
#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2))
/*
* PHY mode register [5111+]
*/
-#define AR5K_PHY_MODE 0x0a200 /* Register address */
-#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation mask*/
+#define AR5K_PHY_MODE 0x0a200 /* Register Address */
+#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation bit */
#define AR5K_PHY_MODE_MOD_OFDM 0
#define AR5K_PHY_MODE_MOD_CCK 1
-#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode mask */
+#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode bit */
#define AR5K_PHY_MODE_FREQ_5GHZ 0
#define AR5K_PHY_MODE_FREQ_2GHZ 2
-#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Dynamic OFDM/CCK mode mask [5112+] */
+#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Enable Dynamic OFDM/CCK mode [5112+] */
#define AR5K_PHY_MODE_RAD 0x00000008 /* [5212+] */
#define AR5K_PHY_MODE_RAD_RF5111 0
#define AR5K_PHY_MODE_RAD_RF5112 8
-#define AR5K_PHY_MODE_XR 0x00000010 /* [5112+] */
+#define AR5K_PHY_MODE_XR 0x00000010 /* Enable XR mode [5112+] */
+#define AR5K_PHY_MODE_HALF_RATE 0x00000020 /* Enable Half rate (test) */
+#define AR5K_PHY_MODE_QUARTER_RATE 0x00000040 /* Enable Quarter rate (test) */
/*
* PHY CCK transmit control register [5111+ (?)]
@@ -1979,6 +2426,15 @@ after DFS is enabled */
#define AR5K_PHY_CCKTXCTL 0xa204
#define AR5K_PHY_CCKTXCTL_WORLD 0x00000000
#define AR5K_PHY_CCKTXCTL_JAPAN 0x00000010
+#define AR5K_PHY_CCKTXCTL_SCRAMBLER_DIS 0x00000001
+#define AR5K_PHY_CCKTXCTK_DAC_SCALE 0x00000004
+
+/*
+ * PHY CCK Cross-correlator Barker RSSI threshold register [5212+]
+ */
+#define AR5K_PHY_CCK_CROSSCORR 0xa208
+#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000000f
+#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0
/*
* PHY 2GHz gain register [5111+]
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
new file mode 100644
index 00000000000..9e19dcceb3a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -0,0 +1,8 @@
+config ATH9K
+ tristate "Atheros 802.11n wireless cards support"
+ depends on PCI && MAC80211 && WLAN_80211
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
+
+ If you choose to build a module, it'll be called ath9k.
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile
new file mode 100644
index 00000000000..a6411517e5f
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Makefile
@@ -0,0 +1,11 @@
+ath9k-y += hw.o \
+ phy.o \
+ regd.o \
+ beacon.o \
+ main.o \
+ recv.o \
+ xmit.o \
+ rc.o \
+ core.o
+
+obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
new file mode 100644
index 00000000000..d1b0fbae5a3
--- /dev/null
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_H
+#define ATH9K_H
+
+#include <linux/io.h>
+
+#define ATHEROS_VENDOR_ID 0x168c
+
+#define AR5416_DEVID_PCI 0x0023
+#define AR5416_DEVID_PCIE 0x0024
+#define AR9160_DEVID_PCI 0x0027
+#define AR9280_DEVID_PCI 0x0029
+#define AR9280_DEVID_PCIE 0x002a
+
+#define AR5416_AR9100_DEVID 0x000b
+
+#define AR_SUBVENDOR_ID_NOG 0x0e11
+#define AR_SUBVENDOR_ID_NEW_A 0x7065
+
+#define ATH9K_TXERR_XRETRY 0x01
+#define ATH9K_TXERR_FILT 0x02
+#define ATH9K_TXERR_FIFO 0x04
+#define ATH9K_TXERR_XTXOP 0x08
+#define ATH9K_TXERR_TIMER_EXPIRED 0x10
+
+#define ATH9K_TX_BA 0x01
+#define ATH9K_TX_PWRMGMT 0x02
+#define ATH9K_TX_DESC_CFG_ERR 0x04
+#define ATH9K_TX_DATA_UNDERRUN 0x08
+#define ATH9K_TX_DELIM_UNDERRUN 0x10
+#define ATH9K_TX_SW_ABORTED 0x40
+#define ATH9K_TX_SW_FILTERED 0x80
+
+#define NBBY 8
+
+struct ath_tx_status {
+ u32 ts_tstamp;
+ u16 ts_seqnum;
+ u8 ts_status;
+ u8 ts_ratecode;
+ u8 ts_rateindex;
+ int8_t ts_rssi;
+ u8 ts_shortretry;
+ u8 ts_longretry;
+ u8 ts_virtcol;
+ u8 ts_antenna;
+ u8 ts_flags;
+ int8_t ts_rssi_ctl0;
+ int8_t ts_rssi_ctl1;
+ int8_t ts_rssi_ctl2;
+ int8_t ts_rssi_ext0;
+ int8_t ts_rssi_ext1;
+ int8_t ts_rssi_ext2;
+ u8 pad[3];
+ u32 ba_low;
+ u32 ba_high;
+ u32 evm0;
+ u32 evm1;
+ u32 evm2;
+};
+
+struct ath_rx_status {
+ u32 rs_tstamp;
+ u16 rs_datalen;
+ u8 rs_status;
+ u8 rs_phyerr;
+ int8_t rs_rssi;
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+ int8_t rs_rssi_ctl0;
+ int8_t rs_rssi_ctl1;
+ int8_t rs_rssi_ctl2;
+ int8_t rs_rssi_ext0;
+ int8_t rs_rssi_ext1;
+ int8_t rs_rssi_ext2;
+ u8 rs_isaggr;
+ u8 rs_moreaggr;
+ u8 rs_num_delims;
+ u8 rs_flags;
+ u32 evm0;
+ u32 evm1;
+ u32 evm2;
+};
+
+#define ATH9K_RXERR_CRC 0x01
+#define ATH9K_RXERR_PHY 0x02
+#define ATH9K_RXERR_FIFO 0x04
+#define ATH9K_RXERR_DECRYPT 0x08
+#define ATH9K_RXERR_MIC 0x10
+
+#define ATH9K_RX_MORE 0x01
+#define ATH9K_RX_MORE_AGGR 0x02
+#define ATH9K_RX_GI 0x04
+#define ATH9K_RX_2040 0x08
+#define ATH9K_RX_DELIM_CRC_PRE 0x10
+#define ATH9K_RX_DELIM_CRC_POST 0x20
+#define ATH9K_RX_DECRYPT_BUSY 0x40
+
+#define ATH9K_RXKEYIX_INVALID ((u8)-1)
+#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+
+struct ath_desc {
+ u32 ds_link;
+ u32 ds_data;
+ u32 ds_ctl0;
+ u32 ds_ctl1;
+ u32 ds_hw[20];
+ union {
+ struct ath_tx_status tx;
+ struct ath_rx_status rx;
+ void *stats;
+ } ds_us;
+ void *ds_vdata;
+} __packed;
+
+#define ds_txstat ds_us.tx
+#define ds_rxstat ds_us.rx
+#define ds_stat ds_us.stats
+
+#define ATH9K_TXDESC_CLRDMASK 0x0001
+#define ATH9K_TXDESC_NOACK 0x0002
+#define ATH9K_TXDESC_RTSENA 0x0004
+#define ATH9K_TXDESC_CTSENA 0x0008
+#define ATH9K_TXDESC_INTREQ 0x0010
+#define ATH9K_TXDESC_VEOL 0x0020
+#define ATH9K_TXDESC_EXT_ONLY 0x0040
+#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
+#define ATH9K_TXDESC_VMF 0x0100
+#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
+
+#define ATH9K_RXDESC_INTREQ 0x0020
+
+enum wireless_mode {
+ ATH9K_MODE_11A = 0,
+ ATH9K_MODE_11B = 2,
+ ATH9K_MODE_11G = 3,
+ ATH9K_MODE_11NA_HT20 = 6,
+ ATH9K_MODE_11NG_HT20 = 7,
+ ATH9K_MODE_11NA_HT40PLUS = 8,
+ ATH9K_MODE_11NA_HT40MINUS = 9,
+ ATH9K_MODE_11NG_HT40PLUS = 10,
+ ATH9K_MODE_11NG_HT40MINUS = 11,
+ ATH9K_MODE_MAX
+};
+
+enum ath9k_hw_caps {
+ ATH9K_HW_CAP_CHAN_SPREAD = BIT(0),
+ ATH9K_HW_CAP_MIC_AESCCM = BIT(1),
+ ATH9K_HW_CAP_MIC_CKIP = BIT(2),
+ ATH9K_HW_CAP_MIC_TKIP = BIT(3),
+ ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4),
+ ATH9K_HW_CAP_CIPHER_CKIP = BIT(5),
+ ATH9K_HW_CAP_CIPHER_TKIP = BIT(6),
+ ATH9K_HW_CAP_VEOL = BIT(7),
+ ATH9K_HW_CAP_BSSIDMASK = BIT(8),
+ ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9),
+ ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10),
+ ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11),
+ ATH9K_HW_CAP_HT = BIT(12),
+ ATH9K_HW_CAP_GTT = BIT(13),
+ ATH9K_HW_CAP_FASTCC = BIT(14),
+ ATH9K_HW_CAP_RFSILENT = BIT(15),
+ ATH9K_HW_CAP_WOW = BIT(16),
+ ATH9K_HW_CAP_CST = BIT(17),
+ ATH9K_HW_CAP_ENHANCEDPM = BIT(18),
+ ATH9K_HW_CAP_AUTOSLEEP = BIT(19),
+ ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20),
+ ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21),
+};
+
+enum ath9k_capability_type {
+ ATH9K_CAP_CIPHER = 0,
+ ATH9K_CAP_TKIP_MIC,
+ ATH9K_CAP_TKIP_SPLIT,
+ ATH9K_CAP_PHYCOUNTERS,
+ ATH9K_CAP_DIVERSITY,
+ ATH9K_CAP_TXPOW,
+ ATH9K_CAP_PHYDIAG,
+ ATH9K_CAP_MCAST_KEYSRCH,
+ ATH9K_CAP_TSF_ADJUST,
+ ATH9K_CAP_WME_TKIPMIC,
+ ATH9K_CAP_RFSILENT,
+ ATH9K_CAP_ANT_CFG_2GHZ,
+ ATH9K_CAP_ANT_CFG_5GHZ
+};
+
+struct ath9k_hw_capabilities {
+ u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
+ DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */
+ u16 total_queues;
+ u16 keycache_size;
+ u16 low_5ghz_chan, high_5ghz_chan;
+ u16 low_2ghz_chan, high_2ghz_chan;
+ u16 num_mr_retries;
+ u16 rts_aggr_limit;
+ u8 tx_chainmask;
+ u8 rx_chainmask;
+ u16 tx_triglevel_max;
+ u16 reg_cap;
+ u8 num_gpio_pins;
+ u8 num_antcfg_2ghz;
+ u8 num_antcfg_5ghz;
+};
+
+struct ath9k_ops_config {
+ int dma_beacon_response_time;
+ int sw_beacon_response_time;
+ int additional_swba_backoff;
+ int ack_6mb;
+ int cwm_ignore_extcca;
+ u8 pcie_powersave_enable;
+ u8 pcie_l1skp_enable;
+ u8 pcie_clock_req;
+ u32 pcie_waen;
+ int pcie_power_reset;
+ u8 pcie_restore;
+ u8 analog_shiftreg;
+ u8 ht_enable;
+ u32 ofdm_trig_low;
+ u32 ofdm_trig_high;
+ u32 cck_trig_high;
+ u32 cck_trig_low;
+ u32 enable_ani;
+ u8 noise_immunity_level;
+ u32 ofdm_weaksignal_det;
+ u32 cck_weaksignal_thr;
+ u8 spur_immunity_level;
+ u8 firstep_level;
+ int8_t rssi_thr_high;
+ int8_t rssi_thr_low;
+ u16 diversity_control;
+ u16 antenna_switch_swap;
+ int serialize_regmode;
+ int intr_mitigation;
+#define SPUR_DISABLE 0
+#define SPUR_ENABLE_IOCTL 1
+#define SPUR_ENABLE_EEPROM 2
+#define AR_EEPROM_MODAL_SPURS 5
+#define AR_SPUR_5413_1 1640
+#define AR_SPUR_5413_2 1200
+#define AR_NO_SPUR 0x8000
+#define AR_BASE_FREQ_2GHZ 2300
+#define AR_BASE_FREQ_5GHZ 4900
+#define AR_SPUR_FEEQ_BOUND_HT40 19
+#define AR_SPUR_FEEQ_BOUND_HT20 10
+ int spurmode;
+ u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
+};
+
+enum ath9k_tx_queue {
+ ATH9K_TX_QUEUE_INACTIVE = 0,
+ ATH9K_TX_QUEUE_DATA,
+ ATH9K_TX_QUEUE_BEACON,
+ ATH9K_TX_QUEUE_CAB,
+ ATH9K_TX_QUEUE_UAPSD,
+ ATH9K_TX_QUEUE_PSPOLL
+};
+
+#define ATH9K_NUM_TX_QUEUES 10
+
+enum ath9k_tx_queue_subtype {
+ ATH9K_WME_AC_BK = 0,
+ ATH9K_WME_AC_BE,
+ ATH9K_WME_AC_VI,
+ ATH9K_WME_AC_VO,
+ ATH9K_WME_UPSD
+};
+
+enum ath9k_tx_queue_flags {
+ TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
+ TXQ_FLAG_TXERRINT_ENABLE = 0x0001,
+ TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
+ TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
+ TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
+ TXQ_FLAG_BACKOFF_DISABLE = 0x0010,
+ TXQ_FLAG_COMPRESSION_ENABLE = 0x0020,
+ TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040,
+ TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080,
+};
+
+#define ATH9K_TXQ_USEDEFAULT ((u32) -1)
+
+#define ATH9K_DECOMP_MASK_SIZE 128
+#define ATH9K_READY_TIME_LO_BOUND 50
+#define ATH9K_READY_TIME_HI_BOUND 96
+
+enum ath9k_pkt_type {
+ ATH9K_PKT_TYPE_NORMAL = 0,
+ ATH9K_PKT_TYPE_ATIM,
+ ATH9K_PKT_TYPE_PSPOLL,
+ ATH9K_PKT_TYPE_BEACON,
+ ATH9K_PKT_TYPE_PROBE_RESP,
+ ATH9K_PKT_TYPE_CHIRP,
+ ATH9K_PKT_TYPE_GRP_POLL,
+};
+
+struct ath9k_tx_queue_info {
+ u32 tqi_ver;
+ enum ath9k_tx_queue tqi_type;
+ enum ath9k_tx_queue_subtype tqi_subtype;
+ enum ath9k_tx_queue_flags tqi_qflags;
+ u32 tqi_priority;
+ u32 tqi_aifs;
+ u32 tqi_cwmin;
+ u32 tqi_cwmax;
+ u16 tqi_shretry;
+ u16 tqi_lgretry;
+ u32 tqi_cbrPeriod;
+ u32 tqi_cbrOverflowLimit;
+ u32 tqi_burstTime;
+ u32 tqi_readyTime;
+ u32 tqi_physCompBuf;
+ u32 tqi_intFlags;
+};
+
+enum ath9k_rx_filter {
+ ATH9K_RX_FILTER_UCAST = 0x00000001,
+ ATH9K_RX_FILTER_MCAST = 0x00000002,
+ ATH9K_RX_FILTER_BCAST = 0x00000004,
+ ATH9K_RX_FILTER_CONTROL = 0x00000008,
+ ATH9K_RX_FILTER_BEACON = 0x00000010,
+ ATH9K_RX_FILTER_PROM = 0x00000020,
+ ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
+ ATH9K_RX_FILTER_PSPOLL = 0x00004000,
+ ATH9K_RX_FILTER_PHYERR = 0x00000100,
+ ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
+};
+
+enum ath9k_int {
+ ATH9K_INT_RX = 0x00000001,
+ ATH9K_INT_RXDESC = 0x00000002,
+ ATH9K_INT_RXNOFRM = 0x00000008,
+ ATH9K_INT_RXEOL = 0x00000010,
+ ATH9K_INT_RXORN = 0x00000020,
+ ATH9K_INT_TX = 0x00000040,
+ ATH9K_INT_TXDESC = 0x00000080,
+ ATH9K_INT_TIM_TIMER = 0x00000100,
+ ATH9K_INT_TXURN = 0x00000800,
+ ATH9K_INT_MIB = 0x00001000,
+ ATH9K_INT_RXPHY = 0x00004000,
+ ATH9K_INT_RXKCM = 0x00008000,
+ ATH9K_INT_SWBA = 0x00010000,
+ ATH9K_INT_BMISS = 0x00040000,
+ ATH9K_INT_BNR = 0x00100000,
+ ATH9K_INT_TIM = 0x00200000,
+ ATH9K_INT_DTIM = 0x00400000,
+ ATH9K_INT_DTIMSYNC = 0x00800000,
+ ATH9K_INT_GPIO = 0x01000000,
+ ATH9K_INT_CABEND = 0x02000000,
+ ATH9K_INT_CST = 0x10000000,
+ ATH9K_INT_GTT = 0x20000000,
+ ATH9K_INT_FATAL = 0x40000000,
+ ATH9K_INT_GLOBAL = 0x80000000,
+ ATH9K_INT_BMISC = ATH9K_INT_TIM |
+ ATH9K_INT_DTIM |
+ ATH9K_INT_DTIMSYNC |
+ ATH9K_INT_CABEND,
+ ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM |
+ ATH9K_INT_RXDESC |
+ ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN |
+ ATH9K_INT_TXURN |
+ ATH9K_INT_TXDESC |
+ ATH9K_INT_MIB |
+ ATH9K_INT_RXPHY |
+ ATH9K_INT_RXKCM |
+ ATH9K_INT_SWBA |
+ ATH9K_INT_BMISS |
+ ATH9K_INT_GPIO,
+ ATH9K_INT_NOCARD = 0xffffffff
+};
+
+struct ath9k_rate_table {
+ int rateCount;
+ u8 rateCodeToIndex[256];
+ struct {
+ u8 valid;
+ u8 phy;
+ u32 rateKbps;
+ u8 rateCode;
+ u8 shortPreamble;
+ u8 dot11Rate;
+ u8 controlRate;
+ u16 lpAckDuration;
+ u16 spAckDuration;
+ } info[32];
+};
+
+#define ATH9K_RATESERIES_RTS_CTS 0x0001
+#define ATH9K_RATESERIES_2040 0x0002
+#define ATH9K_RATESERIES_HALFGI 0x0004
+
+struct ath9k_11n_rate_series {
+ u32 Tries;
+ u32 Rate;
+ u32 PktDuration;
+ u32 ChSel;
+ u32 RateFlags;
+};
+
+#define CHANNEL_CW_INT 0x00002
+#define CHANNEL_CCK 0x00020
+#define CHANNEL_OFDM 0x00040
+#define CHANNEL_2GHZ 0x00080
+#define CHANNEL_5GHZ 0x00100
+#define CHANNEL_PASSIVE 0x00200
+#define CHANNEL_DYN 0x00400
+#define CHANNEL_HALF 0x04000
+#define CHANNEL_QUARTER 0x08000
+#define CHANNEL_HT20 0x10000
+#define CHANNEL_HT40PLUS 0x20000
+#define CHANNEL_HT40MINUS 0x40000
+
+#define CHANNEL_INTERFERENCE 0x01
+#define CHANNEL_DFS 0x02
+#define CHANNEL_4MS_LIMIT 0x04
+#define CHANNEL_DFS_CLEAR 0x08
+#define CHANNEL_DISALLOW_ADHOC 0x10
+#define CHANNEL_PER_11D_ADHOC 0x20
+
+#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
+#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
+#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
+#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
+#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
+#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
+#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
+#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
+#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
+#define CHANNEL_ALL \
+ (CHANNEL_OFDM| \
+ CHANNEL_CCK| \
+ CHANNEL_2GHZ | \
+ CHANNEL_5GHZ | \
+ CHANNEL_HT20 | \
+ CHANNEL_HT40PLUS | \
+ CHANNEL_HT40MINUS)
+
+struct ath9k_channel {
+ u16 channel;
+ u32 channelFlags;
+ u8 privFlags;
+ int8_t maxRegTxPower;
+ int8_t maxTxPower;
+ int8_t minTxPower;
+ u32 chanmode;
+ int32_t CalValid;
+ bool oneTimeCalsDone;
+ int8_t iCoff;
+ int8_t qCoff;
+ int16_t rawNoiseFloor;
+ int8_t antennaMax;
+ u32 regDmnFlags;
+ u32 conformanceTestLimit[3]; /* 0:11a, 1: 11b, 2:11g */
+#ifdef ATH_NF_PER_CHAN
+ struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+#endif
+};
+
+#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \
+ (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
+ (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
+ (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
+#define IS_CHAN_B(_c) (((_c)->channelFlags & CHANNEL_B) == CHANNEL_B)
+#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
+ (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
+ (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
+ (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
+#define IS_CHAN_CCK(_c) (((_c)->channelFlags & CHANNEL_CCK) != 0)
+#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
+#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
+#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
+#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
+#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
+#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
+
+/* These macros check chanmode and not channelFlags */
+#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
+ ((_c)->chanmode == CHANNEL_G_HT20))
+#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
+ ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
+ ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
+ ((_c)->chanmode == CHANNEL_G_HT40MINUS))
+#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+
+#define IS_CHAN_IN_PUBLIC_SAFETY_BAND(_c) ((_c) > 4940 && (_c) < 4990)
+#define IS_CHAN_A_5MHZ_SPACED(_c) \
+ ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
+ (((_c)->channel % 20) != 0) && \
+ (((_c)->channel % 10) != 0))
+
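The IS_CHAN_* helpers above combine the CHANNEL_* flag masks with the separate chanmode field. As a hedged illustration only (the function below is hypothetical and not part of this patch), they might be used to classify a channel for logging:

static const char *example_chan_mode_name(struct ath9k_channel *chan)
{
	/* chanmode-based checks first: HT40 before HT20 */
	if (IS_CHAN_HT40(chan))
		return IS_CHAN_2GHZ(chan) ? "2GHz HT40" : "5GHz HT40";
	if (IS_CHAN_HT20(chan))
		return IS_CHAN_2GHZ(chan) ? "2GHz HT20" : "5GHz HT20";
	/* legacy channels fall back to the channelFlags masks */
	if (IS_CHAN_B(chan))
		return "11b";
	return IS_CHAN_5GHZ(chan) ? "11a" : "11g";
}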
+struct ath9k_keyval {
+ u8 kv_type;
+ u8 kv_pad;
+ u16 kv_len;
+ u8 kv_val[16];
+ u8 kv_mic[8];
+ u8 kv_txmic[8];
+};
+
+enum ath9k_key_type {
+ ATH9K_KEY_TYPE_CLEAR,
+ ATH9K_KEY_TYPE_WEP,
+ ATH9K_KEY_TYPE_AES,
+ ATH9K_KEY_TYPE_TKIP,
+};
+
+enum ath9k_cipher {
+ ATH9K_CIPHER_WEP = 0,
+ ATH9K_CIPHER_AES_OCB = 1,
+ ATH9K_CIPHER_AES_CCM = 2,
+ ATH9K_CIPHER_CKIP = 3,
+ ATH9K_CIPHER_TKIP = 4,
+ ATH9K_CIPHER_CLR = 5,
+ ATH9K_CIPHER_MIC = 127
+};
+
+#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001
+#define AR_EEPROM_EEPCAP_AES_DIS 0x0002
+#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004
+#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008
+#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0
+#define AR_EEPROM_EEPCAP_MAXQCU_S 4
+#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200
+#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000
+#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12
+
+#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
+
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000
+#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000
+
+#define SD_NO_CTL 0xE0
+#define NO_CTL 0xff
+#define CTL_MODE_M 7
+#define CTL_11A 0
+#define CTL_11B 1
+#define CTL_11G 2
+#define CTL_2GHT20 5
+#define CTL_5GHT20 6
+#define CTL_2GHT40 7
+#define CTL_5GHT40 8
+
+#define AR_EEPROM_MAC(i) (0x1d+(i))
+#define EEP_SCALE 100
+#define EEP_DELTA 10
+
+#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
+#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
+#define AR_EEPROM_RFSILENT_POLARITY 0x0002
+#define AR_EEPROM_RFSILENT_POLARITY_S 1
+
+#define CTRY_DEBUG 0x1ff
+#define CTRY_DEFAULT 0
+
+enum reg_ext_bitmap {
+ REG_EXT_JAPAN_MIDBAND = 1,
+ REG_EXT_FCC_DFS_HT40 = 2,
+ REG_EXT_JAPAN_NONDFS_HT40 = 3,
+ REG_EXT_JAPAN_DFS_HT40 = 4
+};
+
+struct ath9k_country_entry {
+ u16 countryCode;
+ u16 regDmnEnum;
+ u16 regDmn5G;
+ u16 regDmn2G;
+ u8 isMultidomain;
+ u8 iso[3];
+};
+
+#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg)
+#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg)
+
+#define SM(_v, _f) (((_v) << _f##_S) & _f)
+#define MS(_v, _f) (((_v) & _f) >> _f##_S)
+#define REG_RMW(_a, _r, _set, _clr) \
+ REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
+#define REG_RMW_FIELD(_a, _r, _f, _v) \
+ REG_WRITE(_a, _r, \
+ (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
+#define REG_SET_BIT(_a, _r, _f) \
+ REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
+#define REG_CLR_BIT(_a, _r, _f) \
+ REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
+
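The SM()/MS() macros above pack and unpack register fields by pairing a mask with its _S shift, and REG_RMW_FIELD() applies the same idea as a read-modify-write. A minimal sketch using the AR_EEPROM_EEPCAP_MAXQCU field defined earlier; the register and field names in the commented call are placeholders, not real ath9k symbols:

static void example_field_macros(struct ath_hal *ah)
{
	/* SM(): (10 << AR_EEPROM_EEPCAP_MAXQCU_S) & 0x01f0 == 0x00a0 */
	u32 eepcap = SM(10, AR_EEPROM_EEPCAP_MAXQCU);
	/* MS(): (0x00a0 & 0x01f0) >> 4 == 10 */
	u32 maxqcu = MS(eepcap, AR_EEPROM_EEPCAP_MAXQCU);

	/* REG_RMW_FIELD() would read the register, clear the field mask,
	 * merge in the shifted value and write it back, e.g.:
	 * REG_RMW_FIELD(ah, AR_SOME_REG, AR_SOME_FIELD, maxqcu);
	 * (AR_SOME_REG/AR_SOME_FIELD are placeholders.) */
	(void)ah;
	(void)maxqcu;
}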
+#define ATH9K_COMP_BUF_MAX_SIZE 9216
+#define ATH9K_COMP_BUF_ALIGN_SIZE 512
+
+#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
+
+#define INIT_AIFS 2
+#define INIT_CWMIN 15
+#define INIT_CWMIN_11B 31
+#define INIT_CWMAX 1023
+#define INIT_SH_RETRY 10
+#define INIT_LG_RETRY 10
+#define INIT_SSH_RETRY 32
+#define INIT_SLG_RETRY 32
+
+#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)
+
+#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
+#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX
+
+#define IEEE80211_WEP_IVLEN 3
+#define IEEE80211_WEP_KIDLEN 1
+#define IEEE80211_WEP_CRCLEN 4
+#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
+ (IEEE80211_WEP_IVLEN + \
+ IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_CRCLEN))
+#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \
+ (IEEE80211_WEP_IVLEN + \
+ IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_CRCLEN))
+
+#define MAX_REG_ADD_COUNT 129
+#define MAX_RATE_POWER 63
+
+enum ath9k_power_mode {
+ ATH9K_PM_AWAKE = 0,
+ ATH9K_PM_FULL_SLEEP,
+ ATH9K_PM_NETWORK_SLEEP,
+ ATH9K_PM_UNDEFINED
+};
+
+struct ath9k_mib_stats {
+ u32 ackrcv_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 beacons;
+};
+
+enum ath9k_ant_setting {
+ ATH9K_ANT_VARIABLE = 0,
+ ATH9K_ANT_FIXED_A,
+ ATH9K_ANT_FIXED_B
+};
+
+enum ath9k_opmode {
+ ATH9K_M_STA = 1,
+ ATH9K_M_IBSS = 0,
+ ATH9K_M_HOSTAP = 6,
+ ATH9K_M_MONITOR = 8
+};
+
+#define ATH9K_SLOT_TIME_6 6
+#define ATH9K_SLOT_TIME_9 9
+#define ATH9K_SLOT_TIME_20 20
+
+enum ath9k_ht_macmode {
+ ATH9K_HT_MACMODE_20 = 0,
+ ATH9K_HT_MACMODE_2040 = 1,
+};
+
+enum ath9k_ht_extprotspacing {
+ ATH9K_HT_EXTPROTSPACING_20 = 0,
+ ATH9K_HT_EXTPROTSPACING_25 = 1,
+};
+
+struct ath9k_ht_cwm {
+ enum ath9k_ht_macmode ht_macmode;
+ enum ath9k_ht_extprotspacing ht_extprotspacing;
+};
+
+enum ath9k_ani_cmd {
+ ATH9K_ANI_PRESENT = 0x1,
+ ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
+ ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
+ ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
+ ATH9K_ANI_MODE = 0x40,
+ ATH9K_ANI_PHYERR_RESET = 0x80,
+ ATH9K_ANI_ALL = 0xff
+};
+
+enum phytype {
+ PHY_DS,
+ PHY_FH,
+ PHY_OFDM,
+ PHY_HT,
+};
+#define PHY_CCK PHY_DS
+
+enum start_adhoc_option {
+ START_ADHOC_NO_11A,
+ START_ADHOC_PER_11D,
+ START_ADHOC_IN_11A,
+ START_ADHOC_IN_11B,
+};
+
+enum ath9k_tp_scale {
+ ATH9K_TP_SCALE_MAX = 0,
+ ATH9K_TP_SCALE_50,
+ ATH9K_TP_SCALE_25,
+ ATH9K_TP_SCALE_12,
+ ATH9K_TP_SCALE_MIN
+};
+
+enum ser_reg_mode {
+ SER_REG_MODE_OFF = 0,
+ SER_REG_MODE_ON = 1,
+ SER_REG_MODE_AUTO = 2,
+};
+
+#define AR_PHY_CCA_MAX_GOOD_VALUE -85
+#define AR_PHY_CCA_MAX_HIGH_VALUE -62
+#define AR_PHY_CCA_MIN_BAD_VALUE -121
+#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
+#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
+
+#define ATH9K_NF_CAL_HIST_MAX 5
+#define NUM_NF_READINGS 6
+
+struct ath9k_nfcal_hist {
+ int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX];
+ u8 currIndex;
+ int16_t privNF;
+ u8 invalidNFcount;
+};
+
+struct ath9k_beacon_state {
+ u32 bs_nexttbtt;
+ u32 bs_nextdtim;
+ u32 bs_intval;
+#define ATH9K_BEACON_PERIOD 0x0000ffff
+#define ATH9K_BEACON_ENA 0x00800000
+#define ATH9K_BEACON_RESET_TSF 0x01000000
+ u32 bs_dtimperiod;
+ u16 bs_cfpperiod;
+ u16 bs_cfpmaxduration;
+ u32 bs_cfpnext;
+ u16 bs_timoffset;
+ u16 bs_bmissthreshold;
+ u32 bs_sleepduration;
+};
+
+struct ath9k_node_stats {
+ u32 ns_avgbrssi;
+ u32 ns_avgrssi;
+ u32 ns_avgtxrssi;
+ u32 ns_avgtxrate;
+};
+
+#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)
+
+enum ath9k_gpio_output_mux_type {
+ ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT,
+ ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
+ ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
+ ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
+ ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
+ ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES
+};
+
+enum {
+ ATH9K_RESET_POWER_ON,
+ ATH9K_RESET_WARM,
+ ATH9K_RESET_COLD,
+};
+
+#define AH_USE_EEPROM 0x1
+
+struct ath_hal {
+ u32 ah_magic;
+ u16 ah_devid;
+ u16 ah_subvendorid;
+ struct ath_softc *ah_sc;
+ void __iomem *ah_sh;
+ u16 ah_countryCode;
+ u32 ah_macVersion;
+ u16 ah_macRev;
+ u16 ah_phyRev;
+ u16 ah_analog5GhzRev;
+ u16 ah_analog2GhzRev;
+ u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE];
+ u32 ah_flags;
+ enum ath9k_opmode ah_opmode;
+ struct ath9k_ops_config ah_config;
+ struct ath9k_hw_capabilities ah_caps;
+ int16_t ah_powerLimit;
+ u16 ah_maxPowerLevel;
+ u32 ah_tpScale;
+ u16 ah_currentRD;
+ u16 ah_currentRDExt;
+ u16 ah_currentRDInUse;
+ u16 ah_currentRD5G;
+ u16 ah_currentRD2G;
+ char ah_iso[4];
+ enum start_adhoc_option ah_adHocMode;
+ bool ah_commonMode;
+ struct ath9k_channel ah_channels[150];
+ u32 ah_nchan;
+ struct ath9k_channel *ah_curchan;
+ u16 ah_rfsilent;
+ bool ah_rfkillEnabled;
+ bool ah_isPciExpress;
+ u16 ah_txTrigLevel;
+#ifndef ATH_NF_PER_CHAN
+ struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+#endif
+};
+
+struct chan_centers {
+ u16 synth_center;
+ u16 ctl_center;
+ u16 ext_center;
+};
+
+int ath_hal_getcapability(struct ath_hal *ah,
+ enum ath9k_capability_type type,
+ u32 capability,
+ u32 *result);
+const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
+ u32 mode);
+void ath9k_hw_detach(struct ath_hal *ah);
+struct ath_hal *ath9k_hw_attach(u16 devid,
+ struct ath_softc *sc,
+ void __iomem *mem,
+ int *error);
+bool ath9k_regd_init_channels(struct ath_hal *ah,
+ u32 maxchans, u32 *nchans,
+ u8 *regclassids,
+ u32 maxregids, u32 *nregids,
+ u16 cc,
+ bool enableOutdoor,
+ bool enableExtendedChannels);
+u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah,
+ enum ath9k_int ints);
+bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
+ struct ath9k_channel *chan,
+ enum ath9k_ht_macmode macmode,
+ u8 txchainmask, u8 rxchainmask,
+ enum ath9k_ht_extprotspacing extprotspacing,
+ bool bChannelChange,
+ int *status);
+bool ath9k_hw_phy_disable(struct ath_hal *ah);
+void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
+ bool *isCalDone);
+void ath9k_hw_ani_monitor(struct ath_hal *ah,
+ const struct ath9k_node_stats *stats,
+ struct ath9k_channel *chan);
+bool ath9k_hw_calibrate(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal,
+ bool *isCalDone);
+int16_t ath9k_hw_getchan_noise(struct ath_hal *ah,
+ struct ath9k_channel *chan);
+void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
+ u16 assocId);
+void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits);
+void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
+ u16 assocId);
+bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q);
+void ath9k_hw_reset_tsf(struct ath_hal *ah);
+bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry);
+bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
+ const u8 *mac);
+bool ath9k_hw_set_keycache_entry(struct ath_hal *ah,
+ u16 entry,
+ const struct ath9k_keyval *k,
+ const u8 *mac,
+ int xorKey);
+bool ath9k_hw_set_tsfadjust(struct ath_hal *ah,
+ u32 setting);
+void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
+bool ath9k_hw_intrpend(struct ath_hal *ah);
+bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked);
+bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah,
+ bool bIncTrigLevel);
+void ath9k_hw_procmibevent(struct ath_hal *ah,
+ const struct ath9k_node_stats *stats);
+bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set);
+void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode);
+bool ath9k_hw_phycounters(struct ath_hal *ah);
+bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry);
+bool ath9k_hw_getcapability(struct ath_hal *ah,
+ enum ath9k_capability_type type,
+ u32 capability,
+ u32 *result);
+bool ath9k_hw_setcapability(struct ath_hal *ah,
+ enum ath9k_capability_type type,
+ u32 capability,
+ u32 setting,
+ int *status);
+u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
+void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac);
+void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask);
+bool ath9k_hw_setbssidmask(struct ath_hal *ah,
+ const u8 *mask);
+bool ath9k_hw_setpower(struct ath_hal *ah,
+ enum ath9k_power_mode mode);
+enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah);
+u64 ath9k_hw_gettsf64(struct ath_hal *ah);
+u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
+bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us);
+bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
+ enum ath9k_ant_setting settings,
+ struct ath9k_channel *chan,
+ u8 *tx_chainmask,
+ u8 *rx_chainmask,
+ u8 *antenna_cfgd);
+void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna);
+int ath9k_hw_select_antconfig(struct ath_hal *ah,
+ u32 cfg);
+bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
+ u32 txdp);
+bool ath9k_hw_txstart(struct ath_hal *ah, u32 q);
+u16 ath9k_hw_computetxtime(struct ath_hal *ah,
+ const struct ath9k_rate_table *rates,
+ u32 frameLen, u16 rateix,
+ bool shortPreamble);
+void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
+ struct ath_desc *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags);
+void ath9k_hw_set11n_burstduration(struct ath_hal *ah,
+ struct ath_desc *ds,
+ u32 burstDuration);
+void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds);
+u32 ath9k_hw_reverse_bits(u32 val, u32 n);
+bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q);
+u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan);
+u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
+ struct ath9k_channel *chan);
+u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
+bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
+ struct ath9k_tx_queue_info *qinfo);
+bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
+ const struct ath9k_tx_queue_info *qinfo);
+struct ath9k_channel *ath9k_regd_check_channel(struct ath_hal *ah,
+ const struct ath9k_channel *c);
+void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType, u32 flags);
+bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u32 segLen, bool firstSeg,
+ bool lastSeg,
+ const struct ath_desc *ds0);
+u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
+ u32 *rxc_pcnt,
+ u32 *rxf_pcnt,
+ u32 *txf_pcnt);
+void ath9k_hw_dmaRegDump(struct ath_hal *ah);
+void ath9k_hw_beaconinit(struct ath_hal *ah,
+ u32 next_beacon, u32 beacon_period);
+void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
+ const struct ath9k_beacon_state *bs);
+bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u32 size, u32 flags);
+void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp);
+void ath9k_hw_rxena(struct ath_hal *ah);
+void ath9k_hw_setopmode(struct ath_hal *ah);
+bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac);
+void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
+ u32 filter1);
+u32 ath9k_hw_getrxfilter(struct ath_hal *ah);
+void ath9k_hw_startpcureceive(struct ath_hal *ah);
+void ath9k_hw_stoppcurecv(struct ath_hal *ah);
+bool ath9k_hw_stopdmarecv(struct ath_hal *ah);
+int ath9k_hw_rxprocdesc(struct ath_hal *ah,
+ struct ath_desc *ds, u32 pa,
+ struct ath_desc *nds, u64 tsf);
+u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q);
+int ath9k_hw_txprocdesc(struct ath_hal *ah,
+ struct ath_desc *ds);
+void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
+ u32 numDelims);
+void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
+ u32 aggrLen);
+void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
+bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q);
+void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs);
+void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
+void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah,
+ struct ath_desc *ds, u32 vmf);
+bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit);
+bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
+int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
+ const struct ath9k_tx_queue_info *qinfo);
+u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q);
+const char *ath9k_hw_probe(u16 vendorid, u16 devid);
+bool ath9k_hw_disable(struct ath_hal *ah);
+void ath9k_hw_rfdetach(struct ath_hal *ah);
+void ath9k_hw_get_channel_centers(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ struct chan_centers *centers);
+bool ath9k_get_channel_edges(struct ath_hal *ah,
+ u16 flags, u16 *low,
+ u16 *high);
+#endif
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
new file mode 100644
index 00000000000..caf569401a3
--- /dev/null
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -0,0 +1,979 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+ /* Implementation of beacon processing. */
+
+#include <asm/unaligned.h>
+#include "core.h"
+
+/*
+ * Configure parameters for the beacon queue
+ *
+ * This function will modify certain transmit queue properties depending on
+ * the operating mode of the station (AP or AdHoc). Parameters are AIFS
+ * settings and channel width min/max
+*/
+
+static int ath_beaconq_config(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath9k_tx_queue_info qi;
+
+ ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
+ if (sc->sc_opmode == ATH9K_M_HOSTAP) {
+ /* Always burst out beacon and CAB traffic. */
+ qi.tqi_aifs = 1;
+ qi.tqi_cwmin = 0;
+ qi.tqi_cwmax = 0;
+ } else {
+ /* Adhoc mode; important thing is to use 2x cwmin. */
+ qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs;
+ qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin;
+ qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax;
+ }
+
+ if (!ath9k_hw_set_txq_props(ah, sc->sc_bhalq, &qi)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to update h/w beacon queue parameters\n",
+ __func__);
+ return 0;
+ } else {
+ ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
+ return 1;
+ }
+}
+
+/*
+ * Setup the beacon frame for transmit.
+ *
+ * Associates the beacon frame buffer with a transmit descriptor. Will set
+ * up all required antenna switch parameters, rate codes, and channel flags.
+ * Beacons are always sent out at the lowest rate, and are not retried.
+*/
+
+static void ath_beacon_setup(struct ath_softc *sc,
+ struct ath_vap *avp, struct ath_buf *bf)
+{
+ struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_desc *ds;
+ int flags, antenna;
+ const struct ath9k_rate_table *rt;
+ u8 rix, rate;
+ int ctsrate = 0;
+ int ctsduration = 0;
+ struct ath9k_11n_rate_series series[4];
+
+ DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n",
+ __func__, skb, skb->len);
+
+ /* setup descriptors */
+ ds = bf->bf_desc;
+
+ flags = ATH9K_TXDESC_NOACK;
+
+ if (sc->sc_opmode == ATH9K_M_IBSS &&
+ (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
+ ds->ds_link = bf->bf_daddr; /* self-linked */
+ flags |= ATH9K_TXDESC_VEOL;
+ /* Let hardware handle antenna switching. */
+ antenna = 0;
+ } else {
+ ds->ds_link = 0;
+ /*
+ * Switch antenna every beacon.
+ * Should only switch every beacon period, not for every
+ * SWBA's
+ * XXX assumes two antenna
+ */
+ antenna = ((sc->ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
+ }
+
+ ds->ds_data = bf->bf_buf_addr;
+
+ /*
+ * Calculate rate code.
+ * XXX everything at min xmit rate
+ */
+ rix = 0;
+ rt = sc->sc_currates;
+ rate = rt->info[rix].rateCode;
+ if (sc->sc_flags & ATH_PREAMBLE_SHORT)
+ rate |= rt->info[rix].shortPreamble;
+
+ ath9k_hw_set11n_txdesc(ah, ds
+ , skb->len + FCS_LEN /* frame length */
+ , ATH9K_PKT_TYPE_BEACON /* Atheros packet type */
+ , avp->av_btxctl.txpower /* txpower XXX */
+ , ATH9K_TXKEYIX_INVALID /* no encryption */
+ , ATH9K_KEY_TYPE_CLEAR /* no encryption */
+ , flags /* no ack, veol for beacons */
+ );
+
+ /* NB: beacon's BufLen must be a multiple of 4 bytes */
+ ath9k_hw_filltxdesc(ah, ds
+ , roundup(skb->len, 4) /* buffer length */
+ , true /* first segment */
+ , true /* last segment */
+ , ds /* first descriptor */
+ );
+
+ memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
+ series[0].Tries = 1;
+ series[0].Rate = rate;
+ series[0].ChSel = sc->sc_tx_chainmask;
+ series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
+ ath9k_hw_set11n_ratescenario(ah, ds, ds, 0,
+ ctsrate, ctsduration, series, 4, 0);
+}
+
+/* Move everything from the vap's mcast queue to the hardware cab queue.
+ * Caller must hold mcastq lock and cabq lock
+ * XXX MORE_DATA bit?
+ */
+static void empty_mcastq_into_cabq(struct ath_hal *ah,
+ struct ath_txq *mcastq, struct ath_txq *cabq)
+{
+ struct ath_buf *bfmcast;
+
+ BUG_ON(list_empty(&mcastq->axq_q));
+
+ bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
+
+ /* link the descriptors */
+ if (!cabq->axq_link)
+ ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
+ else
+ *cabq->axq_link = bfmcast->bf_daddr;
+
+ /* append the private vap mcast list to the cabq */
+
+ cabq->axq_depth += mcastq->axq_depth;
+ cabq->axq_totalqueued += mcastq->axq_totalqueued;
+ cabq->axq_linkbuf = mcastq->axq_linkbuf;
+ cabq->axq_link = mcastq->axq_link;
+ list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
+ mcastq->axq_depth = 0;
+ mcastq->axq_totalqueued = 0;
+ mcastq->axq_linkbuf = NULL;
+ mcastq->axq_link = NULL;
+}
+
+/* This is only run at DTIM. We move everything from the vap's mcast queue
+ * to the hardware cab queue. Caller must hold the mcastq lock. */
+static void trigger_mcastq(struct ath_hal *ah,
+ struct ath_txq *mcastq, struct ath_txq *cabq)
+{
+ spin_lock_bh(&cabq->axq_lock);
+
+ if (!list_empty(&mcastq->axq_q))
+ empty_mcastq_into_cabq(ah, mcastq, cabq);
+
+ /* cabq is gated by beacon so it is safe to start here */
+ if (!list_empty(&cabq->axq_q))
+ ath9k_hw_txstart(ah, cabq->axq_qnum);
+
+ spin_unlock_bh(&cabq->axq_lock);
+}
+
+/*
+ * Generate beacon frame and queue cab data for a vap.
+ *
+ * Updates the contents of the beacon frame. It is assumed that the buffer for
+ * the beacon frame has been allocated in the ATH object, and simply needs to
+ * be filled for this cycle. Also, any CAB (content after beacon) traffic will
+ * be added to the beacon frame at this point.
+*/
+static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf;
+ struct ath_vap *avp;
+ struct sk_buff *skb;
+ int cabq_depth;
+ int mcastq_depth;
+ int is_beacon_dtim = 0;
+ unsigned int curlen;
+ struct ath_txq *cabq;
+ struct ath_txq *mcastq;
+ avp = sc->sc_vaps[if_id];
+
+ mcastq = &avp->av_mcastq;
+ cabq = sc->sc_cabq;
+
+ ASSERT(avp);
+
+ if (avp->av_bcbuf == NULL) {
+ DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
+ __func__, avp, avp->av_bcbuf);
+ return NULL;
+ }
+ bf = avp->av_bcbuf;
+ skb = (struct sk_buff *) bf->bf_mpdu;
+
+ /*
+ * Update dynamic beacon contents. If this returns
+ * non-zero then we need to remap the memory because
+ * the beacon frame changed size (probably because
+ * of the TIM bitmap).
+ */
+ curlen = skb->len;
+
+ /* XXX: spin_lock_bh should not be used here, but sparse bitches
+ * otherwise. We should fix sparse :) */
+ spin_lock_bh(&mcastq->axq_lock);
+ mcastq_depth = avp->av_mcastq.axq_depth;
+
+ if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) ==
+ 1) {
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ } else {
+ pci_dma_sync_single_for_cpu(sc->pdev,
+ bf->bf_buf_addr,
+ skb_tailroom(skb),
+ PCI_DMA_TODEVICE);
+ }
+
+ /*
+ * if the CABQ traffic from previous DTIM is pending and the current
+ * beacon is also a DTIM.
+ * 1) if there is only one vap let the cab traffic continue.
+ * 2) if there are more than one vap and we are using staggered
+ * beacons, then drain the cabq by dropping all the frames in
+ * the cabq so that the current vaps cab traffic can be scheduled.
+ */
+ spin_lock_bh(&cabq->axq_lock);
+ cabq_depth = cabq->axq_depth;
+ spin_unlock_bh(&cabq->axq_lock);
+
+ is_beacon_dtim = avp->av_boff.bo_tim[4] & 1;
+
+ if (mcastq_depth && is_beacon_dtim && cabq_depth) {
+ /*
+ * The cabq lock is not held here because ath_tx_draintxq
+ * is a common function that acquires the txq lock
+ * internally.
+ */
+ if (sc->sc_nvaps > 1) {
+ ath_tx_draintxq(sc, cabq, false);
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: flush previous cabq traffic\n", __func__);
+ }
+ }
+
+ /* Construct tx descriptor. */
+ ath_beacon_setup(sc, avp, bf);
+
+ /*
+ * Enable the CAB queue before the beacon queue to
+ * ensure cab frames are triggered by this beacon.
+ */
+ if (is_beacon_dtim)
+ trigger_mcastq(ah, mcastq, cabq);
+
+ spin_unlock_bh(&mcastq->axq_lock);
+ return bf;
+}
+
+/*
+ * Start up beacon transmission for adhoc mode, where beacons are sent entirely
+ * by the hardware using the self-linked descriptor + VEOL trick.
+*/
+
+static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf;
+ struct ath_vap *avp;
+ struct sk_buff *skb;
+
+ avp = sc->sc_vaps[if_id];
+ ASSERT(avp);
+
+ if (avp->av_bcbuf == NULL) {
+ DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
+ __func__, avp, avp != NULL ? avp->av_bcbuf : NULL);
+ return;
+ }
+ bf = avp->av_bcbuf;
+ skb = (struct sk_buff *) bf->bf_mpdu;
+
+ /* Construct tx descriptor. */
+ ath_beacon_setup(sc, avp, bf);
+
+ /* NB: caller is known to have already stopped tx dma */
+ ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
+ ath9k_hw_txstart(ah, sc->sc_bhalq);
+ DPRINTF(sc, ATH_DBG_BEACON, "%s: TXDP%u = %llx (%p)\n", __func__,
+ sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
+}
+
+/*
+ * Setup a h/w transmit queue for beacons.
+ *
+ * This function allocates an information structure (struct ath9k_tx_queue_info)
+ * on the stack, sets some specific parameters (zero out channel width
+ * min/max, and enable aifs). The info structure does not need to be
+ * persistent.
+*/
+
+int ath_beaconq_setup(struct ath_hal *ah)
+{
+ struct ath9k_tx_queue_info qi;
+
+ memzero(&qi, sizeof(qi));
+ qi.tqi_aifs = 1;
+ qi.tqi_cwmin = 0;
+ qi.tqi_cwmax = 0;
+ /* NB: don't enable any interrupts */
+ return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
+}
+
+
+/*
+ * Allocate and setup an initial beacon frame.
+ *
+ * Allocate a beacon state variable for a specific VAP instance created on
+ * the ATH interface. This routine also calculates the beacon "slot" for
+ * staggared beacons in the mBSSID case.
+*/
+
+int ath_beacon_alloc(struct ath_softc *sc, int if_id)
+{
+ struct ath_vap *avp;
+ struct ieee80211_hdr *wh;
+ struct ath_buf *bf;
+ struct sk_buff *skb;
+
+ avp = sc->sc_vaps[if_id];
+ ASSERT(avp);
+
+ /* Allocate a beacon descriptor if we haven't done so. */
+ if (!avp->av_bcbuf) {
+ /*
+ * Allocate beacon state for hostap/ibss. We know
+ * a buffer is available.
+ */
+
+ avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
+ struct ath_buf, list);
+ list_del(&avp->av_bcbuf->list);
+
+ if (sc->sc_opmode == ATH9K_M_HOSTAP ||
+ !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
+ int slot;
+ /*
+ * Assign the vap to a beacon xmit slot. As
+ * above, this cannot fail to find one.
+ */
+ avp->av_bslot = 0;
+ for (slot = 0; slot < ATH_BCBUF; slot++)
+ if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) {
+ /*
+ * XXX hack, space out slots to better
+ * deal with misses
+ */
+ if (slot+1 < ATH_BCBUF &&
+ sc->sc_bslot[slot+1] ==
+ ATH_IF_ID_ANY) {
+ avp->av_bslot = slot+1;
+ break;
+ }
+ avp->av_bslot = slot;
+ /* NB: keep looking for a double slot */
+ }
+ BUG_ON(sc->sc_bslot[avp->av_bslot] != ATH_IF_ID_ANY);
+ sc->sc_bslot[avp->av_bslot] = if_id;
+ sc->sc_nbcnvaps++;
+ }
+ }
+
+ /* release the previous beacon frame, if it already exists. */
+ bf = avp->av_bcbuf;
+ if (bf->bf_mpdu != NULL) {
+ skb = (struct sk_buff *)bf->bf_mpdu;
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ }
+
+ /*
+ * NB: the beacon data buffer must be 32-bit aligned;
+ * we assume the wbuf routines will return us something
+ * with this alignment (perhaps should assert).
+ * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and
+ * avp->av_btxctl.shortPreamble
+ */
+ skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
+ if (skb == NULL) {
+ DPRINTF(sc, ATH_DBG_BEACON, "%s: cannot get skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /*
+ * Calculate a TSF adjustment factor required for
+ * staggered beacons. Note that we assume the format
+ * of the beacon frame leaves the tstamp field immediately
+ * following the header.
+ */
+ if (avp->av_bslot > 0) {
+ u64 tsfadjust;
+ __le64 val;
+ int intval;
+
+ /* FIXME: Use default value for now: Sujith */
+
+ intval = ATH_DEFAULT_BINTVAL;
+
+ /*
+ * The beacon interval is in TU's; the TSF in usecs.
+ * We figure out how many TU's to add to align the
+ * timestamp then convert to TSF units and handle
+ * byte swapping before writing it in the frame.
+ * The hardware will then add this each time a beacon
+ * frame is sent. Note that we align vap's 1..N
+ * and leave vap 0 untouched. This means vap 0
+ * has a timestamp in one beacon interval while the
+ * others get a timestamp aligned to the next interval.
+ */
+ tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF;
+ val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */
+
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: %s beacons, bslot %d intval %u tsfadjust %llu\n",
+ __func__, "stagger",
+ avp->av_bslot, intval, (unsigned long long)tsfadjust);
+
+ wh = (struct ieee80211_hdr *)skb->data;
+ memcpy(&wh[1], &val, sizeof(val));
+ }
+
+ bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ bf->bf_mpdu = skb;
+
+ return 0;
+}
+
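To make the TU-to-TSF adjustment in ath_beacon_alloc() above concrete, here is a hedged sketch of the arithmetic only; the sample values (100 TU interval, 4 beacon slots) are assumptions standing in for ATH_DEFAULT_BINTVAL and ATH_BCBUF:

static u64 example_tsf_adjust_usecs(int intval_tu, int nslots, int bslot)
{
	/* e.g. intval_tu = 100, nslots = 4, bslot = 1:
	 * tsfadjust = 100 * (4 - 1) / 4 = 75 TU,
	 * and 75 << 10 = 76800 usecs gets written into the tstamp field.
	 */
	u64 tsfadjust = ((u64)intval_tu * (nslots - bslot)) / nslots;

	return tsfadjust << 10;	/* 1 TU == 1024 usecs */
}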
+/*
+ * Reclaim beacon resources and return buffer to the pool.
+ *
+ * Checks the VAP to put the beacon frame buffer back to the ATH object
+ * queue, and de-allocates any wbuf frames that were sent as CAB traffic.
+*/
+
+void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
+{
+ if (avp->av_bcbuf != NULL) {
+ struct ath_buf *bf;
+
+ if (avp->av_bslot != -1) {
+ sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY;
+ sc->sc_nbcnvaps--;
+ }
+
+ bf = avp->av_bcbuf;
+ if (bf->bf_mpdu != NULL) {
+ struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ }
+ list_add_tail(&bf->list, &sc->sc_bbuf);
+
+ avp->av_bcbuf = NULL;
+ }
+}
+
+/*
+ * Reclaim beacon resources and return buffer to the pool.
+ *
+ * This function will free any wbuf frames that are still attached to the
+ * beacon buffers in the ATH object. Note that this does not de-allocate
+ * any wbuf objects that are in the transmit queue and have not yet returned
+ * to the ATH object.
+*/
+
+void ath_beacon_free(struct ath_softc *sc)
+{
+ struct ath_buf *bf;
+
+ list_for_each_entry(bf, &sc->sc_bbuf, list) {
+ if (bf->bf_mpdu != NULL) {
+ struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
+ ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ }
+ }
+}
+
+/*
+ * Tasklet for Sending Beacons
+ *
+ * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
+ * contents are done as needed and the slot time is also adjusted based on
+ * current state.
+ *
+ * This tasklet is not scheduled, it's called in ISR context.
+*/
+
+void ath9k_beacon_tasklet(unsigned long data)
+{
+#define TSF_TO_TU(_h,_l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf = NULL;
+ int slot, if_id;
+ u32 bfaddr;
+ u32 rx_clear = 0, rx_frame = 0, tx_frame = 0;
+ u32 show_cycles = 0;
+ u32 bc = 0; /* beacon count */
+ u64 tsf;
+ u32 tsftu;
+ u16 intval;
+
+ if (sc->sc_noreset) {
+ show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
+ &rx_clear,
+ &rx_frame,
+ &tx_frame);
+ }
+
+ /*
+ * Check if the previous beacon has gone out. If
+ * not don't try to post another, skip this period
+ * and wait for the next. Missed beacons indicate
+ * a problem and should not occur. If we miss too
+ * many consecutive beacons reset the device.
+ */
+ if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
+ sc->sc_bmisscount++;
+ /* XXX: doth needs the chanchange IE countdown decremented.
+ * We should consider adding a mac80211 call to indicate
+ * a beacon miss so appropriate action could be taken
+ * (in that layer).
+ */
+ if (sc->sc_bmisscount < BSTUCK_THRESH) {
+ if (sc->sc_noreset) {
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: missed %u consecutive beacons\n",
+ __func__, sc->sc_bmisscount);
+ if (show_cycles) {
+ /*
+ * Display cycle counter stats
+ * from HW to aid in debugging
+ * stickiness.
+ */
+ DPRINTF(sc,
+ ATH_DBG_BEACON,
+ "%s: busy times: rx_clear=%d, "
+ "rx_frame=%d, tx_frame=%d\n",
+ __func__, rx_clear, rx_frame,
+ tx_frame);
+ } else {
+ DPRINTF(sc,
+ ATH_DBG_BEACON,
+ "%s: unable to obtain "
+ "busy times\n", __func__);
+ }
+ } else {
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: missed %u consecutive beacons\n",
+ __func__, sc->sc_bmisscount);
+ }
+ } else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
+ if (sc->sc_noreset) {
+ if (sc->sc_bmisscount == BSTUCK_THRESH) {
+ DPRINTF(sc,
+ ATH_DBG_BEACON,
+ "%s: beacon is officially "
+ "stuck\n", __func__);
+ ath9k_hw_dmaRegDump(ah);
+ }
+ } else {
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: beacon is officially stuck\n",
+ __func__);
+ ath_bstuck_process(sc);
+ }
+ }
+
+ return;
+ }
+ if (sc->sc_bmisscount != 0) {
+ if (sc->sc_noreset) {
+ DPRINTF(sc,
+ ATH_DBG_BEACON,
+ "%s: resume beacon xmit after %u misses\n",
+ __func__, sc->sc_bmisscount);
+ } else {
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: resume beacon xmit after %u misses\n",
+ __func__, sc->sc_bmisscount);
+ }
+ sc->sc_bmisscount = 0;
+ }
+
+ /*
+ * Generate beacon frames. We are sending frames
+ * staggered, so calculate the slot for this frame based
+ * on the TSF to safeguard against missing an SWBA.
+ */
+
+ /* FIXME: Use default value for now - Sujith */
+ intval = ATH_DEFAULT_BINTVAL;
+
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf);
+ slot = ((tsftu % intval) * ATH_BCBUF) / intval;
+ if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
+ __func__, slot, (unsigned long long) tsf, tsftu,
+ intval, if_id);
+ bfaddr = 0;
+ if (if_id != ATH_IF_ID_ANY) {
+ bf = ath_beacon_generate(sc, if_id);
+ if (bf != NULL) {
+ bfaddr = bf->bf_daddr;
+ bc = 1;
+ }
+ }
+ /*
+ * Handle slot time change when a non-ERP station joins/leaves
+ * an 11g network. The 802.11 layer notifies us via callback,
+ * we mark updateslot, then wait one beacon before effecting
+ * the change. This gives associated stations at least one
+ * beacon interval to note the state change.
+ *
+ * NB: The slot time change state machine is clocked according
+ * to whether we are bursting or staggering beacons. We
+ * recognize the request to update and record the current
+ * slot then don't transition until that slot is reached
+ * again. If we miss a beacon for that slot then we'll be
+ * slow to transition but we'll be sure at least one beacon
+ * interval has passed. When bursting slot is always left
+ * set to ATH_BCBUF so this check is a noop.
+ */
+ /* XXX locking */
+ if (sc->sc_updateslot == UPDATE) {
+ sc->sc_updateslot = COMMIT; /* commit next beacon */
+ sc->sc_slotupdate = slot;
+ } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
+ ath_setslottime(sc); /* commit change to hardware */
+
+ if (bfaddr != 0) {
+ /*
+ * Stop any current dma and put the new frame(s) on the queue.
+ * This should never fail since we check above that no frames
+ * are still pending on the queue.
+ */
+ if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: beacon queue %u did not stop?\n",
+ __func__, sc->sc_bhalq);
+ /* NB: the HAL still stops DMA, so proceed */
+ }
+
+ /* NB: cabq traffic should already be queued and primed */
+ ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr);
+ ath9k_hw_txstart(ah, sc->sc_bhalq);
+
+ sc->ast_be_xmit += bc; /* XXX per-vap? */
+ }
+#undef TSF_TO_TU
+}
+
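The staggered-slot selection in ath9k_beacon_tasklet() above maps the current TSF (converted to TU) onto one of the beacon slots. A hedged sketch of just that arithmetic; nslots stands in for ATH_BCBUF and the sample numbers are illustrative:

static int example_beacon_slot(u32 tsftu, u16 intval, int nslots)
{
	/* With intval = 100 TU and nslots = 4, tsftu = 130 gives
	 * (130 % 100) * 4 / 100 = 1, i.e. the second slot.
	 */
	return ((tsftu % intval) * nslots) / intval;
}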
+/*
+ * Tasklet for Beacon Stuck processing
+ *
+ * Processing for Beacon Stuck.
+ * Basically calls the ath_internal_reset function to reset the chip.
+*/
+
+void ath_bstuck_process(struct ath_softc *sc)
+{
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: stuck beacon; resetting (bmiss count %u)\n",
+ __func__, sc->sc_bmisscount);
+ ath_internal_reset(sc);
+}
+
+/*
+ * Configure the beacon and sleep timers.
+ *
+ * When operating as an AP this resets the TSF and sets
+ * up the hardware to notify us when we need to issue beacons.
+ *
+ * When operating in station mode this sets up the beacon
+ * timers according to the timestamp of the last received
+ * beacon and the current TSF, configures PCF and DTIM
+ * handling, programs the sleep registers so the hardware
+ * will wake up in time to receive beacons, and configures
+ * the beacon miss handling so we'll receive a BMISS
+ * interrupt when we stop seeing beacons from the AP
+ * we've associated with.
+ */
+
+void ath_beacon_config(struct ath_softc *sc, int if_id)
+{
+#define TSF_TO_TU(_h,_l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+ struct ath_hal *ah = sc->sc_ah;
+ u32 nexttbtt, intval;
+ struct ath_beacon_config conf;
+ enum ath9k_opmode av_opmode;
+
+ if (if_id != ATH_IF_ID_ANY)
+ av_opmode = sc->sc_vaps[if_id]->av_opmode;
+ else
+ av_opmode = sc->sc_opmode;
+
+ memzero(&conf, sizeof(struct ath_beacon_config));
+
+ /* FIXME: Use default values for now - Sujith */
+ /* Query beacon configuration first */
+ /*
+ * Protocol stack doesn't support dynamic beacon configuration,
+ * use default configurations.
+ */
+ conf.beacon_interval = ATH_DEFAULT_BINTVAL;
+ conf.listen_interval = 1;
+ conf.dtim_period = conf.beacon_interval;
+ conf.dtim_count = 1;
+ conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
+
+ /* extract tstamp from last beacon and convert to TU */
+ nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4),
+ get_unaligned_le32(conf.u.last_tstamp));
+ /* XXX conditionalize multi-bss support? */
+ if (sc->sc_opmode == ATH9K_M_HOSTAP) {
+ /*
+ * For multi-bss ap support beacons are either staggered
+ * evenly over N slots or burst together. For the former
+ * arrange for the SWBA to be delivered for each slot.
+ * Slots that are not occupied will generate nothing.
+ */
+ /* NB: the beacon interval is kept internally in TU's */
+ intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
+ intval /= ATH_BCBUF; /* for staggered beacons */
+ } else {
+ intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
+ }
+
+ if (nexttbtt == 0) /* e.g. for ap mode */
+ nexttbtt = intval;
+ else if (intval) /* NB: can be 0 for monitor mode */
+ nexttbtt = roundup(nexttbtt, intval);
+ DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
+ __func__, nexttbtt, intval, conf.beacon_interval);
+ /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */
+ if (sc->sc_opmode == ATH9K_M_STA) {
+ struct ath9k_beacon_state bs;
+ u64 tsf;
+ u32 tsftu;
+ int dtimperiod, dtimcount, sleepduration;
+ int cfpperiod, cfpcount;
+
+ /*
+ * Setup dtim and cfp parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtimperiod = conf.dtim_period;
+ if (dtimperiod <= 0) /* NB: 0 if not known */
+ dtimperiod = 1;
+ dtimcount = conf.dtim_count;
+ if (dtimcount >= dtimperiod) /* NB: sanity check */
+ dtimcount = 0; /* XXX? */
+ cfpperiod = 1; /* NB: no PCF support yet */
+ cfpcount = 0;
+
+ sleepduration = conf.listen_interval * intval;
+ if (sleepduration <= 0)
+ sleepduration = intval;
+
+#define FUDGE 2
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim+cfp state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ if (--dtimcount < 0) {
+ dtimcount = dtimperiod - 1;
+ if (--cfpcount < 0)
+ cfpcount = cfpperiod - 1;
+ }
+ } while (nexttbtt < tsftu);
+#undef FUDGE
+ memzero(&bs, sizeof(bs));
+ bs.bs_intval = intval;
+ bs.bs_nexttbtt = nexttbtt;
+ bs.bs_dtimperiod = dtimperiod*intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
+ bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
+ bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
+ bs.bs_cfpmaxduration = 0;
+ /*
+ * Calculate the number of consecutive beacons to miss
+ * before taking a BMISS interrupt. The configuration
+	 * is specified in TU, so we only need to calculate based
+ * on the beacon interval. Note that we clamp the
+ * result to at most 15 beacons.
+ */
+ if (sleepduration > intval) {
+ bs.bs_bmissthreshold =
+ conf.listen_interval *
+ ATH_DEFAULT_BMISS_LIMIT / 2;
+ } else {
+ bs.bs_bmissthreshold =
+ DIV_ROUND_UP(conf.bmiss_timeout, intval);
+ if (bs.bs_bmissthreshold > 15)
+ bs.bs_bmissthreshold = 15;
+ else if (bs.bs_bmissthreshold <= 0)
+ bs.bs_bmissthreshold = 1;
+ }
+
+ /*
+ * Calculate sleep duration. The configuration is
+	 * given in ms. We ensure a multiple of the beacon
+	 * period is used. Also, if the sleep duration is
+	 * greater than the DTIM period then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs.bs_sleepduration =
+ roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ if (bs.bs_sleepduration > bs.bs_dtimperiod)
+ bs.bs_sleepduration = bs.bs_dtimperiod;
+
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: tsf %llu "
+ "tsf:tu %u "
+ "intval %u "
+ "nexttbtt %u "
+ "dtim %u "
+ "nextdtim %u "
+ "bmiss %u "
+ "sleep %u "
+ "cfp:period %u "
+ "maxdur %u "
+ "next %u "
+ "timoffset %u\n"
+ , __func__
+ , (unsigned long long)tsf, tsftu
+ , bs.bs_intval
+ , bs.bs_nexttbtt
+ , bs.bs_dtimperiod
+ , bs.bs_nextdtim
+ , bs.bs_bmissthreshold
+ , bs.bs_sleepduration
+ , bs.bs_cfpperiod
+ , bs.bs_cfpmaxduration
+ , bs.bs_cfpnext
+ , bs.bs_timoffset
+ );
+
+ ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_set_sta_beacon_timers(ah, &bs);
+ sc->sc_imask |= ATH9K_INT_BMISS;
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ } else {
+ u64 tsf;
+ u32 tsftu;
+ ath9k_hw_set_interrupts(ah, 0);
+ if (nexttbtt == intval)
+ intval |= ATH9K_BEACON_RESET_TSF;
+ if (sc->sc_opmode == ATH9K_M_IBSS) {
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF .
+ */
+#define FUDGE 2
+ if (!(intval & ATH9K_BEACON_RESET_TSF)) {
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU((u32)(tsf>>32),
+ (u32)tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ } while (nexttbtt < tsftu);
+ }
+#undef FUDGE
+ DPRINTF(sc, ATH_DBG_BEACON,
+ "%s: IBSS nexttbtt %u intval %u (%u)\n",
+ __func__, nexttbtt,
+ intval & ~ATH9K_BEACON_RESET_TSF,
+ conf.beacon_interval);
+
+ /*
+ * In IBSS mode enable the beacon timers but only
+ * enable SWBA interrupts if we need to manually
+ * prepare beacon frames. Otherwise we use a
+ * self-linked tx descriptor and let the hardware
+ * deal with things.
+ */
+ intval |= ATH9K_BEACON_ENA;
+ if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
+ sc->sc_imask |= ATH9K_INT_SWBA;
+ ath_beaconq_config(sc);
+ } else if (sc->sc_opmode == ATH9K_M_HOSTAP) {
+ /*
+ * In AP mode we enable the beacon timers and
+ * SWBA interrupts to prepare beacon frames.
+ */
+ intval |= ATH9K_BEACON_ENA;
+ sc->sc_imask |= ATH9K_INT_SWBA; /* beacon prepare */
+ ath_beaconq_config(sc);
+ }
+ ath9k_hw_beaconinit(ah, nexttbtt, intval);
+ sc->sc_bmisscount = 0;
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ /*
+ * When using a self-linked beacon descriptor in
+ * ibss mode load it once here.
+ */
+ if (sc->sc_opmode == ATH9K_M_IBSS &&
+ (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
+ ath_beacon_start_adhoc(sc, 0);
+ }
+#undef TSF_TO_TU
+}
+
+/* Function to collect beacon rssi data and resync beacon if necessary */
+
+void ath_beacon_sync(struct ath_softc *sc, int if_id)
+{
+ /*
+ * Resync beacon timers using the tsf of the
+ * beacon frame we just received.
+ */
+ ath_beacon_config(sc, if_id);
+ sc->sc_beacons = 1;
+}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
new file mode 100644
index 00000000000..f6c45288d0e
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.c
@@ -0,0 +1,1923 @@
+/*
+ * Copyright (c) 2008, Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+ /* Implementation of the main "ATH" layer. */
+
+#include "core.h"
+#include "regd.h"
+
+static int ath_outdoor; /* enable outdoor use */
+
+static const u8 ath_bcast_mac[ETH_ALEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static u32 ath_chainmask_sel_up_rssi_thres =
+ ATH_CHAINMASK_SEL_UP_RSSI_THRES;
+static u32 ath_chainmask_sel_down_rssi_thres =
+ ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
+static u32 ath_chainmask_sel_period =
+ ATH_CHAINMASK_SEL_TIMEOUT;
+
+/* return bus cachesize in 4B word units */
+
+static void bus_read_cachesize(struct ath_softc *sc, int *csz)
+{
+ u8 u8tmp;
+
+ pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
+ *csz = (int)u8tmp;
+
+ /*
+	 * This check was put in to avoid "unpleasant" consequences if
+	 * the bootrom has not fully initialized all PCI devices.
+	 * Sometimes the cache line size register is not set.
+ */
+
+ if (*csz == 0)
+ *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
+}
+
+/*
+ * Set current operating mode
+ *
+ * This function initializes and fills the rate table in the ATH object based
+ * on the operating mode. The blink rates are also set up here, although
+ * they have been superseded by the ath_led module.
+*/
+
+static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
+{
+ const struct ath9k_rate_table *rt;
+ int i;
+
+ memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
+ rt = ath9k_hw_getratetable(sc->sc_ah, mode);
+ BUG_ON(!rt);
+
+ for (i = 0; i < rt->rateCount; i++)
+ sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;
+
+ memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
+ for (i = 0; i < 256; i++) {
+ u8 ix = rt->rateCodeToIndex[i];
+
+ if (ix == 0xff)
+ continue;
+
+ sc->sc_hwmap[i].ieeerate =
+ rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
+ sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
+
+ if (rt->info[ix].shortPreamble ||
+ rt->info[ix].phy == PHY_OFDM) {
+ /* XXX: Handle this */
+ }
+
+ /* NB: this uses the last entry if the rate isn't found */
+		/* XXX beware of overflow */
+ }
+ sc->sc_currates = rt;
+ sc->sc_curmode = mode;
+ /*
+	 * All protection frames are transmitted at 2Mb/s for
+ * 11g, otherwise at 1Mb/s.
+ * XXX select protection rate index from rate table.
+ */
+ sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
+}
+
+/*
+ * Set up rate table (legacy rates)
+ */
+static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ const struct ath9k_rate_table *rt = NULL;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rate *rate;
+ int i, maxrates;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
+ break;
+ default:
+ break;
+ }
+
+ if (rt == NULL)
+ return;
+
+ sband = &sc->sbands[band];
+ rate = sc->rates[band];
+
+ if (rt->rateCount > ATH_RATE_MAX)
+ maxrates = ATH_RATE_MAX;
+ else
+ maxrates = rt->rateCount;
+
+ for (i = 0; i < maxrates; i++) {
+ rate[i].bitrate = rt->info[i].rateKbps / 100;
+ rate[i].hw_value = rt->info[i].rateCode;
+ sband->n_bitrates++;
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: Rate: %2dMbps, ratecode: %2d\n",
+ __func__,
+ rate[i].bitrate / 10,
+ rate[i].hw_value);
+ }
+}
+
+/*
+ * Set up channel list
+ */
+static int ath_setup_channels(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ int nchan, i, a = 0, b = 0;
+ u8 regclassids[ATH_REGCLASSIDS_MAX];
+ u32 nregclass = 0;
+ struct ieee80211_supported_band *band_2ghz;
+ struct ieee80211_supported_band *band_5ghz;
+ struct ieee80211_channel *chan_2ghz;
+ struct ieee80211_channel *chan_5ghz;
+ struct ath9k_channel *c;
+
+ /* Fill in ah->ah_channels */
+ if (!ath9k_regd_init_channels(ah,
+ ATH_CHAN_MAX,
+ (u32 *)&nchan,
+ regclassids,
+ ATH_REGCLASSIDS_MAX,
+ &nregclass,
+ CTRY_DEFAULT,
+ false,
+ 1)) {
+ u32 rd = ah->ah_currentRD;
+
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to collect channel list; "
+ "regdomain likely %u country code %u\n",
+ __func__, rd, CTRY_DEFAULT);
+ return -EINVAL;
+ }
+
+ band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
+ band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
+ chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
+ chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < nchan; i++) {
+ c = &ah->ah_channels[i];
+ if (IS_CHAN_2GHZ(c)) {
+ chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
+ chan_2ghz[a].center_freq = c->channel;
+ chan_2ghz[a].max_power = c->maxTxPower;
+
+ if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
+ chan_2ghz[a].flags |=
+ IEEE80211_CHAN_NO_IBSS;
+ if (c->channelFlags & CHANNEL_PASSIVE)
+ chan_2ghz[a].flags |=
+ IEEE80211_CHAN_PASSIVE_SCAN;
+
+ band_2ghz->n_channels = ++a;
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: 2MHz channel: %d, "
+ "channelFlags: 0x%x\n",
+ __func__,
+ c->channel,
+ c->channelFlags);
+ } else if (IS_CHAN_5GHZ(c)) {
+ chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
+ chan_5ghz[b].center_freq = c->channel;
+ chan_5ghz[b].max_power = c->maxTxPower;
+
+ if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
+ chan_5ghz[b].flags |=
+ IEEE80211_CHAN_NO_IBSS;
+ if (c->channelFlags & CHANNEL_PASSIVE)
+ chan_5ghz[b].flags |=
+ IEEE80211_CHAN_PASSIVE_SCAN;
+
+ band_5ghz->n_channels = ++b;
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: 5MHz channel: %d, "
+ "channelFlags: 0x%x\n",
+ __func__,
+ c->channel,
+ c->channelFlags);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Determine mode from channel flags
+ *
+ * This routine will provide the enumerated wireless_mode value based
+ * on the settings of the channel flags. If no valid set of flags
+ * exists, the lowest mode (11b) is selected.
+*/
+
+static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
+{
+ if (chan->chanmode == CHANNEL_A)
+ return ATH9K_MODE_11A;
+ else if (chan->chanmode == CHANNEL_G)
+ return ATH9K_MODE_11G;
+ else if (chan->chanmode == CHANNEL_B)
+ return ATH9K_MODE_11B;
+ else if (chan->chanmode == CHANNEL_A_HT20)
+ return ATH9K_MODE_11NA_HT20;
+ else if (chan->chanmode == CHANNEL_G_HT20)
+ return ATH9K_MODE_11NG_HT20;
+ else if (chan->chanmode == CHANNEL_A_HT40PLUS)
+ return ATH9K_MODE_11NA_HT40PLUS;
+ else if (chan->chanmode == CHANNEL_A_HT40MINUS)
+ return ATH9K_MODE_11NA_HT40MINUS;
+ else if (chan->chanmode == CHANNEL_G_HT40PLUS)
+ return ATH9K_MODE_11NG_HT40PLUS;
+ else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+ return ATH9K_MODE_11NG_HT40MINUS;
+
+ /* NB: should not get here */
+ return ATH9K_MODE_11B;
+}
+
+/*
+ * Stop the device, grabbing the top-level lock to protect
+ * against concurrent entry through ath_init (which can happen
+ * if another thread does a system call and the thread doing the
+ * stop is preempted).
+ */
+
+static int ath_stop(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
+ __func__, sc->sc_invalid);
+
+ /*
+ * Shutdown the hardware and driver:
+ * stop output from above
+ * reset 802.11 state machine
+ * (sends station deassoc/deauth frames)
+ * turn off timers
+ * disable interrupts
+ * clear transmit machinery
+ * clear receive machinery
+ * turn off the radio
+ * reclaim beacon resources
+ *
+ * Note that some of this work is not possible if the
+ * hardware is gone (invalid).
+ */
+
+ if (!sc->sc_invalid)
+ ath9k_hw_set_interrupts(ah, 0);
+ ath_draintxq(sc, false);
+ if (!sc->sc_invalid) {
+ ath_stoprecv(sc);
+ ath9k_hw_phy_disable(ah);
+ } else
+ sc->sc_rxlink = NULL;
+
+ return 0;
+}
+
+/*
+ * Start Scan
+ *
+ * This function is called when starting a channel scan. It will perform
+ * power save wakeup processing, set the filter for the scan, and get the
+ * chip ready to send broadcast packets out during the scan.
+*/
+
+void ath_scan_start(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u32 rfilt;
+ u32 now = (u32) jiffies_to_msecs(get_timestamp());
+
+ sc->sc_scanning = 1;
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+ ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
+
+ /* Restore previous power management state. */
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
+ now / 1000, now % 1000, __func__, rfilt);
+}
+
+/*
+ * Scan End
+ *
+ * This routine is called by the upper layer when the scan is completed. This
+ * will set the filters back to normal operating mode, set the BSSID to the
+ * correct value, and restore the power save state.
+*/
+
+void ath_scan_end(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u32 rfilt;
+ u32 now = (u32) jiffies_to_msecs(get_timestamp());
+
+ sc->sc_scanning = 0;
+ /* Request for a full reset due to rx packet filter changes */
+ sc->sc_full_reset = 1;
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+ ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
+ now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
+}
+
+/*
+ * Set the current channel
+ *
+ * Set/change channels. If the channel is really being changed, it's done
+ * by resetting the chip. To accomplish this we must first clean up any pending
+ * DMA, then restart everything, a la ath_init.
+*/
+int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ bool fastcc = true, stopped;
+ enum ath9k_ht_macmode ht_macmode;
+
+ if (sc->sc_invalid) /* if the device is invalid or removed */
+ return -EIO;
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
+ __func__,
+ ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
+ sc->sc_curchan.channelFlags),
+ sc->sc_curchan.channel,
+ ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
+ hchan->channel, hchan->channelFlags);
+
+ ht_macmode = ath_cwm_macmode(sc);
+
+ if (hchan->channel != sc->sc_curchan.channel ||
+ hchan->channelFlags != sc->sc_curchan.channelFlags ||
+ sc->sc_update_chainmask || sc->sc_full_reset) {
+ int status;
+ /*
+ * This is only performed if the channel settings have
+ * actually changed.
+ *
+ * To switch channels clear any pending DMA operations;
+ * wait long enough for the RX fifo to drain, reset the
+ * hardware at the new frequency, and then re-enable
+ * the relevant bits of the h/w.
+ */
+ ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
+ ath_draintxq(sc, false); /* clear pending tx frames */
+ stopped = ath_stoprecv(sc); /* turn off frame recv */
+
+ /* XXX: do not flush receive queue here. We don't want
+ * to flush data frames already in queue because of
+ * changing channel. */
+
+ if (!stopped || sc->sc_full_reset)
+ fastcc = false;
+
+ spin_lock_bh(&sc->sc_resetlock);
+ if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
+ ht_macmode, sc->sc_tx_chainmask,
+ sc->sc_rx_chainmask,
+ sc->sc_ht_extprotspacing,
+ fastcc, &status)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to reset channel %u (%uMhz) "
+ "flags 0x%x hal status %u\n", __func__,
+ ath9k_hw_mhz2ieee(ah, hchan->channel,
+ hchan->channelFlags),
+ hchan->channel, hchan->channelFlags, status);
+ spin_unlock_bh(&sc->sc_resetlock);
+ return -EIO;
+ }
+ spin_unlock_bh(&sc->sc_resetlock);
+
+ sc->sc_curchan = *hchan;
+ sc->sc_update_chainmask = 0;
+ sc->sc_full_reset = 0;
+
+ /* Re-enable rx framework */
+ if (ath_startrecv(sc) != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to restart recv logic\n", __func__);
+ return -EIO;
+ }
+ /*
+ * Change channels and update the h/w rate map
+ * if we're switching; e.g. 11a to 11b/g.
+ */
+ ath_setcurmode(sc, ath_chan2mode(hchan));
+
+ ath_update_txpow(sc); /* update tx power state */
+ /*
+ * Re-enable interrupts.
+ */
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ }
+ return 0;
+}
+
+/**********************/
+/* Chainmask Handling */
+/**********************/
+
+static void ath_chainmask_sel_timertimeout(unsigned long data)
+{
+ struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
+ cm->switch_allowed = 1;
+}
+
+/* Start chainmask select timer */
+static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
+{
+ cm->switch_allowed = 0;
+ mod_timer(&cm->timer, ath_chainmask_sel_period);
+}
+
+/* Stop chainmask select timer */
+static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
+{
+ cm->switch_allowed = 0;
+ del_timer_sync(&cm->timer);
+}
+
+static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
+
+ memzero(cm, sizeof(struct ath_chainmask_sel));
+
+ cm->cur_tx_mask = sc->sc_tx_chainmask;
+ cm->cur_rx_mask = sc->sc_rx_chainmask;
+ cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
+ setup_timer(&cm->timer,
+ ath_chainmask_sel_timertimeout, (unsigned long) cm);
+}
+
+int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
+
+ /*
+	 * Disable auto-switching in one of the following if conditions.
+ * sc_chainmask_auto_sel is used for internal global auto-switching
+ * enabled/disabled setting
+ */
+ if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
+ cm->cur_tx_mask = sc->sc_tx_chainmask;
+ return cm->cur_tx_mask;
+ }
+
+ if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
+ return cm->cur_tx_mask;
+
+ if (cm->switch_allowed) {
+ /* Switch down from tx 3 to tx 2. */
+ if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
+ ATH_RSSI_OUT(cm->tx_avgrssi) >=
+ ath_chainmask_sel_down_rssi_thres) {
+ cm->cur_tx_mask = sc->sc_tx_chainmask;
+
+ /* Don't let another switch happen until
+ * this timer expires */
+ ath_chainmask_sel_timerstart(cm);
+ }
+ /* Switch up from tx 2 to 3. */
+ else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
+ ATH_RSSI_OUT(cm->tx_avgrssi) <=
+ ath_chainmask_sel_up_rssi_thres) {
+ cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
+
+ /* Don't let another switch happen
+ * until this timer expires */
+ ath_chainmask_sel_timerstart(cm);
+ }
+ }
+
+ return cm->cur_tx_mask;
+}
+
+/*
+ * Update tx/rx chainmask. For legacy association,
+ * hard code chainmask to 1x1, for 11n association, use
+ * the chainmask configuration.
+ */
+
+void ath_update_chainmask(struct ath_softc *sc, int is_ht)
+{
+ sc->sc_update_chainmask = 1;
+ if (is_ht) {
+ sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
+ sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
+ } else {
+ sc->sc_tx_chainmask = 1;
+ sc->sc_rx_chainmask = 1;
+ }
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
+ __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
+}
+
+/******************/
+/* VAP management */
+/******************/
+
+/*
+ * VAP in Listen mode
+ *
+ * This routine brings the VAP out of the down state into a "listen" state
+ * where it waits for association requests. This is used in AP and AdHoc
+ * modes.
+*/
+
+int ath_vap_listen(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_vap *avp;
+ u32 rfilt = 0;
+ DECLARE_MAC_BUF(mac);
+
+ avp = sc->sc_vaps[if_id];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
+ __func__, if_id);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ ath_slow_ant_div_stop(&sc->sc_antdiv);
+#endif
+
+ /* update ratectrl about the new state */
+ ath_rate_newstate(sc, avp);
+
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
+ memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
+ ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
+ } else
+ sc->sc_curaid = 0;
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: RX filter 0x%x bssid %s aid 0x%x\n",
+ __func__, rfilt, print_mac(mac,
+ sc->sc_curbssid), sc->sc_curaid);
+
+ /*
+ * XXXX
+ * Disable BMISS interrupt when we're not associated
+ */
+ ath9k_hw_set_interrupts(ah,
+ sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
+ sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+ /* need to reconfigure the beacons when it moves to RUN */
+ sc->sc_beacons = 0;
+
+ return 0;
+}
+
+int ath_vap_attach(struct ath_softc *sc,
+ int if_id,
+ struct ieee80211_vif *if_data,
+ enum ath9k_opmode opmode)
+{
+ struct ath_vap *avp;
+
+ if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Invalid interface id = %u\n", __func__, if_id);
+ return -EINVAL;
+ }
+
+ switch (opmode) {
+ case ATH9K_M_STA:
+ case ATH9K_M_IBSS:
+ case ATH9K_M_MONITOR:
+ break;
+ case ATH9K_M_HOSTAP:
+ /* XXX not right, beacon buffer is allocated on RUN trans */
+ if (list_empty(&sc->sc_bbuf))
+ return -ENOMEM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* create ath_vap */
+ avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
+ if (avp == NULL)
+ return -ENOMEM;
+
+ memzero(avp, sizeof(struct ath_vap));
+ avp->av_if_data = if_data;
+ /* Set the VAP opmode */
+ avp->av_opmode = opmode;
+ avp->av_bslot = -1;
+ INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
+ INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
+ spin_lock_init(&avp->av_mcastq.axq_lock);
+
+ ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
+
+ sc->sc_vaps[if_id] = avp;
+ sc->sc_nvaps++;
+ /* Set the device opmode */
+ sc->sc_opmode = opmode;
+
+ /* default VAP configuration */
+ avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
+ avp->av_config.av_fixed_retryset = 0x03030303;
+
+ return 0;
+}
+
+int ath_vap_detach(struct ath_softc *sc, int if_id)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_vap *avp;
+
+ avp = sc->sc_vaps[if_id];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
+ __func__, if_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Quiesce the hardware while we remove the vap. In
+ * particular we need to reclaim all references to the
+ * vap state by any frames pending on the tx queues.
+ *
+ * XXX can we do this w/o affecting other vap's?
+ */
+ ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
+ ath_draintxq(sc, false); /* stop xmit side */
+ ath_stoprecv(sc); /* stop recv side */
+ ath_flushrecv(sc); /* flush recv queue */
+
+ /* Reclaim any pending mcast bufs on the vap. */
+ ath_tx_draintxq(sc, &avp->av_mcastq, false);
+
+ kfree(avp);
+ sc->sc_vaps[if_id] = NULL;
+ sc->sc_nvaps--;
+
+ return 0;
+}
+
+int ath_vap_config(struct ath_softc *sc,
+ int if_id, struct ath_vap_config *if_config)
+{
+ struct ath_vap *avp;
+
+ if (if_id >= ATH_BCBUF) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Invalid interface id = %u\n", __func__, if_id);
+ return -EINVAL;
+ }
+
+ avp = sc->sc_vaps[if_id];
+ ASSERT(avp != NULL);
+
+ if (avp)
+ memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
+
+ return 0;
+}
+
+/********/
+/* Core */
+/********/
+
+int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ int status;
+ int error = 0;
+ enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);
+
+ /*
+	 * Stop anything previously set up. This is safe
+ * whether this is the first time through or not.
+ */
+ ath_stop(sc);
+
+ /* Initialize chanmask selection */
+ sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
+ sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
+
+ /* Reset SERDES registers */
+ ath9k_hw_configpcipowersave(ah, 0);
+
+ /*
+ * The basic interface to setting the hardware in a good
+ * state is ``reset''. On return the hardware is known to
+ * be powered up and with interrupts disabled. This must
+ * be followed by initialization of the appropriate bits
+ * and then setup of the interrupt mask.
+ */
+ sc->sc_curchan = *initial_chan;
+
+ spin_lock_bh(&sc->sc_resetlock);
+ if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
+ sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+ sc->sc_ht_extprotspacing, false, &status)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to reset hardware; hal status %u "
+ "(freq %u flags 0x%x)\n", __func__, status,
+ sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
+ error = -EIO;
+ spin_unlock_bh(&sc->sc_resetlock);
+ goto done;
+ }
+ spin_unlock_bh(&sc->sc_resetlock);
+ /*
+ * This is needed only to setup initial state
+ * but it's best done after a reset.
+ */
+ ath_update_txpow(sc);
+
+ /*
+ * Setup the hardware after reset:
+ * The receive engine is set going.
+ * Frame transmit is handled entirely
+ * in the frame output path; there's nothing to do
+ * here except setup the interrupt mask.
+ */
+ if (ath_startrecv(sc) != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to start recv logic\n", __func__);
+ error = -EIO;
+ goto done;
+ }
+ /* Setup our intr mask. */
+ sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
+ | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
+ | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
+
+ if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
+ sc->sc_imask |= ATH9K_INT_GTT;
+
+ if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
+ sc->sc_imask |= ATH9K_INT_CST;
+
+ /*
+ * Enable MIB interrupts when there are hardware phy counters.
+ * Note we only do this (at the moment) for station mode.
+ */
+ if (ath9k_hw_phycounters(ah) &&
+ ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
+ sc->sc_imask |= ATH9K_INT_MIB;
+ /*
+ * Some hardware processes the TIM IE and fires an
+ * interrupt when the TIM bit is set. For hardware
+ * that does, if not overridden by configuration,
+ * enable the TIM interrupt when operating as station.
+ */
+ if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
+ (sc->sc_opmode == ATH9K_M_STA) &&
+ !sc->sc_config.swBeaconProcess)
+ sc->sc_imask |= ATH9K_INT_TIM;
+ /*
+ * Don't enable interrupts here as we've not yet built our
+ * vap and node data structures, which will be needed as soon
+ * as we start receiving.
+ */
+ ath_setcurmode(sc, ath_chan2mode(initial_chan));
+
+ /* XXX: we must make sure h/w is ready and clear invalid flag
+ * before turning on interrupt. */
+ sc->sc_invalid = 0;
+done:
+ return error;
+}
+
+/*
+ * Reset the hardware w/o losing operational state. This is
+ * basically a more efficient way of doing ath_stop, ath_init,
+ * followed by state transitions to the current 802.11
+ * operational state. Used to recover from errors rx overrun
+ * and to reset the hardware when rf gain settings must be reset.
+ */
+
+static int ath_reset_start(struct ath_softc *sc, u32 flag)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
+ ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */
+ ath_stoprecv(sc); /* stop recv side */
+ ath_flushrecv(sc); /* flush recv queue */
+
+ return 0;
+}
+
+static int ath_reset_end(struct ath_softc *sc, u32 flag)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ if (ath_startrecv(sc) != 0) /* restart recv */
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to start recv logic\n", __func__);
+
+ /*
+ * We may be doing a reset in response to a request
+ * that changes the channel so update any state that
+ * might change as a result.
+ */
+ ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan));
+
+ ath_update_txpow(sc); /* update tx power state */
+
+ if (sc->sc_beacons)
+ ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+
+ /* Restart the txq */
+ if (flag & RESET_RETRY_TXQ) {
+ int i;
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i)) {
+ spin_lock_bh(&sc->sc_txq[i].axq_lock);
+ ath_txq_schedule(sc, &sc->sc_txq[i]);
+ spin_unlock_bh(&sc->sc_txq[i].axq_lock);
+ }
+ }
+ }
+ return 0;
+}
+
+int ath_reset(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ int status;
+ int error = 0;
+ enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
+
+ /* NB: indicate channel change so we do a full reset */
+ spin_lock_bh(&sc->sc_resetlock);
+ if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
+ ht_macmode,
+ sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+ sc->sc_ht_extprotspacing, false, &status)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to reset hardware; hal status %u\n",
+ __func__, status);
+ error = -EIO;
+ }
+ spin_unlock_bh(&sc->sc_resetlock);
+
+ return error;
+}
+
+int ath_suspend(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ /* No I/O if device has been surprise removed */
+ if (sc->sc_invalid)
+ return -EIO;
+
+ /* Shut off the interrupt before setting sc->sc_invalid to '1' */
+ ath9k_hw_set_interrupts(ah, 0);
+
+ /* XXX: we must make sure h/w will not generate any interrupt
+ * before setting the invalid flag. */
+ sc->sc_invalid = 1;
+
+ /* disable HAL and put h/w to sleep */
+ ath9k_hw_disable(sc->sc_ah);
+
+ ath9k_hw_configpcipowersave(sc->sc_ah, 1);
+
+ return 0;
+}
+
+/* Interrupt handler. Most of the actual processing is deferred.
+ * It's the caller's responsibility to ensure the chip is awake. */
+
+irqreturn_t ath_isr(int irq, void *dev)
+{
+ struct ath_softc *sc = dev;
+ struct ath_hal *ah = sc->sc_ah;
+ enum ath9k_int status;
+ bool sched = false;
+
+ do {
+ if (sc->sc_invalid) {
+ /*
+ * The hardware is not ready/present, don't
+ * touch anything. Note this can happen early
+ * on if the IRQ is shared.
+ */
+ return IRQ_NONE;
+ }
+ if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
+ return IRQ_NONE;
+ }
+
+ /*
+ * Figure out the reason(s) for the interrupt. Note
+ * that the hal returns a pseudo-ISR that may include
+ * bits we haven't explicitly enabled so we mask the
+		 * value to ensure we only process bits we requested.
+ */
+ ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
+
+ status &= sc->sc_imask; /* discard unasked-for bits */
+
+ /*
+ * If there are no status bits set, then this interrupt was not
+ * for me (should have been caught above).
+ */
+
+ if (!status)
+ return IRQ_NONE;
+
+ sc->sc_intrstatus = status;
+
+ if (status & ATH9K_INT_FATAL) {
+ /* need a chip reset */
+ sched = true;
+ } else if (status & ATH9K_INT_RXORN) {
+ /* need a chip reset */
+ sched = true;
+ } else {
+ if (status & ATH9K_INT_SWBA) {
+ /* schedule a tasklet for beacon handling */
+ tasklet_schedule(&sc->bcon_tasklet);
+ }
+ if (status & ATH9K_INT_RXEOL) {
+ /*
+ * NB: the hardware should re-read the link when
+ * RXE bit is written, but it doesn't work
+ * at least on older hardware revs.
+ */
+ sched = true;
+ }
+
+ if (status & ATH9K_INT_TXURN)
+ /* bump tx trigger level */
+ ath9k_hw_updatetxtriglevel(ah, true);
+ /* XXX: optimize this */
+ if (status & ATH9K_INT_RX)
+ sched = true;
+ if (status & ATH9K_INT_TX)
+ sched = true;
+ if (status & ATH9K_INT_BMISS)
+ sched = true;
+ /* carrier sense timeout */
+ if (status & ATH9K_INT_CST)
+ sched = true;
+ if (status & ATH9K_INT_MIB) {
+ /*
+ * Disable interrupts until we service the MIB
+ * interrupt; otherwise it will continue to
+ * fire.
+ */
+ ath9k_hw_set_interrupts(ah, 0);
+ /*
+ * Let the hal handle the event. We assume
+ * it will clear whatever condition caused
+ * the interrupt.
+ */
+ ath9k_hw_procmibevent(ah, &sc->sc_halstats);
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+ }
+ if (status & ATH9K_INT_TIM_TIMER) {
+ if (!(ah->ah_caps.hw_caps &
+ ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Clear RxAbort bit so that we can
+ * receive frames */
+ ath9k_hw_setrxabort(ah, 0);
+ sched = true;
+ }
+ }
+ }
+ } while (0);
+
+ if (sched) {
+ /* turn off every interrupt except SWBA */
+ ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
+ tasklet_schedule(&sc->intr_tq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Deferred interrupt processing */
+
+static void ath9k_tasklet(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ u32 status = sc->sc_intrstatus;
+
+ if (status & ATH9K_INT_FATAL) {
+ /* need a chip reset */
+ ath_internal_reset(sc);
+ return;
+ } else {
+
+ if (status &
+ (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
+ /* XXX: fill me in */
+ /*
+ if (status & ATH9K_INT_RXORN) {
+ }
+ if (status & ATH9K_INT_RXEOL) {
+ }
+ */
+ spin_lock_bh(&sc->sc_rxflushlock);
+ ath_rx_tasklet(sc, 0);
+ spin_unlock_bh(&sc->sc_rxflushlock);
+ }
+ /* XXX: optimize this */
+ if (status & ATH9K_INT_TX)
+ ath_tx_tasklet(sc);
+ /* XXX: fill me in */
+ /*
+ if (status & ATH9K_INT_BMISS) {
+ }
+ if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
+ if (status & ATH9K_INT_TIM) {
+ }
+ if (status & ATH9K_INT_DTIMSYNC) {
+ }
+ }
+ */
+ }
+
+ /* re-enable hardware interrupt */
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
+}
+
+int ath_init(u16 devid, struct ath_softc *sc)
+{
+ struct ath_hal *ah = NULL;
+ int status;
+ int error = 0, i;
+ int csz = 0;
+ u32 rd;
+
+	/* XXX: hardware will not be ready until ath_open() is called */
+ sc->sc_invalid = 1;
+
+ sc->sc_debug = DBG_DEFAULT;
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
+
+ /* Initialize tasklet */
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
+ (unsigned long)sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ bus_read_cachesize(sc, &csz);
+ /* XXX assert csz is non-zero */
+ sc->sc_cachelsz = csz << 2; /* convert to bytes */
+
+ spin_lock_init(&sc->sc_resetlock);
+
+ ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
+ if (ah == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to attach hardware; HAL status %u\n",
+ __func__, status);
+ error = -ENXIO;
+ goto bad;
+ }
+ sc->sc_ah = ah;
+
+ /* Get the chipset-specific aggr limit. */
+ sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
+
+ /* Get the hardware key cache size. */
+ sc->sc_keymax = ah->ah_caps.keycache_size;
+ if (sc->sc_keymax > ATH_KEYMAX) {
+ DPRINTF(sc, ATH_DBG_KEYCACHE,
+ "%s: Warning, using only %u entries in %u key cache\n",
+ __func__, ATH_KEYMAX, sc->sc_keymax);
+ sc->sc_keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < sc->sc_keymax; i++)
+ ath9k_hw_keyreset(ah, (u16) i);
+ /*
+ * Mark key cache slots associated with global keys
+ * as in use. If we knew TKIP was not to be used we
+ * could leave the +32, +64, and +32+64 slots free.
+ * XXX only for splitmic.
+ */
+ for (i = 0; i < IEEE80211_WEP_NKID; i++) {
+ set_bit(i, sc->sc_keymap);
+ set_bit(i + 32, sc->sc_keymap);
+ set_bit(i + 64, sc->sc_keymap);
+ set_bit(i + 32 + 64, sc->sc_keymap);
+ }
+ /*
+ * Collect the channel list using the default country
+ * code and including outdoor channels. The 802.11 layer
+	 * is responsible for filtering this list based on settings
+ * like the phy mode.
+ */
+ rd = ah->ah_currentRD;
+
+ error = ath_setup_channels(sc);
+ if (error)
+ goto bad;
+
+ /* default to STA mode */
+ sc->sc_opmode = ATH9K_M_MONITOR;
+
+ /* Setup rate tables */
+
+ ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
+ ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
+
+ /* NB: setup here so ath_rate_update is happy */
+ ath_setcurmode(sc, ATH9K_MODE_11A);
+
+ /*
+ * Allocate hardware transmit queues: one queue for
+ * beacon frames and one data queue for each QoS
+	 * priority. Note that the hal handles resetting
+ * these queues at the needed time.
+ */
+ sc->sc_bhalq = ath_beaconq_setup(ah);
+ if (sc->sc_bhalq == -1) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to setup a beacon xmit queue\n", __func__);
+ error = -EIO;
+ goto bad2;
+ }
+ sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+ if (sc->sc_cabq == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to setup CAB xmit queue\n", __func__);
+ error = -EIO;
+ goto bad2;
+ }
+
+ sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
+ ath_cabq_update(sc);
+
+ for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
+ sc->sc_haltype2q[i] = -1;
+
+ /* Setup data queues */
+ /* NB: ensure BK queue is the lowest priority h/w queue */
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to setup xmit queue for BK traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to setup xmit queue for BE traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to setup xmit queue for VI traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to setup xmit queue for VO traffic\n",
+ __func__);
+ error = -EIO;
+ goto bad2;
+ }
+
+ sc->sc_rc = ath_rate_attach(ah);
+ if (sc->sc_rc == NULL) {
+		error = -EIO;
+ goto bad2;
+ }
+
+ if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
+ 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ sc->sc_splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
+ 1, NULL);
+
+ sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
+ sc->sc_config.txpowlimit_override = 0;
+
+ /* 11n Capabilities */
+ if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
+ sc->sc_txaggr = 1;
+ sc->sc_rxaggr = 1;
+ }
+
+ sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
+ sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
+
+ /* Configuration for rx chain detection */
+ sc->sc_rxchaindetect_ref = 0;
+ sc->sc_rxchaindetect_thresh5GHz = 35;
+ sc->sc_rxchaindetect_thresh2GHz = 35;
+ sc->sc_rxchaindetect_delta5GHz = 30;
+ sc->sc_rxchaindetect_delta2GHz = 30;
+
+ ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ sc->sc_defant = ath9k_hw_getdefantenna(ah);
+
+ ath9k_hw_getmac(ah, sc->sc_myaddr);
+ if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
+ ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
+ ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
+ ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
+ }
+ sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
+
+ /* initialize beacon slots */
+ for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
+ sc->sc_bslot[i] = ATH_IF_ID_ANY;
+
+ /* save MISC configurations */
+ sc->sc_config.swBeaconProcess = 1;
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ /* range is 40 - 255, we use something in the middle */
+ ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
+#endif
+
+ return 0;
+bad2:
+ /* cleanup tx queues */
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->sc_txq[i]);
+bad:
+ if (ah)
+ ath9k_hw_detach(ah);
+ return error;
+}
+
+void ath_deinit(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ int i;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
+
+ ath_stop(sc);
+ if (!sc->sc_invalid)
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
+ ath_rate_detach(sc->sc_rc);
+ /* cleanup tx queues */
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->sc_txq[i]);
+ ath9k_hw_detach(ah);
+}
+
+/*******************/
+/* Node Management */
+/*******************/
+
+struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
+{
+ struct ath_vap *avp;
+ struct ath_node *an;
+ DECLARE_MAC_BUF(mac);
+
+ avp = sc->sc_vaps[if_id];
+ ASSERT(avp != NULL);
+
+ /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
+ an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
+ if (an == NULL)
+ return NULL;
+ memzero(an, sizeof(*an));
+
+ an->an_sc = sc;
+ memcpy(an->an_addr, addr, ETH_ALEN);
+ atomic_set(&an->an_refcnt, 1);
+
+ /* set up per-node tx/rx state */
+ ath_tx_node_init(sc, an);
+ ath_rx_node_init(sc, an);
+
+ ath_chainmask_sel_init(sc, an);
+ ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
+ list_add(&an->list, &sc->node_list);
+
+ return an;
+}
+
+void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
+{
+ unsigned long flags;
+
+ DECLARE_MAC_BUF(mac);
+
+ ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
+ an->an_flags |= ATH_NODE_CLEAN;
+ ath_tx_node_cleanup(sc, an, bh_flag);
+ ath_rx_node_cleanup(sc, an);
+
+ ath_tx_node_free(sc, an);
+ ath_rx_node_free(sc, an);
+
+ spin_lock_irqsave(&sc->node_lock, flags);
+
+ list_del(&an->list);
+
+ spin_unlock_irqrestore(&sc->node_lock, flags);
+
+ kfree(an);
+}
+
+/* Finds a node and increases the refcnt if found */
+
+struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
+{
+ struct ath_node *an = NULL, *an_found = NULL;
+
+ if (list_empty(&sc->node_list)) /* FIXME */
+ goto out;
+ list_for_each_entry(an, &sc->node_list, list) {
+ if (!compare_ether_addr(an->an_addr, addr)) {
+ atomic_inc(&an->an_refcnt);
+ an_found = an;
+ break;
+ }
+ }
+out:
+ return an_found;
+}
+
+/* Decrements the refcnt and if it drops to zero, detach the node */
+
+void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
+{
+ if (atomic_dec_and_test(&an->an_refcnt))
+ ath_node_detach(sc, an, bh_flag);
+}
+
+/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
+struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
+{
+ struct ath_node *an = NULL, *an_found = NULL;
+
+ if (list_empty(&sc->node_list))
+ return NULL;
+
+ list_for_each_entry(an, &sc->node_list, list)
+ if (!compare_ether_addr(an->an_addr, addr)) {
+ an_found = an;
+ break;
+ }
+
+ return an_found;
+}
+
+/*
+ * Set up New Node
+ *
+ * Set up driver-specific state for a newly associated node. This routine
+ * really only applies if compression or XR are enabled; there is no code
+ * covering any other cases.
+*/
+
+void ath_newassoc(struct ath_softc *sc,
+ struct ath_node *an, int isnew, int isuapsd)
+{
+ int tidno;
+
+ /* if station reassociates, tear down the aggregation state. */
+ if (!isnew) {
+ for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
+ if (sc->sc_txaggr)
+ ath_tx_aggr_teardown(sc, an, tidno);
+ if (sc->sc_rxaggr)
+ ath_rx_aggr_teardown(sc, an, tidno);
+ }
+ }
+ an->an_flags = 0;
+}
+
+/**************/
+/* Encryption */
+/**************/
+
+void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
+{
+ ath9k_hw_keyreset(sc->sc_ah, keyix);
+ if (freeslot)
+ clear_bit(keyix, sc->sc_keymap);
+}
+
+int ath_keyset(struct ath_softc *sc,
+ u16 keyix,
+ struct ath9k_keyval *hk,
+ const u8 mac[ETH_ALEN])
+{
+ bool status;
+
+ status = ath9k_hw_set_keycache_entry(sc->sc_ah,
+ keyix, hk, mac, false);
+
+ return status != false;
+}
+
+/***********************/
+/* TX Power/Regulatory */
+/***********************/
+
+/*
+ * Set Transmit power in HAL
+ *
+ * This routine makes the actual HAL calls to set the new transmit power
+ * limit.
+*/
+
+void ath_update_txpow(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u32 txpow;
+
+ if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
+ ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
+ /* read back in case value is clamped */
+ ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
+ sc->sc_curtxpow = txpow;
+ }
+}
+
+/* Return the current country and domain information */
+void ath_get_currentCountry(struct ath_softc *sc,
+ struct ath9k_country_entry *ctry)
+{
+ ath9k_regd_get_current_country(sc->sc_ah, ctry);
+
+	/* If the HAL has no specific country yet (it is band dependent),
+	 * use the one we passed in. */
+ if (ctry->countryCode == CTRY_DEFAULT) {
+ ctry->iso[0] = 0;
+ ctry->iso[1] = 0;
+ } else if (ctry->iso[0] && ctry->iso[1]) {
+ if (!ctry->iso[2]) {
+ if (ath_outdoor)
+ ctry->iso[2] = 'O';
+ else
+ ctry->iso[2] = 'I';
+ }
+ }
+}
+
+/**************************/
+/* Slow Antenna Diversity */
+/**************************/
+
+void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
+ struct ath_softc *sc,
+ int32_t rssitrig)
+{
+ int trig;
+
+ /* antdivf_rssitrig can range from 40 - 0xff */
+	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
+	trig = (trig < 40) ? 40 : trig;
+
+ antdiv->antdiv_sc = sc;
+ antdiv->antdivf_rssitrig = trig;
+}
+
+void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
+ u8 num_antcfg,
+ const u8 *bssid)
+{
+ antdiv->antdiv_num_antcfg =
+ num_antcfg < ATH_ANT_DIV_MAX_CFG ?
+ num_antcfg : ATH_ANT_DIV_MAX_CFG;
+ antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
+ antdiv->antdiv_curcfg = 0;
+ antdiv->antdiv_bestcfg = 0;
+ antdiv->antdiv_laststatetsf = 0;
+
+ memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
+
+ antdiv->antdiv_start = 1;
+}
+
+void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
+{
+ antdiv->antdiv_start = 0;
+}
+
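+/* Return the largest of num_val values and report its index via max_index */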
+static int32_t ath_find_max_val(int32_t *val,
+ u8 num_val, u8 *max_index)
+{
+ u32 MaxVal = *val++;
+ u32 cur_index = 0;
+
+ *max_index = 0;
+ while (++cur_index < num_val) {
+ if (*val > MaxVal) {
+ MaxVal = *val;
+ *max_index = cur_index;
+ }
+
+ val++;
+ }
+
+ return MaxVal;
+}
+
+void ath_slow_ant_div(struct ath_antdiv *antdiv,
+ struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rx_stats)
+{
+ struct ath_softc *sc = antdiv->antdiv_sc;
+ struct ath_hal *ah = sc->sc_ah;
+ u64 curtsf = 0;
+ u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
+ __le16 fc = hdr->frame_control;
+
+ if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
+ && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
+ antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
+ antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
+ curtsf = antdiv->antdiv_lastbtsf[curcfg];
+ } else {
+ return;
+ }
+
+ switch (antdiv->antdiv_state) {
+ case ATH_ANT_DIV_IDLE:
+ if ((antdiv->antdiv_lastbrssi[curcfg] <
+ antdiv->antdivf_rssitrig)
+ && ((curtsf - antdiv->antdiv_laststatetsf) >
+ ATH_ANT_DIV_MIN_IDLE_US)) {
+
+ curcfg++;
+ if (curcfg == antdiv->antdiv_num_antcfg)
+ curcfg = 0;
+
+ if (!ath9k_hw_select_antconfig(ah, curcfg)) {
+ antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
+ antdiv->antdiv_curcfg = curcfg;
+ antdiv->antdiv_laststatetsf = curtsf;
+ antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
+ }
+ }
+ break;
+
+ case ATH_ANT_DIV_SCAN:
+ if ((curtsf - antdiv->antdiv_laststatetsf) <
+ ATH_ANT_DIV_MIN_SCAN_US)
+ break;
+
+ curcfg++;
+ if (curcfg == antdiv->antdiv_num_antcfg)
+ curcfg = 0;
+
+ if (curcfg == antdiv->antdiv_bestcfg) {
+ ath_find_max_val(antdiv->antdiv_lastbrssi,
+ antdiv->antdiv_num_antcfg, &bestcfg);
+ if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
+ antdiv->antdiv_bestcfg = bestcfg;
+ antdiv->antdiv_curcfg = bestcfg;
+ antdiv->antdiv_laststatetsf = curtsf;
+ antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
+ }
+ } else {
+ if (!ath9k_hw_select_antconfig(ah, curcfg)) {
+ antdiv->antdiv_curcfg = curcfg;
+ antdiv->antdiv_laststatetsf = curtsf;
+ antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
+ }
+ }
+
+ break;
+ }
+}
+
+/***********************/
+/* Descriptor Handling */
+/***********************/
+
+/*
+ * Set up DMA descriptors
+ *
+ * This function allocates both the DMA descriptor block and the
+ * ath_buf entries that point into it. These hold the descriptors used
+ * by the system.
+*/
+
+int ath_descdma_setup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head,
+ const char *name,
+ int nbuf,
+ int ndesc)
+{
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
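+/*
+ * DS2PHYS() maps a descriptor's CPU address to its DMA address using its
+ * offset from the start of the coherent block. ATH_DESC_4KB_BOUND_CHECK()
+ * flags addresses whose 128-byte (32 dword) descriptor fetch would cross
+ * a 4KB page boundary (offset within the page above 0xF7F).
+ */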
+
+ struct ath_desc *ds;
+ struct ath_buf *bf;
+ int i, bsize, error;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
+ __func__, name, nbuf, ndesc);
+
+ /* ath_desc must be a multiple of DWORDs */
+ if ((sizeof(struct ath_desc) % 4) != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
+ __func__);
+ ASSERT((sizeof(struct ath_desc) % 4) == 0);
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dd->dd_name = name;
+ dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ u32 ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u32 dma_len;
+
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+ };
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = pci_alloc_consistent(sc->pdev,
+ dd->dd_desc_len,
+ &dd->dd_desc_paddr);
+ if (dd->dd_desc == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ ds = dd->dd_desc;
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
+ __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+
+ /* allocate buffers */
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = kmalloc(bsize, GFP_KERNEL);
+ if (bf == NULL) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+ memzero(bf, bsize);
+ dd->dd_bufptr = bf;
+
+ INIT_LIST_HEAD(head);
+ for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->ah_caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ ASSERT((caddr_t) bf->bf_desc <
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += ndesc;
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ return 0;
+fail2:
+ pci_free_consistent(sc->pdev,
+ dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
+fail:
+ memzero(dd, sizeof(*dd));
+ return error;
+#undef ATH_DESC_4KB_BOUND_CHECK
+#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
+#undef DS2PHYS
+}
+
+/*
+ * Cleanup DMA descriptors
+ *
+ * This function will free the DMA block that was allocated for the descriptor
+ * pool. Since this was allocated as one "chunk", it is freed in the same
+ * manner.
+*/
+
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head)
+{
+ /* Free memory associated with descriptors */
+ pci_free_consistent(sc->pdev,
+ dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
+
+ INIT_LIST_HEAD(head);
+ kfree(dd->dd_bufptr);
+ memzero(dd, sizeof(*dd));
+}
+
+/*************/
+/* Utilities */
+/*************/
+
+void ath_internal_reset(struct ath_softc *sc)
+{
+ ath_reset_start(sc, 0);
+ ath_reset(sc);
+ ath_reset_end(sc, 0);
+}
+
+int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
+{
+ int qnum;
+
+ switch (queue) {
+ case 0:
+ qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
+ break;
+ case 1:
+ qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
+ break;
+ case 2:
+ qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
+ break;
+ case 3:
+ qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
+ break;
+ default:
+ qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
+ break;
+ }
+
+ return qnum;
+}
+
+int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
+{
+ int qnum;
+
+ switch (queue) {
+ case ATH9K_WME_AC_VO:
+ qnum = 0;
+ break;
+ case ATH9K_WME_AC_VI:
+ qnum = 1;
+ break;
+ case ATH9K_WME_AC_BE:
+ qnum = 2;
+ break;
+ case ATH9K_WME_AC_BK:
+ qnum = 3;
+ break;
+ default:
+ qnum = -1;
+ break;
+ }
+
+ return qnum;
+}
+
+
+/*
+ * Expand time stamp to TSF
+ *
+ * Extend 15-bit time stamp from rx descriptor to
+ * a full 64-bit TSF using the current h/w TSF.
+*/
+
+u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
+{
+ u64 tsf;
+
+ tsf = ath9k_hw_gettsf64(sc->sc_ah);
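+	/*
+	 * If the low 15 bits of the TSF are smaller than rstamp, the
+	 * timestamp was taken before the last 15-bit rollover; step the
+	 * TSF back one 0x8000 period before merging in rstamp.
+	 */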
+ if ((tsf & 0x7fff) < rstamp)
+ tsf -= 0x8000;
+ return (tsf & ~0x7fff) | rstamp;
+}
+
+/*
+ * Set Default Antenna
+ *
+ * Call into the HAL to set the default antenna to use. Not really valid for
+ * MIMO technology.
+*/
+
+void ath_setdefantenna(void *context, u32 antenna)
+{
+ struct ath_softc *sc = (struct ath_softc *)context;
+ struct ath_hal *ah = sc->sc_ah;
+
+ /* XXX block beacon interrupts */
+ ath9k_hw_setantenna(ah, antenna);
+ sc->sc_defant = antenna;
+ sc->sc_rxotherant = 0;
+}
+
+/*
+ * Set Slot Time
+ *
+ * This will wake up the chip if required, and set the slot time for the
+ * frame (maximum transmit time). Slot time is assumed to be already set
+ * in the ATH object member sc_slottime.
+*/
+
+void ath_setslottime(struct ath_softc *sc)
+{
+ ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
+ sc->sc_updateslot = OK;
+}
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
new file mode 100644
index 00000000000..673b3d81133
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.h
@@ -0,0 +1,1072 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef CORE_H
+#define CORE_H
+
+#include <linux/version.h>
+#include <linux/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <asm/byteorder.h>
+#include <linux/scatterlist.h>
+#include <asm/page.h>
+#include <net/mac80211.h>
+
+#include "ath9k.h"
+#include "rc.h"
+
+struct ath_node;
+
+/******************/
+/* Utility macros */
+/******************/
+
+/* Macro to expand scalars to 64-bit objects */
+
+#define ito64(x) (sizeof(x) == 8) ? \
+ (((unsigned long long int)(x)) & (0xff)) : \
+ (sizeof(x) == 16) ? \
+ (((unsigned long long int)(x)) & 0xffff) : \
+ ((sizeof(x) == 32) ? \
+ (((unsigned long long int)(x)) & 0xffffffff) : \
+ (unsigned long long int)(x))
+
+/* increment with wrap-around */
+#define INCR(_l, _sz) do { \
+ (_l)++; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
+
+/* decrement with wrap-around */
+#define DECR(_l, _sz) do { \
+ (_l)--; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
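Because the wrap is done with a mask rather than a modulo, these macros only behave as intended when _sz is a power of two. A minimal usage sketch (the names below are illustrative only):

#define EXAMPLE_RING_SZ 64		/* must be a power of two */

static void example_ring_step(void)
{
	int idx = EXAMPLE_RING_SZ - 1;

	INCR(idx, EXAMPLE_RING_SZ);	/* 63 -> 0 */
	DECR(idx, EXAMPLE_RING_SZ);	/* 0 -> 63 */
}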
+
+#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define ASSERT(exp) do { \
+ if (unlikely(!(exp))) { \
+ BUG(); \
+ } \
+ } while (0)
+
+/* XXX: remove */
+#define memzero(_buf, _len) memset(_buf, 0, _len)
+
+#define get_dma_mem_context(var, field) (&((var)->field))
+#define copy_dma_mem_context(dst, src) (*dst = *src)
+
+#define ATH9K_BH_STATUS_INTACT 0
+#define ATH9K_BH_STATUS_CHANGE 1
+
+#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
+
+static inline unsigned long get_timestamp(void)
+{
+ return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
+}
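As a quick sanity check with made-up numbers: with HZ == 250 and jiffies == 1001 this evaluates to (4 * 1000) + (1 * 4) == 4004 ms; note the conversion is only exact when HZ divides 1000 evenly.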
+
+/*************/
+/* Debugging */
+/*************/
+
+enum ATH_DEBUG {
+ ATH_DBG_RESET = 0x00000001,
+ ATH_DBG_PHY_IO = 0x00000002,
+ ATH_DBG_REG_IO = 0x00000004,
+ ATH_DBG_QUEUE = 0x00000008,
+ ATH_DBG_EEPROM = 0x00000010,
+ ATH_DBG_NF_CAL = 0x00000020,
+ ATH_DBG_CALIBRATE = 0x00000040,
+ ATH_DBG_CHANNEL = 0x00000080,
+ ATH_DBG_INTERRUPT = 0x00000100,
+ ATH_DBG_REGULATORY = 0x00000200,
+ ATH_DBG_ANI = 0x00000400,
+ ATH_DBG_POWER_MGMT = 0x00000800,
+ ATH_DBG_XMIT = 0x00001000,
+ ATH_DBG_BEACON = 0x00002000,
+ ATH_DBG_RATE = 0x00004000,
+ ATH_DBG_CONFIG = 0x00008000,
+ ATH_DBG_KEYCACHE = 0x00010000,
+ ATH_DBG_AGGR = 0x00020000,
+ ATH_DBG_FATAL = 0x00040000,
+ ATH_DBG_ANY = 0xffffffff
+};
+
+#define DBG_DEFAULT (ATH_DBG_FATAL)
+
+#define DPRINTF(sc, _m, _fmt, ...) do { \
+ if (sc->sc_debug & (_m)) \
+ printk(_fmt , ##__VA_ARGS__); \
+ } while (0)
+
+/***************************/
+/* Load-time Configuration */
+/***************************/
+
+/* Per-instance load-time (note: NOT run-time) configurations
+ * for Atheros Device */
+struct ath_config {
+ u32 ath_aggr_prot;
+ u16 txpowlimit;
+ u16 txpowlimit_override;
+ u8 cabqReadytime; /* Cabq Readytime % */
+ u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */
+};
+
+/***********************/
+/* Chainmask Selection */
+/***********************/
+
+#define ATH_CHAINMASK_SEL_TIMEOUT 6000
+/* Default - Number of recent RSSI values used for
+ * chainmask selection */
+#define ATH_CHAINMASK_SEL_RSSI_CNT 10
+/* Means use 3x3 chainmask instead of configured chainmask */
+#define ATH_CHAINMASK_SEL_3X3 7
+/* Default - Rssi threshold below which we have to switch to 3x3 */
+#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20
+/* Default - Rssi threshold above which we have to switch to
+ * user configured values */
+#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35
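A hedged sketch of how these thresholds typically combine into a hysteresis; the actual decision logic lives in ath_chainmask_sel_logic(), declared further below but not defined in this header, and the helper name here is purely illustrative.

static int example_want_3x3(int tx_avgrssi, int using_3x3)
{
	if (!using_3x3 && tx_avgrssi <= ATH_CHAINMASK_SEL_UP_RSSI_THRES)
		return 1;	/* weak link: fall back to the full 3x3 mask */
	if (using_3x3 && tx_avgrssi >= ATH_CHAINMASK_SEL_DOWN_RSSI_THRES)
		return 0;	/* strong link: restore the user chainmask */
	return using_3x3;	/* inside the hysteresis band: no change */
}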
+/* Struct to store the chainmask select related info */
+struct ath_chainmask_sel {
+ struct timer_list timer;
+ int cur_tx_mask; /* user configured or 3x3 */
+ int cur_rx_mask; /* user configured or 3x3 */
+ int tx_avgrssi;
+ u8 switch_allowed:1, /* timer will set this */
+ cm_sel_enabled : 1;
+};
+
+int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an);
+void ath_update_chainmask(struct ath_softc *sc, int is_ht);
+
+/*************************/
+/* Descriptor Management */
+/*************************/
+
+/* Number of descriptors per buffer. The only case where we see skbuff
+chains is due to FF aggregation in the driver. */
+#define ATH_TXDESC 1
+/* if there's more fragment for this MSDU */
+#define ATH_BF_MORE_MPDU 1
+#define ATH_TXBUF_RESET(_bf) do { \
+ (_bf)->bf_status = 0; \
+ (_bf)->bf_lastbf = NULL; \
+ (_bf)->bf_lastfrm = NULL; \
+ (_bf)->bf_next = NULL; \
+ memzero(&((_bf)->bf_state), \
+ sizeof(struct ath_buf_state)); \
+ } while (0)
+
+struct ath_buf_state {
+ int bfs_nframes; /* # frames in aggregate */
+ u16 bfs_al; /* length of aggregate */
+ u16 bfs_frmlen; /* length of frame */
+ int bfs_seqno; /* sequence number */
+ int bfs_tidno; /* tid of this frame */
+ int bfs_retries; /* current retries */
+ struct ath_rc_series bfs_rcs[4]; /* rate series */
+ u8 bfs_isdata:1; /* is a data frame/aggregate */
+ u8 bfs_isaggr:1; /* is an aggregate */
+ u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */
+ u8 bfs_ht:1; /* is an HT frame */
+ u8 bfs_isretried:1; /* is retried */
+ u8 bfs_isxretried:1; /* is excessive retried */
+ u8 bfs_shpreamble:1; /* is short preamble */
+ u8 bfs_isbar:1; /* is a BAR */
+ u8 bfs_ispspoll:1; /* is a PS-Poll */
+ u8 bfs_aggrburst:1; /* is an aggr burst */
+ u8 bfs_calcairtime:1; /* requests airtime be calculated
+ when set for tx frame */
+ int bfs_rifsburst_elem; /* RIFS burst/bar */
+ int bfs_nrifsubframes; /* # of elements in burst */
+ /* key type use to encrypt this frame */
+ enum ath9k_key_type bfs_keytype;
+};
+
+#define bf_nframes bf_state.bfs_nframes
+#define bf_al bf_state.bfs_al
+#define bf_frmlen bf_state.bfs_frmlen
+#define bf_retries bf_state.bfs_retries
+#define bf_seqno bf_state.bfs_seqno
+#define bf_tidno bf_state.bfs_tidno
+#define bf_rcs bf_state.bfs_rcs
+#define bf_isdata bf_state.bfs_isdata
+#define bf_isaggr bf_state.bfs_isaggr
+#define bf_isampdu bf_state.bfs_isampdu
+#define bf_ht bf_state.bfs_ht
+#define bf_isretried bf_state.bfs_isretried
+#define bf_isxretried bf_state.bfs_isxretried
+#define bf_shpreamble bf_state.bfs_shpreamble
+#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem
+#define bf_nrifsubframes bf_state.bfs_nrifsubframes
+#define bf_keytype bf_state.bfs_keytype
+#define bf_isbar bf_state.bfs_isbar
+#define bf_ispspoll bf_state.bfs_ispspoll
+#define bf_aggrburst bf_state.bfs_aggrburst
+#define bf_calcairtime bf_state.bfs_calcairtime
+
+/*
+ * Abstraction of a contiguous buffer to transmit/receive. There is only
+ * a single hw descriptor encapsulated here.
+ */
+
+struct ath_buf {
+ struct list_head list;
+ struct list_head *last;
+ struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
+ an aggregate) */
+ struct ath_buf *bf_lastfrm; /* last buf of this frame */
+ struct ath_buf *bf_next; /* next subframe in the aggregate */
+ struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
+ void *bf_mpdu; /* enclosing frame structure */
+ void *bf_node; /* pointer to the node */
+ struct ath_desc *bf_desc; /* virtual addr of desc */
+ dma_addr_t bf_daddr; /* physical addr of desc */
+ dma_addr_t bf_buf_addr; /* physical addr of data buffer */
+ u32 bf_status;
+ u16 bf_flags; /* tx descriptor flags */
+ struct ath_buf_state bf_state; /* buffer state */
+ dma_addr_t bf_dmacontext;
+};
+
+/*
+ * reset the rx buffer.
+ * any new fields added to ath_buf that require a
+ * reset need to be added to this macro.
+ * currently bf_status is the only field that
+ * requires a reset.
+ */
+#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
+
+/* hw processing complete, desc processed by hal */
+#define ATH_BUFSTATUS_DONE 0x00000001
+/* hw processing complete, desc hold for hw */
+#define ATH_BUFSTATUS_STALE 0x00000002
+/* Rx-only: OS is done with this packet and it's ok to queue it to hw */
+#define ATH_BUFSTATUS_FREE 0x00000004
+
+/* DMA state for tx/rx descriptors */
+
+struct ath_descdma {
+ const char *dd_name;
+ struct ath_desc *dd_desc; /* descriptors */
+ dma_addr_t dd_desc_paddr; /* physical addr of dd_desc */
+ u32 dd_desc_len; /* size of dd_desc */
+ struct ath_buf *dd_bufptr; /* associated buffers */
+ dma_addr_t dd_dmacontext;
+};
+
+/* Abstraction of a received RX MPDU/MMPDU, or a RX fragment */
+
+struct ath_rx_context {
+ struct ath_buf *ctx_rxbuf; /* associated ath_buf for rx */
+};
+#define ATH_RX_CONTEXT(skb) ((struct ath_rx_context *)skb->cb)
+
+int ath_descdma_setup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head,
+ const char *name,
+ int nbuf,
+ int ndesc);
+int ath_desc_alloc(struct ath_softc *sc);
+void ath_desc_free(struct ath_softc *sc);
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head);
+
+/******/
+/* RX */
+/******/
+
+#define ATH_MAX_ANTENNA 3
+#define ATH_RXBUF 512
+#define ATH_RX_TIMEOUT 40 /* 40 milliseconds */
+#define WME_NUM_TID 16
+#define IEEE80211_BAR_CTL_TID_M 0xF000 /* tid mask */
+#define IEEE80211_BAR_CTL_TID_S 2 /* tid shift */
+
+enum ATH_RX_TYPE {
+ ATH_RX_NON_CONSUMED = 0,
+ ATH_RX_CONSUMED
+};
+
+/* per frame rx status block */
+struct ath_recv_status {
+ u64 tsf; /* mac tsf */
+ int8_t rssi; /* RSSI (noise floor adjusted) */
+ int8_t rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int8_t rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int8_t abs_rssi; /* absolute RSSI */
+ u8 rateieee; /* data rate received (IEEE rate code) */
+ u8 ratecode; /* phy rate code */
+ int rateKbps; /* data rate received (Kbps) */
+ int antenna; /* rx antenna */
+ int flags; /* status of associated skb */
+#define ATH_RX_FCS_ERROR 0x01
+#define ATH_RX_MIC_ERROR 0x02
+#define ATH_RX_DECRYPT_ERROR 0x04
+#define ATH_RX_RSSI_VALID 0x08
+/* if any of the ctl/extn chain rssis are valid */
+#define ATH_RX_CHAIN_RSSI_VALID 0x10
+/* if extn chain rssis are valid */
+#define ATH_RX_RSSI_EXTN_VALID 0x20
+/* set if 40 MHz, clear if 20 MHz */
+#define ATH_RX_40MHZ 0x40
+/* set if short GI, clear if full GI */
+#define ATH_RX_SHORT_GI 0x80
+};
+
+struct ath_rxbuf {
+ struct sk_buff *rx_wbuf;
+ unsigned long rx_time; /* system time when received */
+ struct ath_recv_status rx_status; /* cached rx status */
+};
+
+/* Per-TID aggregate receiver state for a node */
+struct ath_arx_tid {
+ struct ath_node *an;
+ struct ath_rxbuf *rxbuf; /* re-ordering buffer */
+ struct timer_list timer;
+ spinlock_t tidlock;
+ int baw_head; /* seq_next at head */
+ int baw_tail; /* tail of block-ack window */
+ int seq_reset; /* need to reset start sequence */
+ int addba_exchangecomplete;
+ u16 seq_next; /* next expected sequence */
+ u16 baw_size; /* block-ack window size */
+};
+
+/* Per-node receiver aggregate state */
+struct ath_arx {
+ struct ath_arx_tid tid[WME_NUM_TID];
+};
+
+int ath_startrecv(struct ath_softc *sc);
+bool ath_stoprecv(struct ath_softc *sc);
+void ath_flushrecv(struct ath_softc *sc);
+u32 ath_calcrxfilter(struct ath_softc *sc);
+void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an);
+void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an);
+void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
+void ath_handle_rx_intr(struct ath_softc *sc);
+int ath_rx_init(struct ath_softc *sc, int nbufs);
+void ath_rx_cleanup(struct ath_softc *sc);
+int ath_rx_tasklet(struct ath_softc *sc, int flush);
+int ath_rx_input(struct ath_softc *sc,
+ struct ath_node *node,
+ int is_ampdu,
+ struct sk_buff *skb,
+ struct ath_recv_status *rx_status,
+ enum ATH_RX_TYPE *status);
+int ath__rx_indicate(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_recv_status *status,
+ u16 keyix);
+int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
+ struct ath_recv_status *status);
+
+/******/
+/* TX */
+/******/
+
+#define ATH_FRAG_PER_MSDU 1
+#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU)
+/* max number of transmit attempts (tries) */
+#define ATH_TXMAXTRY 13
+/* max number of 11n transmit attempts (tries) */
+#define ATH_11N_TXMAXTRY 10
+/* max number of tries for management and control frames */
+#define ATH_MGT_TXMAXTRY 4
+#define WME_BA_BMP_SIZE 64
+#define WME_MAX_BA WME_BA_BMP_SIZE
+#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
+#define TID_TO_WME_AC(_tid) \
+ ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
+ (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
+ (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
+ WME_AC_VO)
+
+
+/* Wireless Multimedia Extension Defines */
+#define WME_AC_BE 0 /* best effort */
+#define WME_AC_BK 1 /* background */
+#define WME_AC_VI 2 /* video */
+#define WME_AC_VO 3 /* voice */
+#define WME_NUM_AC 4
+
+enum ATH_SM_PWRSAV{
+ ATH_SM_ENABLE,
+ ATH_SM_PWRSAV_STATIC,
+ ATH_SM_PWRSAV_DYNAMIC,
+};
+
+/*
+ * Data transmit queue state. One of these exists for each
+ * hardware transmit queue. Packets sent to us from above
+ * are assigned to queues based on their priority. Not all
+ * devices support a complete set of hardware transmit queues.
+ * For those devices the array sc_ac2q will map multiple
+ * priorities to fewer hardware queues (typically all to one
+ * hardware queue).
+ */
+struct ath_txq {
+ u32 axq_qnum; /* hardware q number */
+ u32 *axq_link; /* link ptr in last TX desc */
+ struct list_head axq_q; /* transmit queue */
+ spinlock_t axq_lock;
+ unsigned long axq_lockflags; /* intr state when must cli */
+ u32 axq_depth; /* queue depth */
+ u8 axq_aggr_depth; /* aggregates queued */
+ u32 axq_totalqueued; /* total ever queued */
+
+ /* count to determine if descriptor should generate int on this txq. */
+ u32 axq_intrcnt;
+
+ bool stopped; /* Is mac80211 queue stopped ? */
+ struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
+
+ /* first desc of the last descriptor that contains CTS */
+ struct ath_desc *axq_lastdsWithCTS;
+
+ /* final desc of the gating desc that determines whether
+ lastdsWithCTS has been DMA'ed or not */
+ struct ath_desc *axq_gatingds;
+
+ struct list_head axq_acq;
+};
+
+/* per TID aggregate tx state for a destination */
+struct ath_atx_tid {
+ struct list_head list; /* round-robin tid entry */
+ struct list_head buf_q; /* pending buffers */
+ struct ath_node *an;
+ struct ath_atx_ac *ac;
+ struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; /* active tx frames */
+ u16 seq_start;
+ u16 seq_next;
+ u16 baw_size;
+ int tidno;
+ int baw_head; /* first un-acked tx buffer */
+ int baw_tail; /* next unused tx buffer slot */
+ int sched;
+ int paused;
+ int cleanup_inprogress;
+ u32 addba_exchangecomplete:1;
+ int32_t addba_exchangeinprogress;
+ int addba_exchangeattempts;
+};
+
+/* per access-category aggregate tx state for a destination */
+struct ath_atx_ac {
+ int sched; /* dest-ac is scheduled */
+ int qnum; /* H/W queue number associated
+ with this AC */
+ struct list_head list; /* round-robin txq entry */
+ struct list_head tid_q; /* queue of TIDs with buffers */
+};
+
+/* per dest tx state */
+struct ath_atx {
+ struct ath_atx_tid tid[WME_NUM_TID];
+ struct ath_atx_ac ac[WME_NUM_AC];
+};
+
+/* per-frame tx control block */
+struct ath_tx_control {
+ struct ath_node *an;
+ int if_id;
+ int qnum;
+ u32 ht:1;
+ u32 ps:1;
+ u32 use_minrate:1;
+ enum ath9k_pkt_type atype;
+ enum ath9k_key_type keytype;
+ u32 flags;
+ u16 seqno;
+ u16 tidno;
+ u16 txpower;
+ u16 frmlen;
+ u32 keyix;
+ int min_rate;
+ int mcast_rate;
+ u16 nextfraglen;
+ struct ath_softc *dev;
+ dma_addr_t dmacontext;
+};
+
+/* per frame tx status block */
+struct ath_xmit_status {
+ int retries; /* number of retries to successfully
+ transmit this frame */
+ int flags; /* status of transmit */
+#define ATH_TX_ERROR 0x01
+#define ATH_TX_XRETRY 0x02
+#define ATH_TX_BAR 0x04
+};
+
+struct ath_tx_stat {
+ int rssi; /* RSSI (noise floor adjusted) */
+ int rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
+ int rateieee; /* data rate xmitted (IEEE rate code) */
+ int rateKbps; /* data rate xmitted (Kbps) */
+ int ratecode; /* phy rate code */
+ int flags; /* validity flags */
+/* if any of ctl,extn chain rssis are valid */
+#define ATH_TX_CHAIN_RSSI_VALID 0x01
+/* if extn chain rssis are valid */
+#define ATH_TX_RSSI_EXTN_VALID 0x02
+ u32 airtime; /* time on air per final tx rate */
+};
+
+struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
+void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
+int ath_tx_setup(struct ath_softc *sc, int haltype);
+void ath_draintxq(struct ath_softc *sc, bool retry_tx);
+void ath_tx_draintxq(struct ath_softc *sc,
+ struct ath_txq *txq, bool retry_tx);
+void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
+void ath_tx_node_cleanup(struct ath_softc *sc,
+ struct ath_node *an, bool bh_flag);
+void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
+void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
+int ath_tx_init(struct ath_softc *sc, int nbufs);
+int ath_tx_cleanup(struct ath_softc *sc);
+int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+int ath_txq_update(struct ath_softc *sc, int qnum,
+ struct ath9k_tx_queue_info *q);
+int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb);
+void ath_tx_tasklet(struct ath_softc *sc);
+u32 ath_txq_depth(struct ath_softc *sc, int qnum);
+u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
+void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
+void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_xmit_status *tx_status, struct ath_node *an);
+
+/**********************/
+/* Node / Aggregation */
+/**********************/
+
+/* indicates the node is cleaned up */
+#define ATH_NODE_CLEAN 0x1
+/* indicates the node is in 802.11 power save */
+#define ATH_NODE_PWRSAVE 0x2
+
+#define ADDBA_TIMEOUT 200 /* 200 milliseconds */
+#define ADDBA_EXCHANGE_ATTEMPTS 10
+#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
+#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
+/* number of delimiters for encryption padding */
+#define ATH_AGGR_ENCRYPTDELIM 10
+/* minimum h/w qdepth to be sustained to maximize aggregation */
+#define ATH_AGGR_MIN_QDEPTH 2
+#define ATH_AMPDU_SUBFRAME_DEFAULT 32
+#define IEEE80211_SEQ_SEQ_SHIFT 4
+#define IEEE80211_SEQ_MAX 4096
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+
+/* return whether a bit at index _n in bitmap _bm is set;
+ * _n must be less than the window size, WME_BA_BMP_SIZE */
+#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
+ ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
+
+/* return block-ack bitmap index given sequence and starting sequence */
+#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
+
+/* returns delimiter padding required given the packet length */
+#define ATH_AGGR_GET_NDELIM(_len) \
+ (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
+ (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
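Two worked cases for the padding macro, with arbitrary MPDU lengths (each delimiter is 4 bytes, hence the final shift by 2); the helper name is illustrative only.

static void example_ndelim(void)
{
	int pad_short, pad_long;

	pad_short = ATH_AGGR_GET_NDELIM(100);	/* (256 - 100 - 4) >> 2 == 38 */
	pad_long = ATH_AGGR_GET_NDELIM(300);	/* already >= ATH_AGGR_MINPLEN -> 0 */
}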
+
+#define BAW_WITHIN(_start, _bawsz, _seqno) \
+ ((((_seqno) - (_start)) & 4095) < (_bawsz))
+
+#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
+#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
+#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
+#define ATH_AN_2_TID(_an, _tidno) (&(_an)->an_aggr.tx.tid[(_tidno)])
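A short sketch of how the window macros behave across a sequence-number wrap; the values and helper name are arbitrary, not taken from the patch.

static void example_baw(void)
{
	u16 start = 4090, seqno = 5;		/* seqno has wrapped past 4095 */
	int idx, in_baw;

	/* the index is taken modulo the 12-bit sequence space */
	idx = ATH_BA_INDEX(start, seqno);	/* (5 - 4090) & 4095 == 11 */

	/* still inside a 64-frame block-ack window despite the wrap */
	in_baw = BAW_WITHIN(start, WME_BA_BMP_SIZE, seqno);	/* 11 < 64 -> true */
}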
+
+enum ATH_AGGR_STATUS {
+ ATH_AGGR_DONE,
+ ATH_AGGR_BAW_CLOSED,
+ ATH_AGGR_LIMITED,
+ ATH_AGGR_SHORTPKT,
+ ATH_AGGR_8K_LIMITED,
+};
+
+enum ATH_AGGR_CHECK {
+ AGGR_NOT_REQUIRED,
+ AGGR_REQUIRED,
+ AGGR_CLEANUP_PROGRESS,
+ AGGR_EXCHANGE_PROGRESS,
+ AGGR_EXCHANGE_DONE
+};
+
+struct aggr_rifs_param {
+ int param_max_frames;
+ int param_max_len;
+ int param_rl;
+ int param_al;
+ struct ath_rc_series *param_rcs;
+};
+
+/* Per-node aggregation state */
+struct ath_node_aggr {
+ struct ath_atx tx; /* node transmit state */
+ struct ath_arx rx; /* node receive state */
+};
+
+/* driver-specific node state */
+struct ath_node {
+ struct list_head list;
+ struct ath_softc *an_sc;
+ atomic_t an_refcnt;
+ struct ath_chainmask_sel an_chainmask_sel;
+ struct ath_node_aggr an_aggr;
+ u8 an_smmode; /* SM Power save mode */
+ u8 an_flags;
+ u8 an_addr[ETH_ALEN];
+};
+
+void ath_tx_resume_tid(struct ath_softc *sc,
+ struct ath_atx_tid *tid);
+enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
+ struct ath_node *an, u8 tidno);
+void ath_tx_aggr_teardown(struct ath_softc *sc,
+ struct ath_node *an, u8 tidno);
+void ath_rx_aggr_teardown(struct ath_softc *sc,
+ struct ath_node *an, u8 tidno);
+int ath_rx_aggr_start(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid,
+ u16 *ssn);
+int ath_rx_aggr_stop(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid);
+int ath_tx_aggr_start(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid,
+ u16 *ssn);
+int ath_tx_aggr_stop(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid);
+void ath_newassoc(struct ath_softc *sc,
+ struct ath_node *node, int isnew, int isuapsd);
+struct ath_node *ath_node_attach(struct ath_softc *sc,
+ u8 addr[ETH_ALEN], int if_id);
+void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
+struct ath_node *ath_node_get(struct ath_softc *sc, u8 addr[ETH_ALEN]);
+void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
+struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
+
+/*******************/
+/* Beacon Handling */
+/*******************/
+
+/*
+ * Regardless of the number of beacons we stagger (i.e. regardless of the
+ * number of BSSIDs), if a given beacon does not go out even after waiting
+ * this number of beacon intervals, the game's up.
+ */
+#define BSTUCK_THRESH (9 * ATH_BCBUF)
+#define ATH_BCBUF 4 /* number of beacon buffers */
+#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */
+#define ATH_DEFAULT_BMISS_LIMIT 10
+#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */
+#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */
+#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
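Since one TU is 1024 microseconds, IEEE80211_MS_TO_TU(100) evaluates to (100 * 1000) / 1024 == 97 TU (integer division, so the result always rounds down).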
+
+/* beacon configuration */
+struct ath_beacon_config {
+ u16 beacon_interval;
+ u16 listen_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+ u8 tim_offset;
+ union {
+ u64 last_tsf;
+ u8 last_tstamp[8];
+ } u; /* last received beacon/probe response timestamp of this BSS. */
+};
+
+/* offsets in a beacon frame for
+ * quick access of beacon content by low-level driver */
+struct ath_beacon_offset {
+ u8 *bo_tim; /* start of atim/dtim */
+};
+
+void ath9k_beacon_tasklet(unsigned long data);
+void ath_beacon_config(struct ath_softc *sc, int if_id);
+int ath_beaconq_setup(struct ath_hal *ah);
+int ath_beacon_alloc(struct ath_softc *sc, int if_id);
+void ath_bstuck_process(struct ath_softc *sc);
+void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
+void ath_beacon_free(struct ath_softc *sc);
+void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
+void ath_beacon_sync(struct ath_softc *sc, int if_id);
+void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
+void ath_get_beaconconfig(struct ath_softc *sc,
+ int if_id,
+ struct ath_beacon_config *conf);
+int ath_update_beacon(struct ath_softc *sc,
+ int if_id,
+ struct ath_beacon_offset *bo,
+ struct sk_buff *skb,
+ int mcast);
+/********/
+/* VAPs */
+/********/
+
+/*
+ * Define the scheme that we select MAC address for multiple
+ * BSS on the same radio. The very first VAP will just use the MAC
+ * address from the EEPROM. For the next 3 VAPs, we set the
+ * U/L bit (bit 1) in MAC address, and use the next two bits as the
+ * index of the VAP.
+ */
+
+#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
+ ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
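A sketch of what the mask macro actually clears, starting from a made-up all-ones mask; with ATH_BCBUF == 4 the cleared bits are the U/L bit and the two VAP-index bits of octet 0. The helper name is illustrative only.

static void example_bssid_mask(void)
{
	u8 bssid_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	/* ~(((4 - 1) << 2) | 0x02) == ~0x0e: bits 1-3 of octet 0 are
	 * ignored when the hardware matches received BSSIDs */
	ATH_SET_VAP_BSSID_MASK(bssid_mask);	/* bssid_mask[0] == 0xf1 */
}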
+
+/* VAP configuration (from protocol layer) */
+struct ath_vap_config {
+ u32 av_fixed_rateset;
+ u32 av_fixed_retryset;
+};
+
+/* driver-specific vap state */
+struct ath_vap {
+ struct ieee80211_vif *av_if_data;
+ enum ath9k_opmode av_opmode; /* VAP operational mode */
+ struct ath_buf *av_bcbuf; /* beacon buffer */
+ struct ath_beacon_offset av_boff; /* dynamic update state */
+ struct ath_tx_control av_btxctl; /* txctl information for beacon */
+ int av_bslot; /* beacon slot index */
+ struct ath_txq av_mcastq; /* multicast transmit queue */
+ struct ath_vap_config av_config;/* vap configuration parameters*/
+ struct ath_rate_node *rc_node;
+};
+
+int ath_vap_attach(struct ath_softc *sc,
+ int if_id,
+ struct ieee80211_vif *if_data,
+ enum ath9k_opmode opmode);
+int ath_vap_detach(struct ath_softc *sc, int if_id);
+int ath_vap_config(struct ath_softc *sc,
+ int if_id, struct ath_vap_config *if_config);
+int ath_vap_listen(struct ath_softc *sc, int if_id);
+
+/*********************/
+/* Antenna diversity */
+/*********************/
+
+#define ATH_ANT_DIV_MAX_CFG 2
+#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */
+#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */
+
+enum ATH_ANT_DIV_STATE{
+ ATH_ANT_DIV_IDLE,
+ ATH_ANT_DIV_SCAN, /* evaluating antenna */
+};
+
+struct ath_antdiv {
+ struct ath_softc *antdiv_sc;
+ u8 antdiv_start;
+ enum ATH_ANT_DIV_STATE antdiv_state;
+ u8 antdiv_num_antcfg;
+ u8 antdiv_curcfg;
+ u8 antdiv_bestcfg;
+ int32_t antdivf_rssitrig;
+ int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG];
+ u64 antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG];
+ u64 antdiv_laststatetsf;
+ u8 antdiv_bssid[ETH_ALEN];
+};
+
+void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
+ struct ath_softc *sc, int32_t rssitrig);
+void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
+ u8 num_antcfg,
+ const u8 *bssid);
+void ath_slow_ant_div_stop(struct ath_antdiv *antdiv);
+void ath_slow_ant_div(struct ath_antdiv *antdiv,
+ struct ieee80211_hdr *wh,
+ struct ath_rx_status *rx_stats);
+void ath_setdefantenna(void *sc, u32 antenna);
+
+/********************/
+/* Main driver core */
+/********************/
+
+/*
+ * Default cache line size, in bytes.
+ * Used when the PCI device is not fully initialized by bootrom/BIOS
+*/
+#define DEFAULT_CACHELINE 32
+#define ATH_DEFAULT_NOISE_FLOOR -95
+#define ATH_REGCLASSIDS_MAX 10
+#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
+#define ATH_PREAMBLE_SHORT (1<<0)
+#define ATH_PROTECT_ENABLE (1<<1)
+#define ATH_MAX_SW_RETRIES 10
+/* Num frames difference in tx to flip default recv */
+#define ATH_ANTENNA_DIFF 2
+#define ATH_CHAN_MAX 255
+#define IEEE80211_WEP_NKID 4 /* number of key ids */
+#define IEEE80211_RATE_VAL 0x7f
+/*
+ * The key cache is used for h/w cipher state and also for
+ * tracking station state such as the current tx antenna.
+ * We also setup a mapping table between key cache slot indices
+ * and station state to short-circuit node lookups on rx.
+ * Different parts have different size key caches. We handle
+ * up to ATH_KEYMAX entries (could dynamically allocate state).
+ */
+#define ATH_KEYMAX 128 /* max key cache size we handle */
+
+#define RESET_RETRY_TXQ 0x00000001
+#define ATH_IF_ID_ANY 0xff
+
+#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
+
+#define RSSI_LPF_THRESHOLD -20
+#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
+#define ATH_RATE_DUMMY_MARKER 0
+#define ATH_RSSI_LPF_LEN 10
+#define ATH_RSSI_DUMMY_MARKER 0x127
+
+#define ATH_EP_MUL(x, mul) ((x) * (mul))
+#define ATH_EP_RND(x, mul) \
+ ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
+#define ATH_RSSI_OUT(x) \
+ (((x) != ATH_RSSI_DUMMY_MARKER) ? \
+ (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER)
+#define ATH_RSSI_IN(x) \
+ (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
+#define ATH_LPF_RSSI(x, y, len) \
+ ((x != ATH_RSSI_DUMMY_MARKER) ? \
+ (((x) * ((len) - 1) + (y)) / (len)) : (y))
+#define ATH_RSSI_LPF(x, y) do { \
+ if ((y) >= RSSI_LPF_THRESHOLD) \
+ x = ATH_LPF_RSSI((x), \
+ ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
+ } while (0)
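A worked pass through the filter with arbitrary numbers; the running average is kept scaled by ATH_RSSI_EP_MULTIPLIER (128) so no floating point is needed. The helper name is illustrative only.

static void example_rssi_lpf(void)
{
	int32_t avg = 35 * ATH_RSSI_EP_MULTIPLIER;	/* running average of 35 */

	/* a new sample of 45 passes the RSSI_LPF_THRESHOLD check, so:
	 * avg = (4480 * 9 + 45 * 128) / 10 == 4608 */
	ATH_RSSI_LPF(avg, 45);

	/* unscale for reporting: 4608 / 128 == 36, i.e. the average has
	 * moved one step toward the new sample */
	avg = ATH_RSSI_OUT(avg);
}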
+
+
+enum PROT_MODE {
+ PROT_M_NONE = 0,
+ PROT_M_RTSCTS,
+ PROT_M_CTSONLY
+};
+
+enum RATE_TYPE {
+ NORMAL_RATE = 0,
+ HALF_RATE,
+ QUARTER_RATE
+};
+
+struct ath_ht_info {
+ enum ath9k_ht_macmode tx_chan_width;
+ u16 maxampdu;
+ u8 mpdudensity;
+ u8 ext_chan_offset;
+};
+
+struct ath_softc {
+ struct ieee80211_hw *hw;
+ struct pci_dev *pdev;
+ void __iomem *mem;
+ struct tasklet_struct intr_tq;
+ struct tasklet_struct bcon_tasklet;
+ struct ath_config sc_config; /* load-time parameters */
+ int sc_debug;
+ struct ath_hal *sc_ah;
+ struct ath_rate_softc *sc_rc; /* tx rate control support */
+ u32 sc_intrstatus;
+ enum ath9k_opmode sc_opmode; /* current operating mode */
+
+ u8 sc_invalid; /* being detached */
+ u8 sc_beacons; /* beacons running */
+ u8 sc_scanning; /* scanning active */
+ u8 sc_txaggr; /* enable 11n tx aggregation */
+ u8 sc_rxaggr; /* enable 11n rx aggregation */
+ u8 sc_update_chainmask; /* change chain mask */
+ u8 sc_full_reset; /* force full reset */
+ enum wireless_mode sc_curmode; /* current phy mode */
+ u16 sc_curtxpow;
+ u16 sc_curaid;
+ u8 sc_curbssid[ETH_ALEN];
+ u8 sc_myaddr[ETH_ALEN];
+ enum PROT_MODE sc_protmode;
+ u8 sc_mcastantenna;
+ u8 sc_txantenna; /* data tx antenna (fixed or auto) */
+ u8 sc_nbcnvaps; /* # of vaps sending beacons */
+ u16 sc_nvaps; /* # of active virtual ap's */
+ struct ath_vap *sc_vaps[ATH_BCBUF];
+ enum ath9k_int sc_imask;
+ u8 sc_bssidmask[ETH_ALEN];
+ u8 sc_defant; /* current default antenna */
+ u8 sc_rxotherant; /* rx's on non-default antenna */
+ u16 sc_cachelsz;
+ int sc_slotupdate; /* slot to next advance fsm */
+ int sc_slottime;
+ u8 sc_noreset;
+ int sc_bslot[ATH_BCBUF];
+ struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */
+ struct list_head node_list;
+ struct ath_ht_info sc_ht_info;
+ int16_t sc_noise_floor; /* signal noise floor in dBm */
+ enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
+ u8 sc_tx_chainmask;
+ u8 sc_rx_chainmask;
+ u8 sc_rxchaindetect_ref;
+ u8 sc_rxchaindetect_thresh5GHz;
+ u8 sc_rxchaindetect_thresh2GHz;
+ u8 sc_rxchaindetect_delta5GHz;
+ u8 sc_rxchaindetect_delta2GHz;
+ u32 sc_rtsaggrlimit; /* Chipset specific aggr limit */
+ u32 sc_flags;
+#ifdef CONFIG_SLOW_ANT_DIV
+ struct ath_antdiv sc_antdiv;
+#endif
+ enum {
+ OK, /* no change needed */
+ UPDATE, /* update pending */
+ COMMIT /* beacon sent, commit change */
+ } sc_updateslot; /* slot time update fsm */
+
+ /* Crypto */
+ u32 sc_keymax; /* size of key cache */
+ DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */
+ u8 sc_splitmic; /* split TKIP MIC keys */
+ int sc_keytype;
+
+ /* RX */
+ struct list_head sc_rxbuf;
+ struct ath_descdma sc_rxdma;
+ int sc_rxbufsize; /* rx size based on mtu */
+ u32 *sc_rxlink; /* link ptr in last RX desc */
+ u32 sc_rxflush; /* rx flush in progress */
+ u64 sc_lastrx; /* tsf of last rx'd frame */
+
+ /* TX */
+ struct list_head sc_txbuf;
+ struct ath_txq sc_txq[ATH9K_NUM_TX_QUEUES];
+ struct ath_descdma sc_txdma;
+ u32 sc_txqsetup;
+ u32 sc_txintrperiod; /* tx interrupt batching */
+ int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */
+ u32 sc_ant_tx[8]; /* recent tx frames/antenna */
+
+ /* Beacon */
+ struct ath9k_tx_queue_info sc_beacon_qi;
+ struct ath_descdma sc_bdma;
+ struct ath_txq *sc_cabq;
+ struct list_head sc_bbuf;
+ u32 sc_bhalq;
+ u32 sc_bmisscount;
+ u32 ast_be_xmit; /* beacons transmitted */
+
+ /* Rate */
+ struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
+ const struct ath9k_rate_table *sc_currates;
+ u8 sc_rixmap[256]; /* IEEE to h/w rate table ix */
+ u8 sc_protrix; /* protection rate index */
+ struct {
+ u32 rateKbps; /* transfer rate in kbs */
+ u8 ieeerate; /* IEEE rate */
+ } sc_hwmap[256]; /* h/w rate ix mappings */
+
+ /* Channel, Band */
+ struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ath9k_channel sc_curchan;
+
+ /* Locks */
+ spinlock_t sc_rxflushlock;
+ spinlock_t sc_rxbuflock;
+ spinlock_t sc_txbuflock;
+ spinlock_t sc_resetlock;
+ spinlock_t node_lock;
+};
+
+int ath_init(u16 devid, struct ath_softc *sc);
+void ath_deinit(struct ath_softc *sc);
+int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
+int ath_suspend(struct ath_softc *sc);
+irqreturn_t ath_isr(int irq, void *dev);
+int ath_reset(struct ath_softc *sc);
+void ath_scan_start(struct ath_softc *sc);
+void ath_scan_end(struct ath_softc *sc);
+int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
+void ath_setup_rate(struct ath_softc *sc,
+ enum wireless_mode wMode,
+ enum RATE_TYPE type,
+ const struct ath9k_rate_table *rt);
+
+/*********************/
+/* Utility Functions */
+/*********************/
+
+void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot);
+int ath_keyset(struct ath_softc *sc,
+ u16 keyix,
+ struct ath9k_keyval *hk,
+ const u8 mac[ETH_ALEN]);
+int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
+int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
+void ath_setslottime(struct ath_softc *sc);
+void ath_update_txpow(struct ath_softc *sc);
+int ath_cabq_update(struct ath_softc *);
+void ath_get_currentCountry(struct ath_softc *sc,
+ struct ath9k_country_entry *ctry);
+u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
+void ath_internal_reset(struct ath_softc *sc);
+u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
+dma_addr_t ath_skb_map_single(struct ath_softc *sc,
+ struct sk_buff *skb,
+ int direction,
+ dma_addr_t *pa);
+void ath_skb_unmap_single(struct ath_softc *sc,
+ struct sk_buff *skb,
+ int direction,
+ dma_addr_t *pa);
+void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
+enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
+
+#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
new file mode 100644
index 00000000000..bde162f128a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -0,0 +1,8571 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/io.h>
+#include <asm/unaligned.h>
+
+#include "core.h"
+#include "hw.h"
+#include "reg.h"
+#include "phy.h"
+#include "initvals.h"
+
+static void ath9k_hw_iqcal_collect(struct ath_hal *ah);
+static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains);
+static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah);
+static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah,
+ u8 numChains);
+static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah);
+static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah,
+ u8 numChains);
+
+static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 };
+static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
+
+static const struct hal_percal_data iq_cal_multi_sample = {
+ IQ_MISMATCH_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ath9k_hw_iqcal_collect,
+ ath9k_hw_iqcalibrate
+};
+static const struct hal_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ath9k_hw_iqcal_collect,
+ ath9k_hw_iqcalibrate
+};
+static const struct hal_percal_data adc_gain_cal_multi_sample = {
+ ADC_GAIN_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ath9k_hw_adc_gaincal_collect,
+ ath9k_hw_adc_gaincal_calibrate
+};
+static const struct hal_percal_data adc_gain_cal_single_sample = {
+ ADC_GAIN_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ath9k_hw_adc_gaincal_collect,
+ ath9k_hw_adc_gaincal_calibrate
+};
+static const struct hal_percal_data adc_dc_cal_multi_sample = {
+ ADC_DC_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ath9k_hw_adc_dccal_collect,
+ ath9k_hw_adc_dccal_calibrate
+};
+static const struct hal_percal_data adc_dc_cal_single_sample = {
+ ADC_DC_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ath9k_hw_adc_dccal_collect,
+ ath9k_hw_adc_dccal_calibrate
+};
+static const struct hal_percal_data adc_init_dc_cal = {
+ ADC_DC_INIT_CAL,
+ MIN_CAL_SAMPLES,
+ INIT_LOG_COUNT,
+ ath9k_hw_adc_dccal_collect,
+ ath9k_hw_adc_dccal_calibrate
+};
+
+static const struct ath_hal ar5416hal = {
+ AR5416_MAGIC,
+ 0,
+ 0,
+ NULL,
+ NULL,
+ CTRY_DEFAULT,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+};
+
+static struct ath9k_rate_table ar5416_11a_table = {
+ 8,
+ {0},
+ {
+ {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
+ {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
+ {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
+ {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
+ {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
+ {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
+ {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
+ {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4}
+ },
+};
+
+static struct ath9k_rate_table ar5416_11b_table = {
+ 4,
+ {0},
+ {
+ {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
+ {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
+ {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 1},
+ {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 1}
+ },
+};
+
+static struct ath9k_rate_table ar5416_11g_table = {
+ 12,
+ {0},
+ {
+ {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
+ {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
+ {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
+ {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
+
+ {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
+ {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
+ {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
+ {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
+ {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
+ {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
+ {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
+ {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8}
+ },
+};
+
+static struct ath9k_rate_table ar5416_11ng_table = {
+ 28,
+ {0},
+ {
+ {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
+ {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
+ {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
+ {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
+
+ {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
+ {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
+ {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
+ {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
+ {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
+ {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
+ {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
+ {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8},
+ {true, PHY_HT, 6500, 0x80, 0x00, 0, 4},
+ {true, PHY_HT, 13000, 0x81, 0x00, 1, 6},
+ {true, PHY_HT, 19500, 0x82, 0x00, 2, 6},
+ {true, PHY_HT, 26000, 0x83, 0x00, 3, 8},
+ {true, PHY_HT, 39000, 0x84, 0x00, 4, 8},
+ {true, PHY_HT, 52000, 0x85, 0x00, 5, 8},
+ {true, PHY_HT, 58500, 0x86, 0x00, 6, 8},
+ {true, PHY_HT, 65000, 0x87, 0x00, 7, 8},
+ {true, PHY_HT, 13000, 0x88, 0x00, 8, 4},
+ {true, PHY_HT, 26000, 0x89, 0x00, 9, 6},
+ {true, PHY_HT, 39000, 0x8a, 0x00, 10, 6},
+ {true, PHY_HT, 52000, 0x8b, 0x00, 11, 8},
+ {true, PHY_HT, 78000, 0x8c, 0x00, 12, 8},
+ {true, PHY_HT, 104000, 0x8d, 0x00, 13, 8},
+ {true, PHY_HT, 117000, 0x8e, 0x00, 14, 8},
+ {true, PHY_HT, 130000, 0x8f, 0x00, 15, 8},
+ },
+};
+
+static struct ath9k_rate_table ar5416_11na_table = {
+ 24,
+ {0},
+ {
+ {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
+ {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
+ {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
+ {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
+ {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
+ {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
+ {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
+ {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4},
+ {true, PHY_HT, 6500, 0x80, 0x00, 0, 0},
+ {true, PHY_HT, 13000, 0x81, 0x00, 1, 2},
+ {true, PHY_HT, 19500, 0x82, 0x00, 2, 2},
+ {true, PHY_HT, 26000, 0x83, 0x00, 3, 4},
+ {true, PHY_HT, 39000, 0x84, 0x00, 4, 4},
+ {true, PHY_HT, 52000, 0x85, 0x00, 5, 4},
+ {true, PHY_HT, 58500, 0x86, 0x00, 6, 4},
+ {true, PHY_HT, 65000, 0x87, 0x00, 7, 4},
+ {true, PHY_HT, 13000, 0x88, 0x00, 8, 0},
+ {true, PHY_HT, 26000, 0x89, 0x00, 9, 2},
+ {true, PHY_HT, 39000, 0x8a, 0x00, 10, 2},
+ {true, PHY_HT, 52000, 0x8b, 0x00, 11, 4},
+ {true, PHY_HT, 78000, 0x8c, 0x00, 12, 4},
+ {true, PHY_HT, 104000, 0x8d, 0x00, 13, 4},
+ {true, PHY_HT, 117000, 0x8e, 0x00, 14, 4},
+ {true, PHY_HT, 130000, 0x8f, 0x00, 15, 4},
+ },
+};
+
+static enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
+ const struct ath9k_channel *chan)
+{
+ if (IS_CHAN_CCK(chan))
+ return ATH9K_MODE_11A;
+ if (IS_CHAN_G(chan))
+ return ATH9K_MODE_11G;
+ return ATH9K_MODE_11A;
+}
+
+static bool ath9k_hw_wait(struct ath_hal *ah,
+ u32 reg,
+ u32 mask,
+ u32 val)
+{
+ int i;
+
+ for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) {
+ if ((REG_READ(ah, reg) & mask) == val)
+ return true;
+
+ udelay(AH_TIME_QUANTUM);
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
+ "%s: timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
+ __func__, reg, REG_READ(ah, reg), mask, val);
+ return false;
+}
+
+static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off,
+ u16 *data)
+{
+ (void) REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
+ return false;
+ }
+
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return true;
+}
+
+static int ath9k_hw_flash_map(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX);
+
+ if (!ahp->ah_cal_mem) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "%s: cannot remap eeprom region \n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off,
+ u16 *data)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ *data = ioread16(ahp->ah_cal_mem + off);
+ return true;
+}
+
+static void ath9k_hw_read_revisions(struct ath_hal *ah)
+{
+ u32 val;
+
+ val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+
+ if (val == 0xFF) {
+ val = REG_READ(ah, AR_SREV);
+
+ ah->ah_macVersion =
+ (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
+
+ ah->ah_macRev = MS(val, AR_SREV_REVISION2);
+ ah->ah_isPciExpress =
+ (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
+
+ } else {
+ if (!AR_SREV_9100(ah))
+ ah->ah_macVersion = MS(val, AR_SREV_VERSION);
+
+ ah->ah_macRev = val & AR_SREV_REVISION;
+
+ if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE)
+ ah->ah_isPciExpress = true;
+ }
+}
+
+u32 ath9k_hw_reverse_bits(u32 val, u32 n)
+{
+ u32 retval;
+ int i;
+
+ for (i = 0, retval = 0; i < n; i++) {
+ retval = (retval << 1) | (val & 1);
+ val >>= 1;
+ }
+ return retval;
+}
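For example, ath9k_hw_reverse_bits(0x5, 4) walks the four low bits of 0101b from LSB to MSB and returns 0xa (1010b); any bits of val above bit n-1 are ignored.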
+
+static void ath9k_hw_set_defaults(struct ath_hal *ah)
+{
+ int i;
+
+ ah->ah_config.dma_beacon_response_time = 2;
+ ah->ah_config.sw_beacon_response_time = 10;
+ ah->ah_config.additional_swba_backoff = 0;
+ ah->ah_config.ack_6mb = 0x0;
+ ah->ah_config.cwm_ignore_extcca = 0;
+ ah->ah_config.pcie_powersave_enable = 0;
+ ah->ah_config.pcie_l1skp_enable = 0;
+ ah->ah_config.pcie_clock_req = 0;
+ ah->ah_config.pcie_power_reset = 0x100;
+ ah->ah_config.pcie_restore = 0;
+ ah->ah_config.pcie_waen = 0;
+ ah->ah_config.analog_shiftreg = 1;
+ ah->ah_config.ht_enable = 1;
+ ah->ah_config.ofdm_trig_low = 200;
+ ah->ah_config.ofdm_trig_high = 500;
+ ah->ah_config.cck_trig_high = 200;
+ ah->ah_config.cck_trig_low = 100;
+ ah->ah_config.enable_ani = 0;
+ ah->ah_config.noise_immunity_level = 4;
+ ah->ah_config.ofdm_weaksignal_det = 1;
+ ah->ah_config.cck_weaksignal_thr = 0;
+ ah->ah_config.spur_immunity_level = 2;
+ ah->ah_config.firstep_level = 0;
+ ah->ah_config.rssi_thr_high = 40;
+ ah->ah_config.rssi_thr_low = 7;
+ ah->ah_config.diversity_control = 0;
+ ah->ah_config.antenna_switch_swap = 0;
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ ah->ah_config.spurchans[i][0] = AR_NO_SPUR;
+ ah->ah_config.spurchans[i][1] = AR_NO_SPUR;
+ }
+
+ ah->ah_config.intr_mitigation = 0;
+}
+
+static inline void ath9k_hw_override_ini(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ if (!AR_SREV_5416_V20_OR_LATER(ah)
+ || AR_SREV_9280_10_OR_LATER(ah))
+ return;
+
+ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+}
+
+static inline void ath9k_hw_init_bb(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_CCK(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
+ enum ath9k_opmode opmode)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ ahp->ah_maskReg = AR_IMR_TXERR |
+ AR_IMR_TXURN |
+ AR_IMR_RXERR |
+ AR_IMR_RXORN |
+ AR_IMR_BCNMISC;
+
+ if (ahp->ah_intrMitigation)
+ ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ ahp->ah_maskReg |= AR_IMR_RXOK;
+
+ ahp->ah_maskReg |= AR_IMR_TXOK;
+
+ if (opmode == ATH9K_M_HOSTAP)
+ ahp->ah_maskReg |= AR_IMR_MIB;
+
+ REG_WRITE(ah, AR_IMR, ahp->ah_maskReg);
+ REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
+
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
+ }
+}
+
+static inline void ath9k_hw_init_qos(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
+ REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
+
+ REG_WRITE(ah, AR_QOS_NO_ACK,
+ SM(2, AR_QOS_NO_ACK_TWO_BIT) |
+ SM(5, AR_QOS_NO_ACK_BIT_OFF) |
+ SM(0, AR_QOS_NO_ACK_BYTE_OFF));
+
+ REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
+ REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
+}
+
+static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
+ u32 reg,
+ u32 mask,
+ u32 shift,
+ u32 val)
+{
+ u32 regVal;
+
+ regVal = REG_READ(ah, reg) & ~mask;
+ regVal |= (val << shift) & mask;
+
+ REG_WRITE(ah, reg, regVal);
+
+ if (ah->ah_config.analog_shiftreg)
+ udelay(100);
+
+ return;
+}
+
+static u8 ath9k_hw_get_num_ant_config(struct ath_hal_5416 *ahp,
+ enum ieee80211_band freq_band)
+{
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ struct modal_eep_header *pModal =
+ &(eep->modalHeader[IEEE80211_BAND_5GHZ == freq_band]);
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+ u8 num_ant_config;
+
+ num_ant_config = 1;
+
+ if (pBase->version >= 0x0E0D)
+ if (pModal->useAnt1)
+ num_ant_config += 1;
+
+ return num_ant_config;
+}
+
+static int
+ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal_5416 *ahp,
+ struct ath9k_channel *chan,
+ u8 index,
+ u16 *config)
+{
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ struct modal_eep_header *pModal =
+ &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+
+ switch (index) {
+ case 0:
+ *config = pModal->antCtrlCommon & 0xFFFF;
+ return 0;
+ case 1:
+ if (pBase->version >= 0x0E0D) {
+ if (pModal->useAnt1) {
+ *config =
+ ((pModal->antCtrlCommon & 0xFFFF0000) >> 16);
+ return 0;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static inline bool ath9k_hw_nvram_read(struct ath_hal *ah,
+ u32 off,
+ u16 *data)
+{
+ if (ath9k_hw_use_flash(ah))
+ return ath9k_hw_flash_read(ah, off, data);
+ else
+ return ath9k_hw_eeprom_read(ah, off, data);
+}
+
+static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ u16 *eep_data;
+ int addr, ar5416_eep_start_loc = 0;
+
+ if (!ath9k_hw_use_flash(ah)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "%s: Reading from EEPROM, not flash\n", __func__);
+ ar5416_eep_start_loc = 256;
+ }
+ if (AR_SREV_9100(ah))
+ ar5416_eep_start_loc = 256;
+
+ eep_data = (u16 *) eep;
+ for (addr = 0;
+ addr < sizeof(struct ar5416_eeprom) / sizeof(u16);
+ addr++) {
+ if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
+ eep_data)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "%s: Unable to read eeprom region \n",
+ __func__);
+ return false;
+ }
+ eep_data++;
+ }
+ return true;
+}
+
+/* XXX: Clean me up, make me more legible */
+static bool
+ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ struct modal_eep_header *pModal;
+ int i, regChainOffset;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ u8 txRxAttenLocal;
+ u16 ant_config;
+
+ pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+
+ txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
+
+ ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 1, &ant_config);
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (AR_SREV_9280(ah)) {
+ if (i >= 2)
+ break;
+ }
+
+ if (AR_SREV_5416_V20_OR_LATER(ah) &&
+ (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
+ && (i != 0))
+ regChainOffset = (i == 1) ? 0x2000 : 0x1000;
+ else
+ regChainOffset = i * 0x1000;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
+ pModal->antCtrlChain[i]);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_TIMING_CTRL4(0) +
+ regChainOffset) &
+ ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
+ SM(pModal->iqCalICh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+
+ if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
+ if ((eep->baseEepHeader.version &
+ AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_3) {
+ txRxAttenLocal = pModal->txRxAttenCh[i];
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
+ pModal->
+ bswMargin[i]);
+ REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB,
+ pModal->
+ bswAtten[i]);
+ REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
+ pModal->
+ xatten2Margin[i]);
+ REG_RMW_FIELD(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_DB,
+ pModal->
+ xatten2Db[i]);
+ } else {
+ REG_WRITE(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
+ | SM(pModal->
+ bswMargin[i],
+ AR_PHY_GAIN_2GHZ_BSW_MARGIN));
+ REG_WRITE(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
+ | SM(pModal->bswAtten[i],
+ AR_PHY_GAIN_2GHZ_BSW_ATTEN));
+ }
+ }
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN +
+ regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN,
+ txRxAttenLocal);
+ REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN +
+ regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN,
+ pModal->rxTxMarginCh[i]);
+ } else {
+ REG_WRITE(ah,
+ AR_PHY_RXGAIN + regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_RXGAIN +
+ regChainOffset) &
+ ~AR_PHY_RXGAIN_TXRX_ATTEN) |
+ SM(txRxAttenLocal,
+ AR_PHY_RXGAIN_TXRX_ATTEN));
+ REG_WRITE(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset,
+ (REG_READ(ah,
+ AR_PHY_GAIN_2GHZ +
+ regChainOffset) &
+ ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
+ SM(pModal->rxTxMarginCh[i],
+ AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
+ }
+ }
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
+ AR_AN_RF2G1_CH0_OB,
+ AR_AN_RF2G1_CH0_OB_S,
+ pModal->ob);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
+ AR_AN_RF2G1_CH0_DB,
+ AR_AN_RF2G1_CH0_DB_S,
+ pModal->db);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
+ AR_AN_RF2G1_CH1_OB,
+ AR_AN_RF2G1_CH1_OB_S,
+ pModal->ob_ch1);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
+ AR_AN_RF2G1_CH1_DB,
+ AR_AN_RF2G1_CH1_DB_S,
+ pModal->db_ch1);
+ } else {
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
+ AR_AN_RF5G1_CH0_OB5,
+ AR_AN_RF5G1_CH0_OB5_S,
+ pModal->ob);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
+ AR_AN_RF5G1_CH0_DB5,
+ AR_AN_RF5G1_CH0_DB5_S,
+ pModal->db);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
+ AR_AN_RF5G1_CH1_OB5,
+ AR_AN_RF5G1_CH1_OB5_S,
+ pModal->ob_ch1);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
+ AR_AN_RF5G1_CH1_DB5,
+ AR_AN_RF5G1_CH1_DB5_S,
+ pModal->db_ch1);
+ }
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
+ AR_AN_TOP2_XPABIAS_LVL,
+ AR_AN_TOP2_XPABIAS_LVL_S,
+ pModal->xpaBiasLvl);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
+ AR_AN_TOP2_LOCALBIAS,
+ AR_AN_TOP2_LOCALBIAS_S,
+ pModal->local_bias);
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
+ pModal->force_xpaon);
+ REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
+ pModal->force_xpaon);
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
+ pModal->switchSettling);
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
+ pModal->adcDesiredSize);
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_PGA,
+ pModal->pgaDesiredSize);
+
+ REG_WRITE(ah, AR_PHY_RF_CTL4,
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
+ | SM(pModal->txEndToXpaOff,
+ AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
+ | SM(pModal->txFrameToXpaOn,
+ AR_PHY_RF_CTL4_FRAME_XPAA_ON)
+ | SM(pModal->txFrameToXpaOn,
+ AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
+ pModal->txEndToRxOn);
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
+ pModal->thresh62);
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
+ AR_PHY_EXT_CCA0_THRESH62,
+ pModal->thresh62);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62,
+ pModal->thresh62);
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_THRESH62,
+ pModal->thresh62);
+ }
+
+ if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
+ AR_PHY_TX_END_DATA_START,
+ pModal->txFrameToDataStart);
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
+ pModal->txFrameToPaOn);
+ }
+
+ if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_3) {
+ if (IS_CHAN_HT40(chan))
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH,
+ pModal->swSettleHt40);
+ }
+
+ return true;
+}
+
+static inline int ath9k_hw_check_eeprom(struct ath_hal *ah)
+{
+ u32 sum = 0, el;
+ u16 *eepdata;
+ int i;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ bool need_swap = false;
+ struct ar5416_eeprom *eep =
+ (struct ar5416_eeprom *) &ahp->ah_eeprom;
+
+ if (!ath9k_hw_use_flash(ah)) {
+ u16 magic, magic2;
+ int addr;
+
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
+ &magic)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "%s: Reading Magic # failed\n", __func__);
+ return -EIO;
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "%s: Read Magic = 0x%04X\n",
+ __func__, magic);
+
+ if (magic != AR5416_EEPROM_MAGIC) {
+ magic2 = swab16(magic);
+
+ if (magic2 == AR5416_EEPROM_MAGIC) {
+ need_swap = true;
+ eepdata = (u16 *) (&ahp->ah_eeprom);
+
+ for (addr = 0;
+ addr <
+ sizeof(struct ar5416_eeprom) /
+ sizeof(u16); addr++) {
+ u16 temp;
+
+ temp = swab16(*eepdata);
+ *eepdata = temp;
+ eepdata++;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "0x%04X ", *eepdata);
+ if (((addr + 1) % 6) == 0)
+ DPRINTF(ah->ah_sc,
+ ATH_DBG_EEPROM,
+ "\n");
+ }
+ } else {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "Invalid EEPROM Magic. "
+ "endianness missmatch.\n");
+ return -EINVAL;
+ }
+ }
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ need_swap ? "True" : "False");
+
+ if (need_swap)
+ el = swab16(ahp->ah_eeprom.baseEepHeader.length);
+ else
+ el = ahp->ah_eeprom.baseEepHeader.length;
+
+ if (el > sizeof(struct ar5416_eeprom))
+ el = sizeof(struct ar5416_eeprom) / sizeof(u16);
+ else
+ el = el / sizeof(u16);
+
+ eepdata = (u16 *) (&ahp->ah_eeprom);
+
+ for (i = 0; i < el; i++)
+ sum ^= *eepdata++;
+
+ if (need_swap) {
+ u32 integer, j;
+ u16 word;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "EEPROM Endianness is not native.. Changing \n");
+
+ word = swab16(eep->baseEepHeader.length);
+ eep->baseEepHeader.length = word;
+
+ word = swab16(eep->baseEepHeader.checksum);
+ eep->baseEepHeader.checksum = word;
+
+ word = swab16(eep->baseEepHeader.version);
+ eep->baseEepHeader.version = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[0]);
+ eep->baseEepHeader.regDmn[0] = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[1]);
+ eep->baseEepHeader.regDmn[1] = word;
+
+ word = swab16(eep->baseEepHeader.rfSilent);
+ eep->baseEepHeader.rfSilent = word;
+
+ word = swab16(eep->baseEepHeader.blueToothOptions);
+ eep->baseEepHeader.blueToothOptions = word;
+
+ word = swab16(eep->baseEepHeader.deviceCap);
+ eep->baseEepHeader.deviceCap = word;
+
+ for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
+ struct modal_eep_header *pModal =
+ &eep->modalHeader[j];
+ integer = swab32(pModal->antCtrlCommon);
+ pModal->antCtrlCommon = integer;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ integer = swab32(pModal->antCtrlChain[i]);
+ pModal->antCtrlChain[i] = integer;
+ }
+
+ for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
+ word = swab16(pModal->spurChans[i].spurChan);
+ pModal->spurChans[i].spurChan = word;
+ }
+ }
+ }
+
+ if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER ||
+ ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ sum, ar5416_get_eep_ver(ahp));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
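+/*
+ * Basic register access self-test: walk an address pattern and a few
+ * fixed data patterns through two scratch registers and verify each
+ * write reads back unchanged, restoring the original values afterwards.
+ */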
+static bool ath9k_hw_chip_test(struct ath_hal *ah)
+{
+ u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
+ u32 regHold[2];
+ u32 patternData[4] = { 0x55555555,
+ 0xaaaaaaaa,
+ 0x66666666,
+ 0x99999999 };
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ u32 addr = regAddr[i];
+ u32 wrData, rdData;
+
+ regHold[i] = REG_READ(ah, addr);
+ for (j = 0; j < 0x100; j++) {
+ wrData = (j << 16) | j;
+ REG_WRITE(ah, addr, wrData);
+ rdData = REG_READ(ah, addr);
+ if (rdData != wrData) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "%s: address test failed "
+ "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ __func__, addr, wrData, rdData);
+ return false;
+ }
+ }
+ for (j = 0; j < 4; j++) {
+ wrData = patternData[j];
+ REG_WRITE(ah, addr, wrData);
+ rdData = REG_READ(ah, addr);
+ if (wrData != rdData) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "%s: address test failed "
+ "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ __func__, addr, wrData, rdData);
+ return false;
+ }
+ }
+ REG_WRITE(ah, regAddr[i], regHold[i]);
+ }
+ udelay(100);
+ return true;
+}
+
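+/*
+ * The RX filter is split between the MAC filter register and the PHY
+ * error mask; these two helpers fold the radar/PHY-error bits into a
+ * single ATH9K_RX_FILTER_* bitmap and back again.
+ */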
+u32 ath9k_hw_getrxfilter(struct ath_hal *ah)
+{
+ u32 bits = REG_READ(ah, AR_RX_FILTER);
+ u32 phybits = REG_READ(ah, AR_PHY_ERR);
+
+ if (phybits & AR_PHY_ERR_RADAR)
+ bits |= ATH9K_RX_FILTER_PHYRADAR;
+ if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
+ bits |= ATH9K_RX_FILTER_PHYERR;
+ return bits;
+}
+
+void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits)
+{
+ u32 phybits;
+
+ REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
+ phybits = 0;
+ if (bits & ATH9K_RX_FILTER_PHYRADAR)
+ phybits |= AR_PHY_ERR_RADAR;
+ if (bits & ATH9K_RX_FILTER_PHYERR)
+ phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
+ REG_WRITE(ah, AR_PHY_ERR, phybits);
+
+ if (phybits)
+ REG_WRITE(ah, AR_RXCFG,
+ REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
+ else
+ REG_WRITE(ah, AR_RXCFG,
+ REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+}
+
+bool ath9k_hw_setcapability(struct ath_hal *ah,
+ enum ath9k_capability_type type,
+ u32 capability,
+ u32 setting,
+ int *status)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 v;
+
+ switch (type) {
+ case ATH9K_CAP_TKIP_MIC:
+ if (setting)
+ ahp->ah_staId1Defaults |=
+ AR_STA_ID1_CRPT_MIC_ENABLE;
+ else
+ ahp->ah_staId1Defaults &=
+ ~AR_STA_ID1_CRPT_MIC_ENABLE;
+ return true;
+ case ATH9K_CAP_DIVERSITY:
+ v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (setting)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+ return true;
+ case ATH9K_CAP_MCAST_KEYSRCH:
+ if (setting)
+ ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH;
+ else
+ ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH;
+ return true;
+ case ATH9K_CAP_TSF_ADJUST:
+ if (setting)
+ ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
+ else
+ ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
+ return true;
+ default:
+ return false;
+ }
+}
+
+void ath9k_hw_dmaRegDump(struct ath_hal *ah)
+{
+ u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
+ int qcuOffset = 0, dcuOffset = 0;
+ u32 *qcuBase = &val[0], *dcuBase = &val[4];
+ int i;
+
+ REG_WRITE(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n");
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
+ if (i % 4 == 0)
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
+
+ val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]);
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+
+ for (i = 0; i < ATH9K_NUM_QUEUES;
+ i++, qcuOffset += 4, dcuOffset += 5) {
+ if (i == 8) {
+ qcuOffset = 0;
+ qcuBase++;
+ }
+
+ if (i == 6) {
+ dcuOffset = 0;
+ dcuBase++;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset +
+ 3),
+ val[2] & (0x7 << (i * 3)) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "qcu_stitch state: %2x qcu_fetch state: %2x\n",
+ (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "qcu_complete state: %2x dcu_complete state: %2x\n",
+ (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "dcu_arb state: %2x dcu_fp state: %2x\n",
+ (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
+ (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
+ (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
+ (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x \n",
+ REG_READ(ah, AR_OBS_BUS_1));
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "AR_CR 0x%x \n", REG_READ(ah, AR_CR));
+}
+
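+/*
+ * Convert the MIB cycle counters into rx-clear, rx-frame and tx-frame
+ * percentages relative to the previous call. The static snapshots are
+ * refreshed on every call; a wrapped cycle counter yields 0 (invalid).
+ */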
+u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
+ u32 *rxc_pcnt,
+ u32 *rxf_pcnt,
+ u32 *txf_pcnt)
+{
+ static u32 cycles, rx_clear, rx_frame, tx_frame;
+ u32 good = 1;
+
+ u32 rc = REG_READ(ah, AR_RCCNT);
+ u32 rf = REG_READ(ah, AR_RFCNT);
+ u32 tf = REG_READ(ah, AR_TFCNT);
+ u32 cc = REG_READ(ah, AR_CCCNT);
+
+ if (cycles == 0 || cycles > cc) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: cycle counter wrap. ExtBusy = 0\n",
+ __func__);
+ good = 0;
+ } else {
+ u32 cc_d = cc - cycles;
+ u32 rc_d = rc - rx_clear;
+ u32 rf_d = rf - rx_frame;
+ u32 tf_d = tf - tx_frame;
+
+ if (cc_d != 0) {
+ *rxc_pcnt = rc_d * 100 / cc_d;
+ *rxf_pcnt = rf_d * 100 / cc_d;
+ *txf_pcnt = tf_d * 100 / cc_d;
+ } else {
+ good = 0;
+ }
+ }
+
+ cycles = cc;
+ rx_frame = rf;
+ rx_clear = rc;
+ tx_frame = tf;
+
+ return good;
+}
+
+void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode)
+{
+ u32 macmode;
+
+ if (mode == ATH9K_HT_MACMODE_2040 &&
+ !ah->ah_config.cwm_ignore_extcca)
+ macmode = AR_2040_JOINED_RX_CLEAR;
+ else
+ macmode = 0;
+
+ REG_WRITE(ah, AR_2040_MODE, macmode);
+}
+
+static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+
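+/*
+ * Allocate the 5416 HAL state block, copy in the ar5416hal template and
+ * fill in conservative defaults (broadcast BSSID mask, beacon interval
+ * of 100, timeouts left unset for later configuration).
+ */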
+static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
+ struct ath_softc *sc,
+ void __iomem *mem,
+ int *status)
+{
+ static const u8 defbssidmask[ETH_ALEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct ath_hal_5416 *ahp;
+ struct ath_hal *ah;
+
+ ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL);
+ if (ahp == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: cannot allocate memory for state block\n",
+ __func__);
+ *status = -ENOMEM;
+ return NULL;
+ }
+
+ ah = &ahp->ah;
+
+ memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));
+
+ ah->ah_sc = sc;
+ ah->ah_sh = mem;
+
+ ah->ah_devid = devid;
+ ah->ah_subvendorid = 0;
+
+ ah->ah_flags = 0;
+ if (devid == AR5416_AR9100_DEVID)
+ ah->ah_macVersion = AR_SREV_VERSION_9100;
+ if (!AR_SREV_9100(ah))
+ ah->ah_flags = AH_USE_EEPROM;
+
+ ah->ah_powerLimit = MAX_RATE_POWER;
+ ah->ah_tpScale = ATH9K_TP_SCALE_MAX;
+
+ ahp->ah_atimWindow = 0;
+ ahp->ah_diversityControl = ah->ah_config.diversity_control;
+ ahp->ah_antennaSwitchSwap =
+ ah->ah_config.antenna_switch_swap;
+
+ ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
+ ahp->ah_beaconInterval = 100;
+ ahp->ah_enable32kHzClock = DONT_USE_32KHZ;
+ ahp->ah_slottime = (u32) -1;
+ ahp->ah_acktimeout = (u32) -1;
+ ahp->ah_ctstimeout = (u32) -1;
+ ahp->ah_globaltxtimeout = (u32) -1;
+ memcpy(&ahp->ah_bssidmask, defbssidmask, ETH_ALEN);
+
+ ahp->ah_gBeaconRate = 0;
+
+ return ahp;
+}
+
+static int ath9k_hw_eeprom_attach(struct ath_hal *ah)
+{
+ int status;
+
+ if (ath9k_hw_use_flash(ah))
+ ath9k_hw_flash_map(ah);
+
+ if (!ath9k_hw_fill_eeprom(ah))
+ return -EIO;
+
+ status = ath9k_hw_check_eeprom(ah);
+
+ return status;
+}
+
+u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
+ enum eeprom_param param)
+{
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ struct modal_eep_header *pModal = eep->modalHeader;
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+
+ switch (param) {
+ case EEP_NFTHRESH_5:
+ return -pModal[0].noiseFloorThreshCh[0];
+ case EEP_NFTHRESH_2:
+ return -pModal[1].noiseFloorThreshCh[0];
+ case AR_EEPROM_MAC(0):
+ return pBase->macAddr[0] << 8 | pBase->macAddr[1];
+ case AR_EEPROM_MAC(1):
+ return pBase->macAddr[2] << 8 | pBase->macAddr[3];
+ case AR_EEPROM_MAC(2):
+ return pBase->macAddr[4] << 8 | pBase->macAddr[5];
+ case EEP_REG_0:
+ return pBase->regDmn[0];
+ case EEP_REG_1:
+ return pBase->regDmn[1];
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_OB_5:
+ return pModal[0].ob;
+ case EEP_DB_5:
+ return pModal[0].db;
+ case EEP_OB_2:
+ return pModal[1].ob;
+ case EEP_DB_2:
+ return pModal[1].db;
+ case EEP_MINOR_REV:
+ return pBase->version & AR5416_EEP_VER_MINOR_MASK;
+ case EEP_TX_MASK:
+ return pBase->txMask;
+ case EEP_RX_MASK:
+ return pBase->rxMask;
+ default:
+ return 0;
+ }
+}
+
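+/*
+ * Read the analog radio revision: clock the revision out through
+ * AR_PHY(0x20), then nibble-swap and bit-reverse the returned byte.
+ */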
+static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
+{
+ u32 val;
+ int i;
+
+ REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
+ val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
+ val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
+ return ath9k_hw_reverse_bits(val, 8);
+}
+
+static inline int ath9k_hw_init_macaddr(struct ath_hal *ah)
+{
+ u32 sum;
+ int i;
+ u16 eeval;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ DECLARE_MAC_BUF(mac);
+
+ sum = 0;
+ for (i = 0; i < 3; i++) {
+ eeval = ath9k_hw_get_eeprom(ahp, AR_EEPROM_MAC(i));
+ sum += eeval;
+ ahp->ah_macaddr[2 * i] = eeval >> 8;
+ ahp->ah_macaddr[2 * i + 1] = eeval & 0xff;
+ }
+ if (sum == 0 || sum == 0xffff * 3) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "%s: mac address read failed: %s\n", __func__,
+ print_mac(mac, ahp->ah_macaddr));
+ return -EADDRNOTAVAIL;
+ }
+
+ return 0;
+}
+
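+/*
+ * Linear interpolation between two calibration points; if the source
+ * points coincide, the left-hand target value is returned as-is.
+ */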
+static inline int16_t ath9k_hw_interpolate(u16 target,
+ u16 srcLeft,
+ u16 srcRight,
+ int16_t targetLeft,
+ int16_t targetRight)
+{
+ int16_t rv;
+
+ if (srcRight == srcLeft) {
+ rv = targetLeft;
+ } else {
+ rv = (int16_t) (((target - srcLeft) * targetRight +
+ (srcRight - target) * targetLeft) /
+ (srcRight - srcLeft));
+ }
+ return rv;
+}
+
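+/*
+ * Convert an EEPROM frequency byte to MHz: 2300 + fbin on 2.4 GHz and
+ * 4800 + 5 * fbin on 5 GHz; AR5416_BCHAN_UNUSED is passed through.
+ */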
+static inline u16 ath9k_hw_fbin2freq(u8 fbin,
+ bool is2GHz)
+{
+
+ if (fbin == AR5416_BCHAN_UNUSED)
+ return fbin;
+
+ return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
+}
+
+static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
+ u16 i,
+ bool is2GHz)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416_eeprom *eep =
+ (struct ar5416_eeprom *) &ahp->ah_eeprom;
+ u16 spur_val = AR_NO_SPUR;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "Getting spur idx %d is2Ghz. %d val %x\n",
+ i, is2GHz, ah->ah_config.spurchans[i][is2GHz]);
+
+ switch (ah->ah_config.spurmode) {
+ case SPUR_DISABLE:
+ break;
+ case SPUR_ENABLE_IOCTL:
+ spur_val = ah->ah_config.spurchans[i][is2GHz];
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "Getting spur val from new loc. %d\n", spur_val);
+ break;
+ case SPUR_ENABLE_EEPROM:
+ spur_val = eep->modalHeader[is2GHz].spurChans[i].spurChan;
+ break;
+
+ }
+ return spur_val;
+}
+
+static inline int ath9k_hw_rfattach(struct ath_hal *ah)
+{
+ bool rfStatus = false;
+ int ecode = 0;
+
+ rfStatus = ath9k_hw_init_rf(ah, &ecode);
+ if (!rfStatus) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s: RF setup failed, status %u\n", __func__,
+ ecode);
+ return ecode;
+ }
+
+ return 0;
+}
+
+static int ath9k_hw_rf_claim(struct ath_hal *ah)
+{
+ u32 val;
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ val = ath9k_hw_get_radiorev(ah);
+ switch (val & AR_RADIO_SREV_MAJOR) {
+ case 0:
+ val = AR_RAD5133_SREV_MAJOR;
+ break;
+ case AR_RAD5133_SREV_MAJOR:
+ case AR_RAD5122_SREV_MAJOR:
+ case AR_RAD2133_SREV_MAJOR:
+ case AR_RAD2122_SREV_MAJOR:
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: 5G Radio Chip Rev 0x%02X is not "
+ "supported by this driver\n",
+ __func__, val);
+ return -EOPNOTSUPP;
+ }
+
+ ah->ah_analog5GhzRev = val;
+
+ return 0;
+}
+
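+/*
+ * Program the RTC PLL for the target channel. The divider and clock
+ * select depend on the MAC revision and on whether the channel is
+ * 5 GHz or half/quarter rate; afterwards the PLL is allowed to settle
+ * and the sleep clock is forced to the derived clock.
+ */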
+static inline void ath9k_hw_init_pll(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ if (AR_SREV_9100(ah)) {
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll = 0x1450;
+ else
+ pll = 0x1458;
+ } else {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan)) {
+ pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
+
+
+ if (AR_SREV_9280_20(ah)) {
+ if (((chan->channel % 20) == 0)
+ || ((chan->channel % 10) == 0))
+ pll = 0x2850;
+ else
+ pll = 0x142c;
+ }
+ } else {
+ pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
+ }
+
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
+ else
+ pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
+ } else {
+ pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0xa, AR_RTC_PLL_DIV);
+ else
+ pll |= SM(0xb, AR_RTC_PLL_DIV);
+ }
+ }
+ REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll);
+
+ udelay(RTC_PLL_SETTLE_DELAY);
+
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+}
+
+static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
+ enum ath9k_ht_macmode macmode)
+{
+ u32 phymode;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
+ | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH;
+
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_FC_DYN2040_EN;
+
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_FC_DYN2040_PRI_CH;
+
+ if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
+ phymode |= AR_PHY_FC_DYN2040_EXT_CH;
+ }
+ REG_WRITE(ah, AR_PHY_TURBO, phymode);
+
+ ath9k_hw_set11nmac2040(ah, macmode);
+
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+}
+
+static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
+{
+ u32 val;
+
+ val = REG_READ(ah, AR_STA_ID1);
+ val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
+ switch (opmode) {
+ case ATH9K_M_HOSTAP:
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
+ | AR_STA_ID1_KSRCH_MODE);
+ REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
+ break;
+ case ATH9K_M_IBSS:
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
+ | AR_STA_ID1_KSRCH_MODE);
+ REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
+ break;
+ case ATH9K_M_STA:
+ case ATH9K_M_MONITOR:
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+ break;
+ }
+}
+
+static inline void
+ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ rfMode |= (IS_CHAN_5GHZ(chan)) ? AR_PHY_MODE_RF5GHZ :
+ AR_PHY_MODE_RF2GHZ;
+
+ if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
+{
+ u32 rst_flags;
+ u32 tmpReg;
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ AR_RTC_FORCE_WAKE_ON_INT);
+
+ if (AR_SREV_9100(ah)) {
+ rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
+ AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
+ } else {
+ tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if (tmpReg &
+ (AR_INTR_SYNC_LOCAL_TIMEOUT |
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+ } else {
+ REG_WRITE(ah, AR_RC, AR_RC_AHB);
+ }
+
+ rst_flags = AR_RTC_RC_MAC_WARM;
+ if (type == ATH9K_RESET_COLD)
+ rst_flags |= AR_RTC_RC_MAC_COLD;
+ }
+
+ REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags);
+ udelay(50);
+
+ REG_WRITE(ah, (u16) (AR_RTC_RC), 0);
+ if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s: RTC stuck in MAC reset\n",
+ __func__);
+ return false;
+ }
+
+ if (!AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_RC, 0);
+
+ ath9k_hw_init_pll(ah, NULL);
+
+ if (AR_SREV_9100(ah))
+ udelay(50);
+
+ return true;
+}
+
+static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ AR_RTC_FORCE_WAKE_ON_INT);
+
+ REG_WRITE(ah, (u16) (AR_RTC_RESET), 0);
+ REG_WRITE(ah, (u16) (AR_RTC_RESET), 1);
+
+ if (!ath9k_hw_wait(ah,
+ AR_RTC_STATUS,
+ AR_RTC_STATUS_M,
+ AR_RTC_STATUS_ON)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: RTC not waking up\n",
+ __func__);
+ return false;
+ }
+
+ ath9k_hw_read_revisions(ah);
+
+ return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
+}
+
+static bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
+ u32 type)
+{
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
+
+ switch (type) {
+ case ATH9K_RESET_POWER_ON:
+ return ath9k_hw_set_reset_power_on(ah);
+ case ATH9K_RESET_WARM:
+ case ATH9K_RESET_COLD:
+ return ath9k_hw_set_reset(ah, type);
+ default:
+ return false;
+ }
+}
+
+static inline
+struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: invalid channel %u/0x%x; not marked as "
+ "2GHz or 5GHz\n", __func__, chan->channel,
+ chan->channelFlags);
+ return NULL;
+ }
+
+ if (!IS_CHAN_OFDM(chan) &&
+ !IS_CHAN_CCK(chan) &&
+ !IS_CHAN_HT20(chan) &&
+ !IS_CHAN_HT40(chan)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: invalid channel %u/0x%x; not marked as "
+ "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
+ __func__, chan->channel, chan->channelFlags);
+ return NULL;
+ }
+
+ return ath9k_regd_check_channel(ah, chan);
+}
+
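+/*
+ * Find the entries of a sorted list that bracket 'target'. Returns true
+ * when the target matches an entry exactly or is clamped to either end
+ * of the list, false when it falls between two distinct entries.
+ */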
+static inline bool
+ath9k_hw_get_lower_upper_index(u8 target,
+ u8 *pList,
+ u16 listSize,
+ u16 *indexL,
+ u16 *indexR)
+{
+ u16 i;
+
+ if (target <= pList[0]) {
+ *indexL = *indexR = 0;
+ return true;
+ }
+ if (target >= pList[listSize - 1]) {
+ *indexL = *indexR = (u16) (listSize - 1);
+ return true;
+ }
+
+ for (i = 0; i < listSize - 1; i++) {
+ if (pList[i] == target) {
+ *indexL = *indexR = i;
+ return true;
+ }
+ if (target < pList[i + 1]) {
+ *indexL = i;
+ *indexR = (u16) (i + 1);
+ return false;
+ }
+ }
+ return false;
+}
+
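+/*
+ * Return the median of the noise floor history buffer, using a simple
+ * in-place sort of a local copy.
+ */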
+static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
+{
+ int16_t nfval;
+ int16_t sort[ATH9K_NF_CAL_HIST_MAX];
+ int i, j;
+
+ for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
+ sort[i] = nfCalBuffer[i];
+
+ for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
+ for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
+ if (sort[j] > sort[j - 1]) {
+ nfval = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = nfval;
+ }
+ }
+ }
+ nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
+
+ return nfval;
+}
+
+static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
+ int16_t *nfarray)
+{
+ int i;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
+
+ if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
+ h[i].currIndex = 0;
+
+ if (h[i].invalidNFcount > 0) {
+ if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE
+ || nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) {
+ h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX;
+ } else {
+ h[i].invalidNFcount--;
+ h[i].privNF = nfarray[i];
+ }
+ } else {
+ h[i].privNF =
+ ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
+ }
+ }
+ return;
+}
+
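+/*
+ * Read the per-chain minimum CCA power measurements for the control and
+ * extension channels and sign-extend each 9-bit value into a signed
+ * noise floor reading.
+ */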
+static void ar5416GetNoiseFloor(struct ath_hal *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ int16_t nf;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+ else
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR9280_PHY_CH1_MINCCA_PWR);
+ else
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR_PHY_CH1_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ if (!AR_SREV_9280(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
+ AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
+ AR9280_PHY_EXT_MINCCA_PWR);
+ else
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
+ AR_PHY_EXT_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
+ AR9280_PHY_CH1_EXT_MINCCA_PWR);
+ else
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
+ AR_PHY_CH1_EXT_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ if (!AR_SREV_9280(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
+ AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+ }
+}
+
+static bool
+getNoiseFloorThresh(struct ath_hal *ah,
+ const struct ath9k_channel *chan,
+ int16_t *nft)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_5);
+ break;
+ case CHANNEL_B:
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_2);
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: invalid channel flags 0x%x\n", __func__,
+ chan->channelFlags);
+ return false;
+ }
+ return true;
+}
+
+static void ath9k_hw_start_nfcal(struct ath_hal *ah)
+{
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+}
+
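+/*
+ * Load the filtered noise floor history into the CCA registers, start a
+ * calibration pass to latch the values, wait for it to finish and then
+ * reload the registers with a default of -50.
+ */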
+static void
+ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ int i, j;
+ int32_t val;
+ const u32 ar5416_cca_regs[6] = {
+ AR_PHY_CCA,
+ AR_PHY_CH1_CCA,
+ AR_PHY_CH2_CCA,
+ AR_PHY_EXT_CCA,
+ AR_PHY_CH1_EXT_CCA,
+ AR_PHY_CH2_EXT_CCA
+ };
+ u8 chainmask;
+
+ if (AR_SREV_9280(ah))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x3F;
+
+#ifdef ATH_NF_PER_CHAN
+ h = chan->nfCalHist;
+#else
+ h = ah->nfCalHist;
+#endif
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+ for (j = 0; j < 1000; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(10);
+ }
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+}
+
+static int16_t ath9k_hw_getnf(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ int16_t nf, nfThresh;
+ int16_t nfarray[NUM_NF_READINGS] = { 0 };
+ struct ath9k_nfcal_hist *h;
+ u8 chainmask;
+
+ if (AR_SREV_9280(ah))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x3F;
+
+ chan->channelFlags &= (~CHANNEL_CW_INT);
+ if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: NF did not complete in calibration window\n",
+ __func__);
+ nf = 0;
+ chan->rawNoiseFloor = nf;
+ return chan->rawNoiseFloor;
+ } else {
+ ar5416GetNoiseFloor(ah, nfarray);
+ nf = nfarray[0];
+ if (getNoiseFloorThresh(ah, chan, &nfThresh)
+ && nf > nfThresh) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: noise floor failed detected; "
+ "detected %d, threshold %d\n", __func__,
+ nf, nfThresh);
+ chan->channelFlags |= CHANNEL_CW_INT;
+ }
+ }
+
+#ifdef ATH_NF_PER_CHAN
+ h = chan->nfCalHist;
+#else
+ h = ah->nfCalHist;
+#endif
+
+ ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
+ chan->rawNoiseFloor = h[0].privNF;
+
+ return chan->rawNoiseFloor;
+}
+
+static void ath9k_hw_update_mibstats(struct ath_hal *ah,
+ struct ath9k_mib_stats *stats)
+{
+ stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
+ stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
+ stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
+ stats->rts_good += REG_READ(ah, AR_RTS_OK);
+ stats->beacons += REG_READ(ah, AR_BEACON_CNT);
+}
+
+static void ath9k_enable_mib_counters(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable mib counters\n");
+
+ ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
+
+ REG_WRITE(ah, AR_FILT_OFDM, 0);
+ REG_WRITE(ah, AR_FILT_CCK, 0);
+ REG_WRITE(ah, AR_MIBC,
+ ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS)
+ & 0x0f);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+}
+
+static void ath9k_hw_disable_mib_counters(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling MIB counters\n");
+
+ REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC);
+
+ ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
+
+ REG_WRITE(ah, AR_FILT_OFDM, 0);
+ REG_WRITE(ah, AR_FILT_CCK, 0);
+}
+
+static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
+ if (ahp->ah_ani[i].c.channel == chan->channel)
+ return i;
+ if (ahp->ah_ani[i].c.channel == 0) {
+ ahp->ah_ani[i].c.channel = chan->channel;
+ ahp->ah_ani[i].c.channelFlags = chan->channelFlags;
+ return i;
+ }
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "No more channel states left. Using channel 0\n");
+ return 0;
+}
+
+static void ath9k_hw_ani_attach(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int i;
+
+ ahp->ah_hasHwPhyCounters = 1;
+
+ memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani));
+ for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
+ ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
+ ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
+ ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
+ ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
+ ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
+ ahp->ah_ani[i].ofdmWeakSigDetectOff =
+ !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ ahp->ah_ani[i].cckWeakSigThreshold =
+ ATH9K_ANI_CCK_WEAK_SIG_THR;
+ ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ if (ahp->ah_hasHwPhyCounters) {
+ ahp->ah_ani[i].ofdmPhyErrBase =
+ AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
+ ahp->ah_ani[i].cckPhyErrBase =
+ AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
+ }
+ }
+ if (ahp->ah_hasHwPhyCounters) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "Setting OfdmErrBase = 0x%08x\n",
+ ahp->ah_ani[0].ofdmPhyErrBase);
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
+ ahp->ah_ani[0].cckPhyErrBase);
+
+ REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase);
+ ath9k_enable_mib_counters(ah);
+ }
+ ahp->ah_aniPeriod = ATH9K_ANI_PERIOD;
+ if (ah->ah_config.enable_ani)
+ ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
+}
+
+static inline void ath9k_hw_ani_setup(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int i;
+
+ const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
+ const int coarseHigh[] = { -14, -14, -14, -14, -12 };
+ const int coarseLow[] = { -64, -64, -64, -64, -70 };
+ const int firpwr[] = { -78, -78, -78, -78, -80 };
+
+ for (i = 0; i < 5; i++) {
+ ahp->ah_totalSizeDesired[i] = totalSizeDesired[i];
+ ahp->ah_coarseHigh[i] = coarseHigh[i];
+ ahp->ah_coarseLow[i] = coarseLow[i];
+ ahp->ah_firpwr[i] = firpwr[i];
+ }
+}
+
+static void ath9k_hw_ani_detach(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detaching Ani\n");
+ if (ahp->ah_hasHwPhyCounters) {
+ ath9k_hw_disable_mib_counters(ah);
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
+ }
+}
+
+
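+/*
+ * Apply a single ANI control change (noise/spur immunity level, OFDM
+ * weak signal detection, CCK weak signal threshold or firstep level),
+ * update the ANI statistics and dump the resulting ANI state.
+ */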
+static bool ath9k_hw_ani_control(struct ath_hal *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416AniState *aniState = ahp->ah_curani;
+
+ switch (cmd & ahp->ah_ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: level out of range (%u > %u)\n",
+ __func__, level,
+ (unsigned) ARRAY_SIZE(ahp->ah_totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ahp->ah_totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_LOW,
+ ahp->ah_coarseLow[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_HIGH,
+ ahp->ah_coarseHigh[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR,
+ ahp->ah_firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ahp->ah_stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ahp->ah_stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH,
+ m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR,
+ m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH,
+ m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ahp->ah_stats.ast_ani_ofdmon++;
+ else
+ ahp->ah_stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ahp->ah_stats.ast_ani_cckhigh++;
+ else
+ ahp->ah_stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: level out of range (%u > %u)\n",
+ __func__, level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ahp->ah_stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ahp->ah_stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] =
+ { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: level out of range (%u > %u)\n",
+ __func__, level,
+ (unsigned)
+ ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ahp->ah_stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ahp->ah_stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: invalid cmd %u\n", __func__, cmd);
+ return false;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI, "%s: ANI parameters:\n", __func__);
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel, aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold, aniState->firstepLevel,
+ aniState->listenTime);
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount, aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+ return true;
+}
+
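+/*
+ * Restart the ANI measurement window: clear the listen time, re-seed the
+ * hardware PHY error counters just below their trigger thresholds and
+ * reset the cached OFDM/CCK error counts.
+ */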
+static void ath9k_ani_restart(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416AniState *aniState;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ahp->ah_curani;
+
+ aniState->listenTime = 0;
+ if (ahp->ah_hasHwPhyCounters) {
+ if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
+ aniState->ofdmPhyErrBase = 0;
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "OFDM Trigger is too high for hw counters\n");
+ } else {
+ aniState->ofdmPhyErrBase =
+ AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
+ }
+ if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
+ aniState->cckPhyErrBase = 0;
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "CCK Trigger is too high for hw counters\n");
+ } else {
+ aniState->cckPhyErrBase =
+ AR_PHY_COUNTMAX - aniState->cckTrigHigh;
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: Writing ofdmbase=%u cckbase=%u\n",
+ __func__, aniState->ofdmPhyErrBase,
+ aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
+ }
+ aniState->ofdmPhyErrCount = 0;
+ aniState->cckPhyErrCount = 0;
+}
+
+static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel *chan = ah->ah_curchan;
+ struct ar5416AniState *aniState;
+ enum wireless_mode mode;
+ int32_t rssi;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ahp->ah_curani;
+
+ if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
+ if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
+ aniState->noiseImmunityLevel + 1)) {
+ return;
+ }
+ }
+
+ if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
+ if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+ aniState->spurImmunityLevel + 1)) {
+ return;
+ }
+ }
+
+ if (ah->ah_opmode == ATH9K_M_HOSTAP) {
+ if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
+ ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1);
+ }
+ return;
+ }
+ rssi = BEACON_RSSI(ahp);
+ if (rssi > aniState->rssiThrHigh) {
+ if (!aniState->ofdmWeakSigDetectOff) {
+ if (ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ false)) {
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+ 0);
+ return;
+ }
+ }
+ if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
+ ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1);
+ return;
+ }
+ } else if (rssi > aniState->rssiThrLow) {
+ if (aniState->ofdmWeakSigDetectOff)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ true);
+ if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1);
+ return;
+ } else {
+ mode = ath9k_hw_chan2wmode(ah, chan);
+ if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
+ if (!aniState->ofdmWeakSigDetectOff)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ false);
+ if (aniState->firstepLevel > 0)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ 0);
+ return;
+ }
+ }
+}
+
+static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel *chan = ah->ah_curchan;
+ struct ar5416AniState *aniState;
+ enum wireless_mode mode;
+ int32_t rssi;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ahp->ah_curani;
+ if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
+ if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
+ aniState->noiseImmunityLevel + 1)) {
+ return;
+ }
+ }
+ if (ah->ah_opmode == ATH9K_M_HOSTAP) {
+ if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
+ ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1);
+ }
+ return;
+ }
+ rssi = BEACON_RSSI(ahp);
+ if (rssi > aniState->rssiThrLow) {
+ if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel + 1);
+ } else {
+ mode = ath9k_hw_chan2wmode(ah, chan);
+ if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
+ if (aniState->firstepLevel > 0)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ 0);
+ }
+ }
+}
+
+static void ath9k_ani_reset(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416AniState *aniState;
+ struct ath9k_channel *chan = ah->ah_curchan;
+ int index;
+
+ if (!DO_ANI(ah))
+ return;
+
+ index = ath9k_hw_get_ani_channel_idx(ah, chan);
+ aniState = &ahp->ah_ani[index];
+ ahp->ah_curani = aniState;
+
+ if (DO_ANI(ah) && ah->ah_opmode != ATH9K_M_STA
+ && ah->ah_opmode != ATH9K_M_IBSS) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: Reset ANI state opmode %u\n", __func__,
+ ah->ah_opmode);
+ ahp->ah_stats.ast_ani_reset++;
+ ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
+ ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
+ ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ !ATH9K_ANI_USE_OFDM_WEAK_SIG);
+ ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
+ ATH9K_ANI_CCK_WEAK_SIG_THR);
+ ath9k_hw_setrxfilter(ah,
+ ath9k_hw_getrxfilter(ah) |
+ ATH9K_RX_FILTER_PHYERR);
+ if (ah->ah_opmode == ATH9K_M_HOSTAP) {
+ ahp->ah_curani->ofdmTrigHigh =
+ ah->ah_config.ofdm_trig_high;
+ ahp->ah_curani->ofdmTrigLow =
+ ah->ah_config.ofdm_trig_low;
+ ahp->ah_curani->cckTrigHigh =
+ ah->ah_config.cck_trig_high;
+ ahp->ah_curani->cckTrigLow =
+ ah->ah_config.cck_trig_low;
+ }
+ ath9k_ani_restart(ah);
+ return;
+ }
+
+ if (aniState->noiseImmunityLevel != 0)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
+ aniState->noiseImmunityLevel);
+ if (aniState->spurImmunityLevel != 0)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+ aniState->spurImmunityLevel);
+ if (aniState->ofdmWeakSigDetectOff)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ !aniState->ofdmWeakSigDetectOff);
+ if (aniState->cckWeakSigThreshold)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
+ aniState->cckWeakSigThreshold);
+ if (aniState->firstepLevel != 0)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel);
+ if (ahp->ah_hasHwPhyCounters) {
+ ath9k_hw_setrxfilter(ah,
+ ath9k_hw_getrxfilter(ah) &
+ ~ATH9K_RX_FILTER_PHYERR);
+ ath9k_ani_restart(ah);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ } else {
+ ath9k_ani_restart(ah);
+ ath9k_hw_setrxfilter(ah,
+ ath9k_hw_getrxfilter(ah) |
+ ATH9K_RX_FILTER_PHYERR);
+ }
+}
+
+void ath9k_hw_procmibevent(struct ath_hal *ah,
+ const struct ath9k_node_stats *stats)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 phyCnt1, phyCnt2;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n");
+
+ REG_WRITE(ah, AR_FILT_OFDM, 0);
+ REG_WRITE(ah, AR_FILT_CCK, 0);
+ if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
+ REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
+
+ ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
+ ahp->ah_stats.ast_nodestats = *stats;
+
+ if (!DO_ANI(ah))
+ return;
+
+ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+ phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+ if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
+ ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
+ struct ar5416AniState *aniState = ahp->ah_curani;
+ u32 ofdmPhyErrCnt, cckPhyErrCnt;
+
+ ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
+ ahp->ah_stats.ast_ani_ofdmerrs +=
+ ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
+ aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
+
+ cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
+ ahp->ah_stats.ast_ani_cckerrs +=
+ cckPhyErrCnt - aniState->cckPhyErrCount;
+ aniState->cckPhyErrCount = cckPhyErrCnt;
+
+ if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
+ ath9k_hw_ani_ofdm_err_trigger(ah);
+ if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
+ ath9k_hw_ani_cck_err_trigger(ah);
+
+ ath9k_ani_restart(ah);
+ }
+}
+
+static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416AniState *aniState;
+ int32_t rssi;
+
+ aniState = ahp->ah_curani;
+
+ if (ah->ah_opmode == ATH9K_M_HOSTAP) {
+ if (aniState->firstepLevel > 0) {
+ if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel - 1)) {
+ return;
+ }
+ }
+ } else {
+ rssi = BEACON_RSSI(ahp);
+ if (rssi > aniState->rssiThrHigh) {
+ /* XXX: Handle me */
+ } else if (rssi > aniState->rssiThrLow) {
+ if (aniState->ofdmWeakSigDetectOff) {
+ if (ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ true))
+ return;
+ }
+ if (aniState->firstepLevel > 0) {
+ if (ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel - 1))
+ return;
+ }
+ } else {
+ if (aniState->firstepLevel > 0) {
+ if (ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ aniState->firstepLevel - 1))
+ return;
+ }
+ }
+ }
+
+ if (aniState->spurImmunityLevel > 0) {
+ if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+ aniState->spurImmunityLevel - 1)) {
+ return;
+ }
+ }
+
+ if (aniState->noiseImmunityLevel > 0) {
+ ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
+ aniState->noiseImmunityLevel - 1);
+ return;
+ }
+}
+
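+/*
+ * Derive the listen time from the cycle counter delta minus the time
+ * spent receiving and transmitting frames since the previous call; the
+ * division by 44000 assumes a 44 MHz MAC clock, giving milliseconds.
+ */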
+static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416AniState *aniState;
+ u32 txFrameCount, rxFrameCount, cycleCount;
+ int32_t listenTime;
+
+ txFrameCount = REG_READ(ah, AR_TFCNT);
+ rxFrameCount = REG_READ(ah, AR_RFCNT);
+ cycleCount = REG_READ(ah, AR_CCCNT);
+
+ aniState = ahp->ah_curani;
+ if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
+
+ listenTime = 0;
+ ahp->ah_stats.ast_ani_lzero++;
+ } else {
+ int32_t ccdelta = cycleCount - aniState->cycleCount;
+ int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
+ int32_t tfdelta = txFrameCount - aniState->txFrameCount;
+ listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
+ }
+ aniState->cycleCount = cycleCount;
+ aniState->txFrameCount = txFrameCount;
+ aniState->rxFrameCount = rxFrameCount;
+
+ return listenTime;
+}
+
+void ath9k_hw_ani_monitor(struct ath_hal *ah,
+ const struct ath9k_node_stats *stats,
+ struct ath9k_channel *chan)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416AniState *aniState;
+ int32_t listenTime;
+
+ aniState = ahp->ah_curani;
+ ahp->ah_stats.ast_nodestats = *stats;
+
+ listenTime = ath9k_hw_ani_get_listen_time(ah);
+ if (listenTime < 0) {
+ ahp->ah_stats.ast_ani_lneg++;
+ ath9k_ani_restart(ah);
+ return;
+ }
+
+ aniState->listenTime += listenTime;
+
+ if (ahp->ah_hasHwPhyCounters) {
+ u32 phyCnt1, phyCnt2;
+ u32 ofdmPhyErrCnt, cckPhyErrCnt;
+
+ ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
+
+ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+ phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+ if (phyCnt1 < aniState->ofdmPhyErrBase ||
+ phyCnt2 < aniState->cckPhyErrBase) {
+ if (phyCnt1 < aniState->ofdmPhyErrBase) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: phyCnt1 0x%x, resetting "
+ "counter value to 0x%x\n",
+ __func__, phyCnt1,
+ aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_1,
+ aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1,
+ AR_PHY_ERR_OFDM_TIMING);
+ }
+ if (phyCnt2 < aniState->cckPhyErrBase) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
+ "%s: phyCnt2 0x%x, resetting "
+ "counter value to 0x%x\n",
+ __func__, phyCnt2,
+ aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_2,
+ aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2,
+ AR_PHY_ERR_CCK_TIMING);
+ }
+ return;
+ }
+
+ ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
+ ahp->ah_stats.ast_ani_ofdmerrs +=
+ ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
+ aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
+
+ cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
+ ahp->ah_stats.ast_ani_cckerrs +=
+ cckPhyErrCnt - aniState->cckPhyErrCount;
+ aniState->cckPhyErrCount = cckPhyErrCnt;
+ }
+
+ if (!DO_ANI(ah))
+ return;
+
+ if (aniState->listenTime > 5 * ahp->ah_aniPeriod) {
+ if (aniState->ofdmPhyErrCount <= aniState->listenTime *
+ aniState->ofdmTrigLow / 1000 &&
+ aniState->cckPhyErrCount <= aniState->listenTime *
+ aniState->cckTrigLow / 1000)
+ ath9k_hw_ani_lower_immunity(ah);
+ ath9k_ani_restart(ah);
+ } else if (aniState->listenTime > ahp->ah_aniPeriod) {
+ if (aniState->ofdmPhyErrCount > aniState->listenTime *
+ aniState->ofdmTrigHigh / 1000) {
+ ath9k_hw_ani_ofdm_err_trigger(ah);
+ ath9k_ani_restart(ah);
+ } else if (aniState->cckPhyErrCount >
+ aniState->listenTime * aniState->cckTrigHigh /
+ 1000) {
+ ath9k_hw_ani_cck_err_trigger(ah);
+ ath9k_ani_restart(ah);
+ }
+ }
+}
+
+#ifndef ATH_NF_PER_CHAN
+static void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ ah->nfCalHist[i].currIndex = 0;
+ ah->nfCalHist[i].privNF = AR_PHY_CCA_MAX_GOOD_VALUE;
+ ah->nfCalHist[i].invalidNFcount =
+ AR_PHY_CCA_FILTERWINDOW_LENGTH;
+ for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
+ ah->nfCalHist[i].nfCalBuffer[j] =
+ AR_PHY_CCA_MAX_GOOD_VALUE;
+ }
+ }
+ return;
+}
+#endif
+
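+/*
+ * Each GPIO output mux register holds six 5-bit signal-select fields.
+ * Pick the register covering this GPIO and rewrite its field; on parts
+ * older than AR9280 2.0 the MUX1 field layout is adjusted first.
+ */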
+static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
+ u32 gpio, u32 type)
+{
+ int addr;
+ u32 gpio_shift, tmp;
+
+ if (gpio > 11)
+ addr = AR_GPIO_OUTPUT_MUX3;
+ else if (gpio > 5)
+ addr = AR_GPIO_OUTPUT_MUX2;
+ else
+ addr = AR_GPIO_OUTPUT_MUX1;
+
+ gpio_shift = (gpio % 6) * 5;
+
+ if (AR_SREV_9280_20_OR_LATER(ah)
+ || (addr != AR_GPIO_OUTPUT_MUX1)) {
+ REG_RMW(ah, addr, (type << gpio_shift),
+ (0x1f << gpio_shift));
+ } else {
+ tmp = REG_READ(ah, addr);
+ tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
+ tmp &= ~(0x1f << gpio_shift);
+ tmp |= (type << gpio_shift);
+ REG_WRITE(ah, addr, tmp);
+ }
+}
+
+static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
+ enum ath9k_gpio_output_mux_type
+ halSignalType)
+{
+ u32 ah_signal_type;
+ u32 gpio_shift;
+
+ static const u32 MuxSignalConversionTable[] = {
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT,
+ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
+ AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
+ AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
+ AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
+ };
+
+ if ((halSignalType >= 0)
+ && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable)))
+ ah_signal_type = MuxSignalConversionTable[halSignalType];
+ else
+ return false;
+
+ ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
+
+ gpio_shift = 2 * gpio;
+
+ REG_RMW(ah,
+ AR_GPIO_OE_OUT,
+ (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
+ (AR_GPIO_OE_OUT_DRV << gpio_shift));
+
+ return true;
+}
+
+static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio,
+ u32 val)
+{
+ REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
+ AR_GPIO_BIT(gpio));
+ return true;
+}
+
+static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
+{
+ if (gpio >= ah->ah_caps.num_gpio_pins)
+ return 0xffffffff;
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ return (MS(REG_READ(ah, AR_GPIO_IN_OUT),
+ AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0;
+ } else {
+ return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) &
+ AR_GPIO_BIT(gpio)) != 0;
+ }
+}
+
+static inline int ath9k_hw_post_attach(struct ath_hal *ah)
+{
+ int ecode;
+
+ if (!ath9k_hw_chip_test(ah)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "%s: hardware self-test failed\n", __func__);
+ return -ENODEV;
+ }
+
+ ecode = ath9k_hw_rf_claim(ah);
+ if (ecode != 0)
+ return ecode;
+
+ ecode = ath9k_hw_eeprom_attach(ah);
+ if (ecode != 0)
+ return ecode;
+ ecode = ath9k_hw_rfattach(ah);
+ if (ecode != 0)
+ return ecode;
+
+ if (!AR_SREV_9100(ah)) {
+ ath9k_hw_ani_setup(ah);
+ ath9k_hw_ani_attach(ah);
+ }
+ return 0;
+}
+
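+/*
+ * Per-device INI override: on AR9280 PCI cards with a new enough EEPROM
+ * the pwdclkind bits of register 0x7894 are patched from the EEPROM
+ * before the value is returned to the caller.
+ */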
+static u32 ath9k_hw_ini_fixup(struct ath_hal *ah,
+ struct ar5416_eeprom *pEepData,
+ u32 reg, u32 value)
+{
+ struct base_eep_header *pBase = &(pEepData->baseEepHeader);
+
+ switch (ah->ah_devid) {
+ case AR9280_DEVID_PCI:
+ if (reg == 0x7894) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ "ini VAL: %x EEPROM: %x\n", value,
+ (pBase->version & 0xff));
+
+ if ((pBase->version & 0xff) > 0x0a) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ "PWDCLKIND: %d\n",
+ pBase->pwdclkind);
+ value &= ~AR_AN_TOP2_PWDCLKIND;
+ value |= AR_AN_TOP2_PWDCLKIND & (pBase->
+ pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
+ } else {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ "PWDCLKIND Earlier Rev\n");
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ "final ini VAL: %x\n", value);
+ }
+ break;
+ }
+ return value;
+}
+
+static bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ u16 capField = 0, eeval;
+
+ eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_0);
+
+ ah->ah_currentRD = eeval;
+
+ eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_1);
+ ah->ah_currentRDExt = eeval;
+
+ capField = ath9k_hw_get_eeprom(ahp, EEP_OP_CAP);
+
+ if (ah->ah_opmode != ATH9K_M_HOSTAP &&
+ ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) {
+ if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65)
+ ah->ah_currentRD += 5;
+ else if (ah->ah_currentRD == 0x41)
+ ah->ah_currentRD = 0x43;
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: regdomain mapped to 0x%x\n", __func__,
+ ah->ah_currentRD);
+ }
+
+ eeval = ath9k_hw_get_eeprom(ahp, EEP_OP_MODE);
+ bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
+
+ if (eeval & AR5416_OPFLAGS_11A) {
+ set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
+ if (ah->ah_config.ht_enable) {
+ if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
+ set_bit(ATH9K_MODE_11NA_HT20,
+ pCap->wireless_modes);
+ if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
+ set_bit(ATH9K_MODE_11NA_HT40PLUS,
+ pCap->wireless_modes);
+ set_bit(ATH9K_MODE_11NA_HT40MINUS,
+ pCap->wireless_modes);
+ }
+ }
+ }
+
+ if (eeval & AR5416_OPFLAGS_11G) {
+ set_bit(ATH9K_MODE_11B, pCap->wireless_modes);
+ set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
+ if (ah->ah_config.ht_enable) {
+ if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
+ set_bit(ATH9K_MODE_11NG_HT20,
+ pCap->wireless_modes);
+ if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
+ set_bit(ATH9K_MODE_11NG_HT40PLUS,
+ pCap->wireless_modes);
+ set_bit(ATH9K_MODE_11NG_HT40MINUS,
+ pCap->wireless_modes);
+ }
+ }
+ }
+
+ pCap->tx_chainmask = ath9k_hw_get_eeprom(ahp, EEP_TX_MASK);
+ if ((ah->ah_isPciExpress)
+ || (eeval & AR5416_OPFLAGS_11A)) {
+ pCap->rx_chainmask =
+ ath9k_hw_get_eeprom(ahp, EEP_RX_MASK);
+ } else {
+ pCap->rx_chainmask =
+ (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7;
+ }
+
+ if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0)))
+ ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA;
+
+ pCap->low_2ghz_chan = 2312;
+ pCap->high_2ghz_chan = 2732;
+
+ pCap->low_5ghz_chan = 4920;
+ pCap->high_5ghz_chan = 6100;
+
+ pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP;
+ pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
+ pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
+
+ pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
+ pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
+ pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
+
+ pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
+
+ if (ah->ah_config.ht_enable)
+ pCap->hw_caps |= ATH9K_HW_CAP_HT;
+ else
+ pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
+
+ pCap->hw_caps |= ATH9K_HW_CAP_GTT;
+ pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
+ pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
+ pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
+
+ if (capField & AR_EEPROM_EEPCAP_MAXQCU)
+ pCap->total_queues =
+ MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
+ else
+ pCap->total_queues = ATH9K_NUM_TX_QUEUES;
+
+ if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
+ pCap->keycache_size =
+ 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
+ else
+ pCap->keycache_size = AR_KEYTABLE_SIZE;
+
+ pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
+ pCap->num_mr_retries = 4;
+ pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ pCap->num_gpio_pins = AR928X_NUM_GPIO;
+ else
+ pCap->num_gpio_pins = AR_NUM_GPIO;
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_WOW;
+ pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
+ } else {
+ pCap->hw_caps &= ~ATH9K_HW_CAP_WOW;
+ pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
+ }
+
+ if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_CST;
+ pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
+ } else {
+ pCap->rts_aggr_limit = (8 * 1024);
+ }
+
+ pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
+
+ ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
+ if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
+ ahp->ah_gpioSelect =
+ MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
+ ahp->ah_polarity =
+ MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
+
+ ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true,
+ NULL);
+ pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
+ }
+
+ if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
+ (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
+ (ah->ah_macVersion == AR_SREV_VERSION_9160) ||
+ (ah->ah_macVersion == AR_SREV_VERSION_9100) ||
+ (ah->ah_macVersion == AR_SREV_VERSION_9280))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+
+ if (AR_SREV_9280(ah))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
+
+ if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) {
+ pCap->reg_cap =
+ AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
+ AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
+ AR_EEPROM_EEREGCAP_EN_KK_U2 |
+ AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
+ } else {
+ pCap->reg_cap =
+ AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
+ AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
+ }
+
+ pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
+
+ pCap->num_antcfg_5ghz =
+ ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_5GHZ);
+ pCap->num_antcfg_2ghz =
+ ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_2GHZ);
+
+ return true;
+}
+
+static void ar5416DisablePciePhy(struct ath_hal *ah)
+{
+ if (!AR_SREV_9100(ah))
+ return;
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
+
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+}
+
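+/* Full sleep: drop the force-wake request, reset the AHB/host interface on
+ non-AR9100 parts and stop the RTC */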
+static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip)
+{
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+ if (setChip) {
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ if (!AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+
+ REG_CLR_BIT(ah, (u16) (AR_RTC_RESET),
+ AR_RTC_RESET_EN);
+ }
+}
+
+static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip)
+{
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+ if (setChip) {
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_ON_INT);
+ } else {
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ }
+ }
+}
+
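+/* Wake the chip: assert force-wake and poll AR_RTC_STATUS until it reports ON,
+ retrying for up to POWER_UP_TIME microseconds */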
+static bool ath9k_hw_set_power_awake(struct ath_hal *ah,
+ int setChip)
+{
+ u32 val;
+ int i;
+
+ if (setChip) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
+ AR_RTC_STATUS_SHUTDOWN) {
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
+ return false;
+ }
+ }
+ if (AR_SREV_9100(ah))
+ REG_SET_BIT(ah, AR_RTC_RESET,
+ AR_RTC_RESET_EN);
+
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ udelay(50);
+
+ for (i = POWER_UP_TIME / 50; i > 0; i--) {
+ val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
+ if (val == AR_RTC_STATUS_ON)
+ break;
+ udelay(50);
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ }
+ if (i == 0) {
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ "%s: Failed to wakeup in %uus\n",
+ __func__, POWER_UP_TIME);
+ return false;
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+ return true;
+}
+
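+/* Switch the chip between the AWAKE, FULL-SLEEP and NETWORK SLEEP power states */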
+bool ath9k_hw_setpower(struct ath_hal *ah,
+ enum ath9k_power_mode mode)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ static const char *modes[] = {
+ "AWAKE",
+ "FULL-SLEEP",
+ "NETWORK SLEEP",
+ "UNDEFINED"
+ };
+ int status = true, setChip = true;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s: %s -> %s (%s)\n", __func__,
+ modes[ahp->ah_powerMode], modes[mode],
+ setChip ? "set chip " : "");
+
+ switch (mode) {
+ case ATH9K_PM_AWAKE:
+ status = ath9k_hw_set_power_awake(ah, setChip);
+ break;
+ case ATH9K_PM_FULL_SLEEP:
+ ath9k_set_power_sleep(ah, setChip);
+ ahp->ah_chipFullSleep = true;
+ break;
+ case ATH9K_PM_NETWORK_SLEEP:
+ ath9k_set_power_network_sleep(ah, setChip);
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ "%s: unknown power mode %u\n", __func__, mode);
+ return false;
+ }
+ ahp->ah_powerMode = mode;
+ return status;
+}
+
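+/* Allocate the 5416 HAL state, reset and wake the chip, select the per-chip
+ INI tables and fill in the capability structure */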
+static struct ath_hal *ath9k_hw_do_attach(u16 devid,
+ struct ath_softc *sc,
+ void __iomem *mem,
+ int *status)
+{
+ struct ath_hal_5416 *ahp;
+ struct ath_hal *ah;
+ int ecode;
+#ifndef CONFIG_SLOW_ANT_DIV
+ u32 i;
+ u32 j;
+#endif
+
+ ahp = ath9k_hw_newstate(devid, sc, mem, status);
+ if (ahp == NULL)
+ return NULL;
+
+ ah = &ahp->ah;
+
+ ath9k_hw_set_defaults(ah);
+
+ if (ah->ah_config.intr_mitigation != 0)
+ ahp->ah_intrMitigation = true;
+
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't reset chip\n",
+ __func__);
+ ecode = -EIO;
+ goto bad;
+ }
+
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't wakeup chip\n",
+ __func__);
+ ecode = -EIO;
+ goto bad;
+ }
+
+ if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
+ if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) {
+ ah->ah_config.serialize_regmode =
+ SER_REG_MODE_ON;
+ } else {
+ ah->ah_config.serialize_regmode =
+ SER_REG_MODE_OFF;
+ }
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s: serialize_regmode is %d\n",
+ __func__, ah->ah_config.serialize_regmode);
+
+ if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) &&
+ (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) &&
+ (ah->ah_macVersion != AR_SREV_VERSION_9160) &&
+ (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah))) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s: Mac Chip Rev 0x%02x.%x is not supported by "
+ "this driver\n", __func__,
+ ah->ah_macVersion, ah->ah_macRev);
+ ecode = -EOPNOTSUPP;
+ goto bad;
+ }
+
+ if (AR_SREV_9100(ah)) {
+ ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
+ ahp->ah_suppCals = IQ_MISMATCH_CAL;
+ ah->ah_isPciExpress = false;
+ }
+ ah->ah_phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
+
+ if (AR_SREV_9160_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ ahp->ah_iqCalData.calData = &iq_cal_single_sample;
+ ahp->ah_adcGainCalData.calData =
+ &adc_gain_cal_single_sample;
+ ahp->ah_adcDcCalData.calData =
+ &adc_dc_cal_single_sample;
+ ahp->ah_adcDcCalInitData.calData =
+ &adc_init_dc_cal;
+ } else {
+ ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
+ ahp->ah_adcGainCalData.calData =
+ &adc_gain_cal_multi_sample;
+ ahp->ah_adcDcCalData.calData =
+ &adc_dc_cal_multi_sample;
+ ahp->ah_adcDcCalInitData.calData =
+ &adc_init_dc_cal;
+ }
+ ahp->ah_suppCals =
+ ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+ }
+
+ if (AR_SREV_9160(ah)) {
+ ah->ah_config.enable_ani = 1;
+ ahp->ah_ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
+ ATH9K_ANI_FIRSTEP_LEVEL);
+ } else {
+ ahp->ah_ani_function = ATH9K_ANI_ALL;
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ ahp->ah_ani_function &=
+ ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
+ }
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s: This Mac Chip Rev 0x%02x.%x is supported\n",
+ __func__, ah->ah_macVersion, ah->ah_macRev);
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2,
+ ARRAY_SIZE(ar9280Modes_9280_2), 6);
+ INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2,
+ ARRAY_SIZE(ar9280Common_9280_2), 2);
+
+ if (ah->ah_config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
+ ar9280PciePhy_clkreq_off_L1_9280,
+ ARRAY_SIZE
+ (ar9280PciePhy_clkreq_off_L1_9280),
+ 2);
+ } else {
+ INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280,
+ ARRAY_SIZE
+ (ar9280PciePhy_clkreq_always_on_L1_9280),
+ 2);
+ }
+ INIT_INI_ARRAY(&ahp->ah_iniModesAdditional,
+ ar9280Modes_fast_clock_9280_2,
+ ARRAY_SIZE(ar9280Modes_fast_clock_9280_2),
+ 3);
+ } else if (AR_SREV_9280_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280,
+ ARRAY_SIZE(ar9280Modes_9280), 6);
+ INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280,
+ ARRAY_SIZE(ar9280Common_9280), 2);
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9160,
+ ARRAY_SIZE(ar5416Modes_9160), 6);
+ INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9160,
+ ARRAY_SIZE(ar5416Common_9160), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9160,
+ ARRAY_SIZE(ar5416Bank0_9160), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9160,
+ ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9160,
+ ARRAY_SIZE(ar5416Bank1_9160), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9160,
+ ARRAY_SIZE(ar5416Bank2_9160), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9160,
+ ARRAY_SIZE(ar5416Bank3_9160), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9160,
+ ARRAY_SIZE(ar5416Bank6_9160), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9160,
+ ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9160,
+ ARRAY_SIZE(ar5416Bank7_9160), 2);
+ if (AR_SREV_9160_11(ah)) {
+ INIT_INI_ARRAY(&ahp->ah_iniAddac,
+ ar5416Addac_91601_1,
+ ARRAY_SIZE(ar5416Addac_91601_1), 2);
+ } else {
+ INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9160,
+ ARRAY_SIZE(ar5416Addac_9160), 2);
+ }
+ } else if (AR_SREV_9100_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9100,
+ ARRAY_SIZE(ar5416Modes_9100), 6);
+ INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9100,
+ ARRAY_SIZE(ar5416Common_9100), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9100,
+ ARRAY_SIZE(ar5416Bank0_9100), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9100,
+ ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9100,
+ ARRAY_SIZE(ar5416Bank1_9100), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9100,
+ ARRAY_SIZE(ar5416Bank2_9100), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9100,
+ ARRAY_SIZE(ar5416Bank3_9100), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9100,
+ ARRAY_SIZE(ar5416Bank6_9100), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9100,
+ ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9100,
+ ARRAY_SIZE(ar5416Bank7_9100), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9100,
+ ARRAY_SIZE(ar5416Addac_9100), 2);
+ } else {
+ INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes,
+ ARRAY_SIZE(ar5416Modes), 6);
+ INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common,
+ ARRAY_SIZE(ar5416Common), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0,
+ ARRAY_SIZE(ar5416Bank0), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain,
+ ARRAY_SIZE(ar5416BB_RfGain), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1,
+ ARRAY_SIZE(ar5416Bank1), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2,
+ ARRAY_SIZE(ar5416Bank2), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3,
+ ARRAY_SIZE(ar5416Bank3), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6,
+ ARRAY_SIZE(ar5416Bank6), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC,
+ ARRAY_SIZE(ar5416Bank6TPC), 3);
+ INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7,
+ ARRAY_SIZE(ar5416Bank7), 2);
+ INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac,
+ ARRAY_SIZE(ar5416Addac), 2);
+ }
+
+ if (ah->ah_isPciExpress)
+ ath9k_hw_configpcipowersave(ah, 0);
+ else
+ ar5416DisablePciePhy(ah);
+
+ ecode = ath9k_hw_post_attach(ah);
+ if (ecode != 0)
+ goto bad;
+
+#ifndef CONFIG_SLOW_ANT_DIV
+ if (ah->ah_devid == AR9280_DEVID_PCI) {
+ for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
+ u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
+
+ for (j = 1; j < ahp->ah_iniModes.ia_columns; j++) {
+ u32 val = INI_RA(&ahp->ah_iniModes, i, j);
+
+ INI_RA(&ahp->ah_iniModes, i, j) =
+ ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom,
+ reg, val);
+ }
+ }
+ }
+#endif
+
+ if (!ath9k_hw_fill_cap_info(ah)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s:failed ath9k_hw_fill_cap_info\n", __func__);
+ ecode = -EINVAL;
+ goto bad;
+ }
+
+ ecode = ath9k_hw_init_macaddr(ah);
+ if (ecode != 0) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s: failed initializing mac address\n",
+ __func__);
+ goto bad;
+ }
+
+ if (AR_SREV_9285(ah))
+ ah->ah_txTrigLevel = (AR_FTRIG_256B >> AR_FTRIG_S);
+ else
+ ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S);
+
+#ifndef ATH_NF_PER_CHAN
+
+ ath9k_init_nfcal_hist_buffer(ah);
+#endif
+
+ return ah;
+
+bad:
+ if (ahp)
+ ath9k_hw_detach((struct ath_hal *) ahp);
+ if (status)
+ *status = ecode;
+ return NULL;
+}
+
+void ath9k_hw_detach(struct ath_hal *ah)
+{
+ if (!AR_SREV_9100(ah))
+ ath9k_hw_ani_detach(ah);
+ ath9k_hw_rfdetach(ah);
+
+ ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
+ kfree(ah);
+}
+
+bool ath9k_get_channel_edges(struct ath_hal *ah,
+ u16 flags, u16 *low,
+ u16 *high)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+
+ if (flags & CHANNEL_5GHZ) {
+ *low = pCap->low_5ghz_chan;
+ *high = pCap->high_5ghz_chan;
+ return true;
+ }
+ if ((flags & CHANNEL_2GHZ)) {
+ *low = pCap->low_2ghz_chan;
+ *high = pCap->high_2ghz_chan;
+
+ return true;
+ }
+ return false;
+}
+
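+/* Interpolate the measured (power, VPD) intercepts onto an evenly spaced
+ power grid (one entry per half dB) */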
+static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin,
+ u8 pwrMax,
+ u8 *pPwrList,
+ u8 *pVpdList,
+ u16 numIntercepts,
+ u8 *pRetVpdList)
+{
+ u16 i, k;
+ u8 currPwr = pwrMin;
+ u16 idxL = 0, idxR = 0;
+
+ for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
+ ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
+ numIntercepts, &(idxL),
+ &(idxR));
+ if (idxR < 1)
+ idxR = 1;
+ if (idxL == numIntercepts - 1)
+ idxL = (u16) (numIntercepts - 2);
+ if (pPwrList[idxL] == pPwrList[idxR])
+ k = pVpdList[idxL];
+ else
+ k = (u16) (((currPwr -
+ pPwrList[idxL]) *
+ pVpdList[idxR] +
+ (pPwrList[idxR] -
+ currPwr) * pVpdList[idxL]) /
+ (pPwrList[idxR] -
+ pPwrList[idxL]));
+ pRetVpdList[i] = (u8) k;
+ currPwr += 2;
+ }
+
+ return true;
+}
+
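+/* Build the PD gain boundaries and PDADC table for a channel, interpolating
+ between the two nearest EEPROM calibration piers */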
+static inline void
+ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ struct cal_data_per_freq *pRawDataSet,
+ u8 *bChans,
+ u16 availPiers,
+ u16 tPdGainOverlap,
+ int16_t *pMinCalPower,
+ u16 *pPdGainBoundaries,
+ u8 *pPDADCValues,
+ u16 numXpdGains)
+{
+ int i, j, k;
+ int16_t ss;
+ u16 idxL = 0, idxR = 0, numPiers;
+ static u8 vpdTableL[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+ static u8 vpdTableR[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+ static u8 vpdTableI[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+
+ u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
+ u8 minPwrT4[AR5416_NUM_PD_GAINS];
+ u8 maxPwrT4[AR5416_NUM_PD_GAINS];
+ int16_t vpdStep;
+ int16_t tmpVal;
+ u16 sizeCurrVpdTable, maxIndex, tgtIndex;
+ bool match;
+ int16_t minDelta = 0;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ for (numPiers = 0; numPiers < availPiers; numPiers++) {
+ if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
+ break;
+ }
+
+ match = ath9k_hw_get_lower_upper_index(
+ (u8) FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
+ bChans, numPiers, &idxL, &idxR);
+
+ if (match) {
+ for (i = 0; i < numXpdGains; i++) {
+ minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
+ maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ pRawDataSet[idxL].pwrPdg[i],
+ pRawDataSet[idxL].vpdPdg[i],
+ AR5416_PD_GAIN_ICEPTS,
+ vpdTableI[i]);
+ }
+ } else {
+ for (i = 0; i < numXpdGains; i++) {
+ pVpdL = pRawDataSet[idxL].vpdPdg[i];
+ pPwrL = pRawDataSet[idxL].pwrPdg[i];
+ pVpdR = pRawDataSet[idxR].vpdPdg[i];
+ pPwrR = pRawDataSet[idxR].pwrPdg[i];
+
+ minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
+
+ maxPwrT4[i] =
+ min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
+ pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
+
+
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ pPwrL, pVpdL,
+ AR5416_PD_GAIN_ICEPTS,
+ vpdTableL[i]);
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ pPwrR, pVpdR,
+ AR5416_PD_GAIN_ICEPTS,
+ vpdTableR[i]);
+
+ for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
+ vpdTableI[i][j] = (u8) ath9k_hw_interpolate(
+ (u16) FREQ2FBIN(centers.synth_center,
+ IS_CHAN_2GHZ(chan)),
+ bChans[idxL], bChans[idxR],
+ vpdTableL[i][j], vpdTableR[i][j]);
+ }
+ }
+ }
+
+ *pMinCalPower = (int16_t) (minPwrT4[0] / 2);
+
+ k = 0;
+ for (i = 0; i < numXpdGains; i++) {
+ if (i == (numXpdGains - 1))
+ pPdGainBoundaries[i] =
+ (u16) (maxPwrT4[i] / 2);
+ else
+ pPdGainBoundaries[i] =
+ (u16) ((maxPwrT4[i] +
+ minPwrT4[i + 1]) / 4);
+
+ pPdGainBoundaries[i] =
+ min((u16) AR5416_MAX_RATE_POWER,
+ pPdGainBoundaries[i]);
+
+ if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
+ minDelta = pPdGainBoundaries[0] - 23;
+ pPdGainBoundaries[0] = 23;
+ } else {
+ minDelta = 0;
+ }
+
+ if (i == 0) {
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ss = (int16_t) (0 - (minPwrT4[i] / 2));
+ else
+ ss = 0;
+ } else {
+ ss = (int16_t) ((pPdGainBoundaries[i - 1] -
+ (minPwrT4[i] / 2)) -
+ tPdGainOverlap + 1 + minDelta);
+ }
+ vpdStep = (int16_t) (vpdTableI[i][1] - vpdTableI[i][0]);
+ vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);
+
+ while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ tmpVal = (int16_t) (vpdTableI[i][0] + ss * vpdStep);
+ pPDADCValues[k++] =
+ (u8) ((tmpVal < 0) ? 0 : tmpVal);
+ ss++;
+ }
+
+ sizeCurrVpdTable =
+ (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
+ tgtIndex = (u8) (pPdGainBoundaries[i] + tPdGainOverlap -
+ (minPwrT4[i] / 2));
+ maxIndex = (tgtIndex <
+ sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable;
+
+ while ((ss < maxIndex)
+ && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ pPDADCValues[k++] = vpdTableI[i][ss++];
+ }
+
+ vpdStep = (int16_t) (vpdTableI[i][sizeCurrVpdTable - 1] -
+ vpdTableI[i][sizeCurrVpdTable - 2]);
+ vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);
+
+ if (tgtIndex > maxIndex) {
+ while ((ss <= tgtIndex)
+ && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ tmpVal = (int16_t) ((vpdTableI[i]
+ [sizeCurrVpdTable -
+ 1] + (ss - maxIndex +
+ 1) * vpdStep));
+ pPDADCValues[k++] = (u8) ((tmpVal >
+ 255) ? 255 : tmpVal);
+ ss++;
+ }
+ }
+ }
+
+ while (i < AR5416_PD_GAINS_IN_MASK) {
+ pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
+ i++;
+ }
+
+ while (k < AR5416_NUM_PDADC_VALUES) {
+ pPDADCValues[k] = pPDADCValues[k - 1];
+ k++;
+ }
+ return;
+}
+
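+/* Program the power calibration tables: pick the xpd gains from the EEPROM
+ mask and write the per-chain gain boundaries and PDADC values */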
+static inline bool
+ath9k_hw_set_power_cal_table(struct ath_hal *ah,
+ struct ar5416_eeprom *pEepData,
+ struct ath9k_channel *chan,
+ int16_t *pTxPowerIndexOffset)
+{
+ struct cal_data_per_freq *pRawDataset;
+ u8 *pCalBChans = NULL;
+ u16 pdGainOverlap_t2;
+ static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
+ u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
+ u16 numPiers, i, j;
+ int16_t tMinCalPower;
+ u16 numXpdGain, xpdMask;
+ u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
+ u32 reg32, regOffset, regChainOffset;
+ int16_t modalIdx;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
+ xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
+
+ if ((pEepData->baseEepHeader.
+ version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ pdGainOverlap_t2 =
+ pEepData->modalHeader[modalIdx].pdGainOverlap;
+ } else {
+ pdGainOverlap_t2 =
+ (u16) (MS
+ (REG_READ(ah, AR_PHY_TPCRG5),
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
+ }
+
+ if (IS_CHAN_2GHZ(chan)) {
+ pCalBChans = pEepData->calFreqPier2G;
+ numPiers = AR5416_NUM_2G_CAL_PIERS;
+ } else {
+ pCalBChans = pEepData->calFreqPier5G;
+ numPiers = AR5416_NUM_5G_CAL_PIERS;
+ }
+
+ numXpdGain = 0;
+
+ for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
+ if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
+ if (numXpdGain >= AR5416_NUM_PD_GAINS)
+ break;
+ xpdGainValues[numXpdGain] =
+ (u16) (AR5416_PD_GAINS_IN_MASK - i);
+ numXpdGain++;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
+ (numXpdGain - 1) & 0x3);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
+ xpdGainValues[0]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
+ xpdGainValues[1]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
+ xpdGainValues[2]);
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (AR_SREV_5416_V20_OR_LATER(ah) &&
+ (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
+ && (i != 0)) {
+ regChainOffset = (i == 1) ? 0x2000 : 0x1000;
+ } else
+ regChainOffset = i * 0x1000;
+ if (pEepData->baseEepHeader.txMask & (1 << i)) {
+ if (IS_CHAN_2GHZ(chan))
+ pRawDataset = pEepData->calPierData2G[i];
+ else
+ pRawDataset = pEepData->calPierData5G[i];
+
+ ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
+ pRawDataset,
+ pCalBChans,
+ numPiers,
+ pdGainOverlap_t2,
+ &tMinCalPower,
+ gainBoundaries,
+ pdadcValues,
+ numXpdGain);
+
+ if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
+
+ REG_WRITE(ah,
+ AR_PHY_TPCRG5 + regChainOffset,
+ SM(pdGainOverlap_t2,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
+ | SM(gainBoundaries[0],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
+ | SM(gainBoundaries[1],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
+ | SM(gainBoundaries[2],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
+ | SM(gainBoundaries[3],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
+ }
+
+ regOffset =
+ AR_PHY_BASE + (672 << 2) + regChainOffset;
+ for (j = 0; j < 32; j++) {
+ reg32 =
+ ((pdadcValues[4 * j + 0] & 0xFF) << 0)
+ | ((pdadcValues[4 * j + 1] & 0xFF) <<
+ 8) | ((pdadcValues[4 * j + 2] &
+ 0xFF) << 16) |
+ ((pdadcValues[4 * j + 3] & 0xFF) <<
+ 24);
+ REG_WRITE(ah, regOffset, reg32);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
+ "PDADC (%d,%4x): %4.4x %8.8x\n",
+ i, regChainOffset, regOffset,
+ reg32);
+ DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
+ "PDADC: Chain %d | PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d | PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d |\n",
+ i, 4 * j, pdadcValues[4 * j],
+ 4 * j + 1, pdadcValues[4 * j + 1],
+ 4 * j + 2, pdadcValues[4 * j + 2],
+ 4 * j + 3,
+ pdadcValues[4 * j + 3]);
+
+ regOffset += 4;
+ }
+ }
+ }
+ *pTxPowerIndexOffset = 0;
+
+ return true;
+}
+
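+/* Program the PCIe SERDES for power save; the register values depend on the
+ chip revision and the clock-request configuration */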
+void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u8 i;
+
+ if (!ah->ah_isPciExpress)
+ return;
+
+ if (ah->ah_config.pcie_powersave_enable == 2)
+ return;
+
+ if (restore)
+ return;
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0),
+ INI_RA(&ahp->ah_iniPcieSerdes, i, 1));
+ }
+ udelay(1000);
+ } else if (AR_SREV_9280(ah)
+ && (ah->ah_macRev == AR_SREV_REVISION_9280_10)) {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+
+ if (ah->ah_config.pcie_clock_req)
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
+ else
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
+
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ udelay(1000);
+ } else {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+ }
+
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ if (ah->ah_config.pcie_waen) {
+ REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen);
+ } else {
+ if (AR_SREV_9280(ah))
+ REG_WRITE(ah, AR_WA, 0x0040073f);
+ else
+ REG_WRITE(ah, AR_WA, 0x0000073f);
+ }
+}
+
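+/* Find the legacy (CCK/OFDM) target powers for this channel in the EEPROM
+ calibration list, interpolating between neighbouring piers when there is
+ no exact match */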
+static inline void
+ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ struct cal_target_power_leg *powInfo,
+ u16 numChannels,
+ struct cal_target_power_leg *pNewPower,
+ u16 numRates,
+ bool isExtTarget)
+{
+ u16 clo, chi;
+ int i;
+ int matchIndex = -1, lowIndex = -1;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;
+
+ if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ matchIndex = 0;
+ } else {
+ for (i = 0; (i < numChannels)
+ && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
+ if (freq ==
+ ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ matchIndex = i;
+ break;
+ } else if ((freq <
+ ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan)))
+ && (freq >
+ ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
+ IS_CHAN_2GHZ(chan)))) {
+ lowIndex = i - 1;
+ break;
+ }
+ }
+ if ((matchIndex == -1) && (lowIndex == -1))
+ matchIndex = i - 1;
+ }
+
+ if (matchIndex != -1) {
+ *pNewPower = powInfo[matchIndex];
+ } else {
+ clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
+ IS_CHAN_2GHZ(chan));
+ chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
+ IS_CHAN_2GHZ(chan));
+
+ for (i = 0; i < numRates; i++) {
+ pNewPower->tPow2x[i] =
+ (u8) ath9k_hw_interpolate(freq, clo, chi,
+ powInfo[lowIndex].tPow2x[i],
+ powInfo[lowIndex + 1].tPow2x[i]);
+ }
+ }
+}
+
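+/* Same lookup for the HT target powers (HT20 or HT40 centre, selected by
+ isHt40Target) */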
+static inline void
+ath9k_hw_get_target_powers(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ struct cal_target_power_ht *powInfo,
+ u16 numChannels,
+ struct cal_target_power_ht *pNewPower,
+ u16 numRates,
+ bool isHt40Target)
+{
+ u16 clo, chi;
+ int i;
+ int matchIndex = -1, lowIndex = -1;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = isHt40Target ? centers.synth_center : centers.ctl_center;
+
+ if (freq <=
+ ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
+ matchIndex = 0;
+ } else {
+ for (i = 0; (i < numChannels)
+ && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
+ if (freq ==
+ ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ matchIndex = i;
+ break;
+ } else
+ if ((freq <
+ ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan)))
+ && (freq >
+ ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
+ IS_CHAN_2GHZ(chan)))) {
+ lowIndex = i - 1;
+ break;
+ }
+ }
+ if ((matchIndex == -1) && (lowIndex == -1))
+ matchIndex = i - 1;
+ }
+
+ if (matchIndex != -1) {
+ *pNewPower = powInfo[matchIndex];
+ } else {
+ clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
+ IS_CHAN_2GHZ(chan));
+ chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
+ IS_CHAN_2GHZ(chan));
+
+ for (i = 0; i < numRates; i++) {
+ pNewPower->tPow2x[i] =
+ (u8) ath9k_hw_interpolate(freq, clo, chi,
+ powInfo[lowIndex].tPow2x[i],
+ powInfo[lowIndex + 1].tPow2x[i]);
+ }
+ }
+}
+
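+/* Look up twice the maximum allowed edge power for this frequency in the
+ CTL band-edge list */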
+static inline u16
+ath9k_hw_get_max_edge_power(u16 freq,
+ struct cal_ctl_edges *pRdEdgesPower,
+ bool is2GHz)
+{
+ u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ int i;
+
+ for (i = 0; (i < AR5416_NUM_BAND_EDGES)
+ && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
+ if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
+ is2GHz)) {
+ twiceMaxEdgePower = pRdEdgesPower[i].tPower;
+ break;
+ } else if ((i > 0)
+ && (freq <
+ ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
+ is2GHz))) {
+ if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
+ is2GHz) < freq
+ && pRdEdgesPower[i - 1].flag) {
+ twiceMaxEdgePower =
+ pRdEdgesPower[i - 1].tPower;
+ }
+ break;
+ }
+ }
+ return twiceMaxEdgePower;
+}
+
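+/* Combine the regulatory limits, CTL band edges and EEPROM target powers
+ into the per-rate power array */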
+static inline bool
+ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
+ struct ar5416_eeprom *pEepData,
+ struct ath9k_channel *chan,
+ int16_t *ratesArray,
+ u16 cfgCtl,
+ u8 AntennaReduction,
+ u8 twiceMaxRegulatoryPower,
+ u8 powerLimit)
+{
+ u8 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ static const u16 tpScaleReductionTable[5] =
+ { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
+
+ int i;
+ int8_t twiceLargestAntenna;
+ struct cal_ctl_data *rep;
+ struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
+ 0, { 0, 0, 0, 0}
+ };
+ struct cal_target_power_leg targetPowerOfdmExt = {
+ 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
+ 0, { 0, 0, 0, 0 }
+ };
+ struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
+ 0, {0, 0, 0, 0}
+ };
+ u8 scaledPower = 0, minCtlPower, maxRegAllowedPower;
+ u16 ctlModesFor11a[] =
+ { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
+ u16 ctlModesFor11g[] =
+ { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
+ CTL_2GHT40
+ };
+ u16 numCtlModes, *pCtlMode, ctlMode, freq;
+ struct chan_centers centers;
+ int tx_chainmask;
+ u8 twiceMinEdgePower;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ tx_chainmask = ahp->ah_txchainmask;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ twiceLargestAntenna = max(
+ pEepData->modalHeader
+ [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
+ pEepData->modalHeader
+ [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
+
+ twiceLargestAntenna = max((u8) twiceLargestAntenna,
+ pEepData->modalHeader
+ [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
+
+ twiceLargestAntenna =
+ (int8_t) min(AntennaReduction - twiceLargestAntenna, 0);
+
+ maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
+
+ if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) {
+ maxRegAllowedPower -=
+ (tpScaleReductionTable[(ah->ah_tpScale)] * 2);
+ }
+
+ scaledPower = min(powerLimit, maxRegAllowedPower);
+
+ switch (ar5416_get_ntxchains(tx_chainmask)) {
+ case 1:
+ break;
+ case 2:
+ scaledPower -=
+ pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
+ pwrDecreaseFor2Chain;
+ break;
+ case 3:
+ scaledPower -=
+ pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
+ pwrDecreaseFor3Chain;
+ break;
+ }
+
+ scaledPower = max(0, (int32_t) scaledPower);
+
+ if (IS_CHAN_2GHZ(chan)) {
+ numCtlModes =
+ ARRAY_SIZE(ctlModesFor11g) -
+ SUB_NUM_CTL_MODES_AT_2G_40;
+ pCtlMode = ctlModesFor11g;
+
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->
+ calTargetPowerCck,
+ AR5416_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCck, 4,
+ false);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->
+ calTargetPower2G,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdm, 4,
+ false);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower2GHT20,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerHt20, 8, false);
+
+ if (IS_CHAN_HT40(chan)) {
+ numCtlModes = ARRAY_SIZE(ctlModesFor11g);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->
+ calTargetPower2GHT40,
+ AR5416_NUM_2G_40_TARGET_POWERS,
+ &targetPowerHt40, 8,
+ true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->
+ calTargetPowerCck,
+ AR5416_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCckExt,
+ 4, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->
+ calTargetPower2G,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdmExt,
+ 4, true);
+ }
+ } else {
+
+ numCtlModes =
+ ARRAY_SIZE(ctlModesFor11a) -
+ SUB_NUM_CTL_MODES_AT_5G_40;
+ pCtlMode = ctlModesFor11a;
+
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->
+ calTargetPower5G,
+ AR5416_NUM_5G_20_TARGET_POWERS,
+ &targetPowerOfdm, 4,
+ false);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower5GHT20,
+ AR5416_NUM_5G_20_TARGET_POWERS,
+ &targetPowerHt20, 8, false);
+
+ if (IS_CHAN_HT40(chan)) {
+ numCtlModes = ARRAY_SIZE(ctlModesFor11a);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->
+ calTargetPower5GHT40,
+ AR5416_NUM_5G_40_TARGET_POWERS,
+ &targetPowerHt40, 8,
+ true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->
+ calTargetPower5G,
+ AR5416_NUM_5G_20_TARGET_POWERS,
+ &targetPowerOfdmExt,
+ 4, true);
+ }
+ }
+
+ for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+ bool isHt40CtlMode =
+ (pCtlMode[ctlMode] == CTL_5GHT40)
+ || (pCtlMode[ctlMode] == CTL_2GHT40);
+ if (isHt40CtlMode)
+ freq = centers.synth_center;
+ else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
+ freq = centers.ext_center;
+ else
+ freq = centers.ctl_center;
+
+ if (ar5416_get_eep_ver(ahp) == 14
+ && ar5416_get_eep_rev(ahp) <= 2)
+ twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
+ "EXT_ADDITIVE %d\n",
+ ctlMode, numCtlModes, isHt40CtlMode,
+ (pCtlMode[ctlMode] & EXT_ADDITIVE));
+
+ for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i];
+ i++) {
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
+ "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
+ "chan %d\n",
+ i, cfgCtl, pCtlMode[ctlMode],
+ pEepData->ctlIndex[i], chan->channel);
+
+ if ((((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ pEepData->ctlIndex[i])
+ ||
+ (((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ ((pEepData->
+ ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
+ rep = &(pEepData->ctlData[i]);
+
+ twiceMinEdgePower =
+ ath9k_hw_get_max_edge_power(freq,
+ rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1],
+ IS_CHAN_2GHZ(chan));
+
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ " MATCH-EE_IDX %d: ch %d is2 %d "
+ "2xMinEdge %d chainmask %d chains %d\n",
+ i, freq, IS_CHAN_2GHZ(chan),
+ twiceMinEdgePower, tx_chainmask,
+ ar5416_get_ntxchains
+ (tx_chainmask));
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+ twiceMaxEdgePower =
+ min(twiceMaxEdgePower,
+ twiceMinEdgePower);
+ } else {
+ twiceMaxEdgePower =
+ twiceMinEdgePower;
+ break;
+ }
+ }
+ }
+
+ minCtlPower = min(twiceMaxEdgePower, scaledPower);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ " SEL-Min ctlMode %d pCtlMode %d "
+ "2xMaxEdge %d sP %d minCtlPwr %d\n",
+ ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+ scaledPower, minCtlPower);
+
+ switch (pCtlMode[ctlMode]) {
+ case CTL_11B:
+ for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x);
+ i++) {
+ targetPowerCck.tPow2x[i] =
+ min(targetPowerCck.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11A:
+ case CTL_11G:
+ for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
+ i++) {
+ targetPowerOfdm.tPow2x[i] =
+ min(targetPowerOfdm.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_5GHT20:
+ case CTL_2GHT20:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x);
+ i++) {
+ targetPowerHt20.tPow2x[i] =
+ min(targetPowerHt20.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11B_EXT:
+ targetPowerCckExt.tPow2x[0] =
+ min(targetPowerCckExt.tPow2x[0], minCtlPower);
+ break;
+ case CTL_11A_EXT:
+ case CTL_11G_EXT:
+ targetPowerOfdmExt.tPow2x[0] =
+ min(targetPowerOfdmExt.tPow2x[0], minCtlPower);
+ break;
+ case CTL_5GHT40:
+ case CTL_2GHT40:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x);
+ i++) {
+ targetPowerHt40.tPow2x[i] =
+ min(targetPowerHt40.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
+ ratesArray[rate18mb] = ratesArray[rate24mb] =
+ targetPowerOfdm.tPow2x[0];
+ ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
+ ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
+ ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
+ ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
+
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
+ ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
+
+ if (IS_CHAN_2GHZ(chan)) {
+ ratesArray[rate1l] = targetPowerCck.tPow2x[0];
+ ratesArray[rate2s] = ratesArray[rate2l] =
+ targetPowerCck.tPow2x[1];
+ ratesArray[rate5_5s] = ratesArray[rate5_5l] =
+ targetPowerCck.tPow2x[2];
+ ratesArray[rate11s] = ratesArray[rate11l] =
+ targetPowerCck.tPow2x[3];
+ }
+ if (IS_CHAN_HT40(chan)) {
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+ ratesArray[rateHt40_0 + i] =
+ targetPowerHt40.tPow2x[i];
+ }
+ ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
+ if (IS_CHAN_2GHZ(chan)) {
+ ratesArray[rateExtCck] =
+ targetPowerCckExt.tPow2x[0];
+ }
+ }
+ return true;
+}
+
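+/* Top-level TX power programming: build the per-rate array, write the
+ calibration tables and then the rate power registers */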
+static int
+ath9k_hw_set_txpower(struct ath_hal *ah,
+ struct ar5416_eeprom *pEepData,
+ struct ath9k_channel *chan,
+ u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 twiceMaxRegulatoryPower,
+ u8 powerLimit)
+{
+ struct modal_eep_header *pModal =
+ &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
+ int16_t ratesArray[Ar5416RateSize];
+ int16_t txPowerIndexOffset = 0;
+ u8 ht40PowerIncForPdadc = 2;
+ int i;
+
+ memset(ratesArray, 0, sizeof(ratesArray));
+
+ if ((pEepData->baseEepHeader.
+ version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
+ }
+
+ if (!ath9k_hw_set_power_per_rate_table(ah, pEepData, chan,
+ &ratesArray[0], cfgCtl,
+ twiceAntennaReduction,
+ twiceMaxRegulatoryPower,
+ powerLimit)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "ath9k_hw_set_txpower: unable to set "
+ "tx power per rate table\n");
+ return -EIO;
+ }
+
+ if (!ath9k_hw_set_power_cal_table
+ (ah, pEepData, chan, &txPowerIndexOffset)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "ath9k_hw_set_txpower: unable to set power table\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
+ ratesArray[i] =
+ (int16_t) (txPowerIndexOffset + ratesArray[i]);
+ if (ratesArray[i] > AR5416_MAX_RATE_POWER)
+ ratesArray[i] = AR5416_MAX_RATE_POWER;
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ for (i = 0; i < Ar5416RateSize; i++)
+ ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
+ }
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
+ ATH9K_POW_SM(ratesArray[rate18mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate12mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate9mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate6mb], 0)
+ );
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
+ ATH9K_POW_SM(ratesArray[rate54mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate48mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate36mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate24mb], 0)
+ );
+
+ if (IS_CHAN_2GHZ(chan)) {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
+ ATH9K_POW_SM(ratesArray[rate2s], 24)
+ | ATH9K_POW_SM(ratesArray[rate2l], 16)
+ | ATH9K_POW_SM(ratesArray[rateXr], 8)
+ | ATH9K_POW_SM(ratesArray[rate1l], 0)
+ );
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
+ ATH9K_POW_SM(ratesArray[rate11s], 24)
+ | ATH9K_POW_SM(ratesArray[rate11l], 16)
+ | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
+ | ATH9K_POW_SM(ratesArray[rate5_5l], 0)
+ );
+ }
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
+ ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)
+ );
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
+ ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)
+ );
+
+ if (IS_CHAN_HT40(chan)) {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
+ ATH9K_POW_SM(ratesArray[rateHt40_3] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_2] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_1] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_0] +
+ ht40PowerIncForPdadc, 0)
+ );
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
+ ATH9K_POW_SM(ratesArray[rateHt40_7] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_6] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_5] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_4] +
+ ht40PowerIncForPdadc, 0)
+ );
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
+ ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
+ | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
+ | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
+ | ATH9K_POW_SM(ratesArray[rateDupCck], 0)
+ );
+ }
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
+ | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)
+ );
+
+ i = rate6mb;
+ if (IS_CHAN_HT40(chan))
+ i = rateHt40_0;
+ else if (IS_CHAN_HT20(chan))
+ i = rateHt20_0;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ah->ah_maxPowerLevel =
+ ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
+ else
+ ah->ah_maxPowerLevel = ratesArray[i];
+
+ return 0;
+}
+
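+/* Split a scaled delta-slope coefficient into the mantissa/exponent form the
+ PHY timing registers expect */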
+static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah,
+ u32 coef_scaled,
+ u32 *coef_mantissa,
+ u32 *coef_exponent)
+{
+ u32 coef_exp, coef_man;
+
+ for (coef_exp = 31; coef_exp > 0; coef_exp--)
+ if ((coef_scaled >> coef_exp) & 0x1)
+ break;
+
+ coef_exp = 14 - (coef_exp - COEF_SCALE_S);
+
+ coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
+
+ *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
+ *coef_exponent = coef_exp - 16;
+}
+
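+/* Program the OFDM delta-slope coefficients from the synthesizer centre
+ frequency; half- and quarter-rate channels scale the reference clock down */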
+static void
+ath9k_hw_set_delta_slope(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
+}
+
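+/* AR9280 spur mitigation: locate the closest EEPROM spur channel and program
+ the pilot, channel and Viterbi mask registers around it */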
+static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int freq;
+ int bin, cur_bin;
+ int bb_spur_off, spur_subchannel_sd;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, newVal;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+ struct chan_centers centers;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ ah->ah_config.spurmode = SPUR_ENABLE_EEPROM;
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);
+
+ if (is2GHz)
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+ else
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - freq;
+
+ if (IS_CHAN_HT40(chan)) {
+ if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 320;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+ newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+ newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+ if (IS_CHAN_HT40(chan)) {
+ if (bb_spur < 0) {
+ spur_subchannel_sd = 1;
+ bb_spur_off = bb_spur + 10;
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur - 10;
+ }
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur;
+ }
+
+ if (IS_CHAN_HT40(chan))
+ spur_delta_phase =
+ ((bb_spur * 262144) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+ else
+ spur_delta_phase =
+ ((bb_spur * 524288) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
+ spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
+
+ newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, newVal);
+
+ newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
+ REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp = abs(cur_vit_mask - bin);
+
+ if (tmp < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
+
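+/* Spur mitigation for pre-AR9280 chips: same approach with the older bin and
+ frequency scaling and no HT40 handling */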
+static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin, cur_bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, new;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+ if ((abs(cur_vit_mask - bin)) < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
+
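+/* Program the RX/calibration and TX chainmasks, applying the chip-specific
+ alternate-chain swap and calibration workarounds */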
+static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int rx_chainmask, tx_chainmask;
+
+ rx_chainmask = ahp->ah_rxchainmask;
+ tx_chainmask = ahp->ah_txchainmask;
+
+ switch (rx_chainmask) {
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ case 0x3:
+ if (((ah)->ah_macVersion <= AR_SREV_VERSION_9160)) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
+ break;
+ }
+ case 0x1:
+ case 0x2:
+ if (!AR_SREV_9280(ah))
+ break;
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
+ if (tx_chainmask == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+ if (AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
+ REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
+}
+
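+/* AR9160 only: patch the ADDAC INI with the XPA bias level from the EEPROM
+ (fixed value or per-frequency table) */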
+static void ath9k_hw_set_addac(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ struct modal_eep_header *pModal;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ar5416_eeprom *eep = &ahp->ah_eeprom;
+ u8 biaslevel;
+
+ if (ah->ah_macVersion != AR_SREV_VERSION_9160)
+ return;
+
+ if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7)
+ return;
+
+ pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+
+ if (pModal->xpaBiasLvl != 0xff) {
+ biaslevel = pModal->xpaBiasLvl;
+ } else {
+
+ u16 resetFreqBin, freqBin, freqCount = 0;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ resetFreqBin =
+ FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan));
+ freqBin = pModal->xpaBiasLvlFreq[0] & 0xff;
+ biaslevel = (u8) (pModal->xpaBiasLvlFreq[0] >> 14);
+
+ freqCount++;
+
+ while (freqCount < 3) {
+ if (pModal->xpaBiasLvlFreq[freqCount] == 0x0)
+ break;
+
+ freqBin = pModal->xpaBiasLvlFreq[freqCount] & 0xff;
+ if (resetFreqBin >= freqBin) {
+ biaslevel = (u8)(pModal->xpaBiasLvlFreq[freqCount] >> 14);

+ } else {
+ break;
+ }
+ freqCount++;
+ }
+ }
+
+ if (IS_CHAN_2GHZ(chan)) {
+ INI_RA(&ahp->ah_iniAddac, 7, 1) =
+ (INI_RA(&ahp->ah_iniAddac, 7, 1) & ~0x18) | (biaslevel << 3);
+ } else {
+ INI_RA(&ahp->ah_iniAddac, 6, 1) =
+ (INI_RA(&ahp->ah_iniAddac, 6, 1) & ~0xc0) | (biaslevel << 6);
+ }
+}
+
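+/*
+ * Convert between MAC clock ticks and microseconds.  The conversion is
+ * based on the clock rate of the current channel mode; in HT40 the MAC
+ * clock runs at twice the base rate.
+ */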
+static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks)
+{
+ if (ah->ah_curchan != NULL)
+ return clks /
+ CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)];
+ else
+ return clks / CLOCK_RATE[ATH9K_MODE_11B];
+}
+
+static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks)
+{
+ struct ath9k_channel *chan = ah->ah_curchan;
+
+ if (chan && IS_CHAN_HT40(chan))
+ return ath9k_hw_mac_usec(ah, clks) / 2;
+ else
+ return ath9k_hw_mac_usec(ah, clks);
+}
+
+static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs)
+{
+ if (ah->ah_curchan != NULL)
+ return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah,
+ ah->ah_curchan)];
+ else
+ return usecs * CLOCK_RATE[ATH9K_MODE_11B];
+}
+
+static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs)
+{
+ struct ath9k_channel *chan = ah->ah_curchan;
+
+ if (chan && IS_CHAN_HT40(chan))
+ return ath9k_hw_mac_clks(ah, usecs) * 2;
+ else
+ return ath9k_hw_mac_clks(ah, usecs);
+}
+
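+/*
+ * Set the hardware ACK timeout (in usec).  The value is cached in the
+ * HAL so it can be restored after a chip reset.
+ */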
+static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad ack timeout %u\n",
+ __func__, us);
+ ahp->ah_acktimeout = (u32) -1;
+ return false;
+ } else {
+ REG_RMW_FIELD(ah, AR_TIME_OUT,
+ AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
+ ahp->ah_acktimeout = us;
+ return true;
+ }
+}
+
+static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad cts timeout %u\n",
+ __func__, us);
+ ahp->ah_ctstimeout = (u32) -1;
+ return false;
+ } else {
+ REG_RMW_FIELD(ah, AR_TIME_OUT,
+ AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
+ ahp->ah_ctstimeout = us;
+ return true;
+ }
+}
+static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah,
+ u32 tu)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (tu > 0xFFFF) {
+ DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
+ "%s: bad global tx timeout %u\n", __func__, tu);
+ ahp->ah_globaltxtimeout = (u32) -1;
+ return false;
+ } else {
+ REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
+ ahp->ah_globaltxtimeout = tu;
+ return true;
+ }
+}
+
+bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad slot time %u\n",
+ __func__, us);
+ ahp->ah_slottime = (u32) -1;
+ return false;
+ } else {
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
+ ahp->ah_slottime = us;
+ return true;
+ }
+}
+
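+/*
+ * Restore user-configurable settings (misc mode, slot time, ACK/CTS
+ * and global TX timeouts) that are lost across a chip reset.
+ */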
+static inline void ath9k_hw_init_user_settings(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "--AP %s ahp->ah_miscMode 0x%x\n",
+ __func__, ahp->ah_miscMode);
+ if (ahp->ah_miscMode != 0)
+ REG_WRITE(ah, AR_PCU_MISC,
+ REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode);
+ if (ahp->ah_slottime != (u32) -1)
+ ath9k_hw_setslottime(ah, ahp->ah_slottime);
+ if (ahp->ah_acktimeout != (u32) -1)
+ ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout);
+ if (ahp->ah_ctstimeout != (u32) -1)
+ ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout);
+ if (ahp->ah_globaltxtimeout != (u32) -1)
+ ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
+}
+
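+/*
+ * Write the initialization value (INI) arrays for the selected channel
+ * mode: ADDAC, per-mode and common register tables, INI overrides,
+ * chain masks, transmit power and the RF registers.
+ */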
+static inline int
+ath9k_hw_process_ini(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ enum ath9k_ht_macmode macmode)
+{
+ int i, regWrites = 0;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 modesIndex, freqIndex;
+ int status;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
+
+ ath9k_hw_set_addac(ah, chan);
+
+ if (AR_SREV_5416_V22_OR_LATER(ah)) {
+ REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites);
+ } else {
+ struct ar5416IniArray temp;
+ u32 addacSize =
+ sizeof(u32) * ahp->ah_iniAddac.ia_rows *
+ ahp->ah_iniAddac.ia_columns;
+
+ memcpy(ahp->ah_addac5416_21,
+ ahp->ah_iniAddac.ia_array, addacSize);
+
+ (ahp->ah_addac5416_21)[31 *
+ ahp->ah_iniAddac.ia_columns + 1] = 0;
+
+ temp.ia_array = ahp->ah_addac5416_21;
+ temp.ia_columns = ahp->ah_iniAddac.ia_columns;
+ temp.ia_rows = ahp->ah_iniAddac.ia_rows;
+ REG_WRITE_ARRAY(&temp, 1, regWrites);
+ }
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
+ u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
+ u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex);
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ if (ah->ah_devid == AR9280_DEVID_PCI)
+ val = ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, reg,
+ val);
+#endif
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->ah_config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) {
+ u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0);
+ u32 val = INI_RA(&ahp->ah_iniCommon, i, 1);
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->ah_config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);
+
+ if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
+ REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex,
+ regWrites);
+ }
+
+ ath9k_hw_override_ini(ah, chan);
+ ath9k_hw_set_regs(ah, chan, macmode);
+ ath9k_hw_init_chain_masks(ah);
+
+ status = ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
+ ath9k_regd_get_ctl(ah, chan),
+ ath9k_regd_get_antenna_allowed(ah,
+ chan),
+ chan->maxRegTxPower * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) ah->ah_powerLimit));
+ if (status != 0) {
+ DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ "%s: error init'ing transmit power\n", __func__);
+ return -EIO;
+ }
+
+ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ "%s: ar5416SetRfRegs failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
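+/*
+ * Program the sample count and calibration mode for the given periodic
+ * calibration, then kick it off by setting the DO_CAL bit.
+ */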
+static inline void ath9k_hw_setup_calibration(struct ath_hal *ah,
+ struct hal_cal_list *currCal)
+{
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: starting IQ Mismatch Calibration\n",
+ __func__);
+ break;
+ case ADC_GAIN_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: starting ADC Gain Calibration\n", __func__);
+ break;
+ case ADC_DC_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: starting ADC DC Calibration\n", __func__);
+ break;
+ case ADC_DC_INIT_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: starting Init ADC DC Calibration\n",
+ __func__);
+ break;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+}
+
+static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
+ struct hal_cal_list *currCal)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int i;
+
+ ath9k_hw_setup_calibration(ah, currCal);
+
+ currCal->calState = CAL_RUNNING;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ahp->ah_Meas0.sign[i] = 0;
+ ahp->ah_Meas1.sign[i] = 0;
+ ahp->ah_Meas2.sign[i] = 0;
+ ahp->ah_Meas3.sign[i] = 0;
+ }
+
+ ahp->ah_CalSamples = 0;
+}
+
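+/*
+ * Run one step of a periodic calibration: collect samples once the
+ * hardware has finished, and when enough samples have been gathered,
+ * post-process them and mark the calibration valid for this channel.
+ */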
+static inline void
+ath9k_hw_per_calibration(struct ath_hal *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct hal_cal_list *currCal,
+ bool *isCalDone)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ *isCalDone = false;
+
+ if (currCal->calState == CAL_RUNNING) {
+ if (!(REG_READ(ah,
+ AR_PHY_TIMING_CTRL4(0)) &
+ AR_PHY_TIMING_CTRL4_DO_CAL)) {
+
+ currCal->calData->calCollect(ah);
+
+ ahp->ah_CalSamples++;
+
+ if (ahp->ah_CalSamples >=
+ currCal->calData->calNumSamples) {
+ int i, numChains = 0;
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ currCal->calData->calPostProc(ah,
+ numChains);
+
+ ichan->CalValid |=
+ currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ *isCalDone = true;
+ } else {
+ ath9k_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+}
+
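+/*
+ * Run the initial (reset-time) calibrations synchronously, waiting for
+ * each one to complete before moving on to the next in the list.
+ */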
+static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
+ int init_cal_count)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel ichan;
+ bool isCalDone;
+ struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
+ const struct hal_percal_data *calData;
+ int i;
+
+ if (currCal == NULL)
+ return false;
+
+ calData = currCal->calData;
+
+ ichan.CalValid = 0;
+
+ for (i = 0; i < init_cal_count; i++) {
+ ath9k_hw_reset_calibration(ah, currCal);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL, 0)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: Cal %d failed to complete in 100ms.\n",
+ __func__, calData->calType);
+
+ ahp->ah_cal_list = ahp->ah_cal_list_last =
+ ahp->ah_cal_list_curr = NULL;
+ return false;
+ }
+
+ ath9k_hw_per_calibration(ah, &ichan, ahp->ah_rxchainmask,
+ currCal, &isCalDone);
+ if (!isCalDone) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: Not able to run Init Cal %d.\n",
+ __func__, calData->calType);
+ }
+ if (currCal->calNext) {
+ currCal = currCal->calNext;
+ calData = currCal->calData;
+ }
+ }
+
+ ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL;
+ return true;
+}
+
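+/*
+ * Fast channel change: switch the synthesizer to the new channel
+ * without a full chip reset.  Only possible when no frames are pending
+ * on the TX queues and the baseband can be paused via the RF bus grant.
+ */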
+static inline bool
+ath9k_hw_channel_change(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ enum ath9k_ht_macmode macmode)
+{
+ u32 synthDelay, qnum;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
+ if (ath9k_hw_numtxpending(ah, qnum)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+ "%s: Transmit frames pending on queue %d\n",
+ __func__, qnum);
+ return false;
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
+ "%s: Could not kill baseband RX\n", __func__);
+ return false;
+ }
+
+ ath9k_hw_set_regs(ah, chan, macmode);
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!(ath9k_hw_ar9280_set_channel(ah, chan))) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: failed to set channel\n", __func__);
+ return false;
+ }
+ } else {
+ if (!(ath9k_hw_set_channel(ah, chan))) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: failed to set channel\n", __func__);
+ return false;
+ }
+ }
+
+ if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
+ ath9k_regd_get_ctl(ah, chan),
+ ath9k_regd_get_antenna_allowed(ah, chan),
+ chan->maxRegTxPower * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) ah->ah_powerLimit)) != 0) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "%s: error init'ing transmit power\n", __func__);
+ return false;
+ }
+
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_CCK(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+
+ if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
+ ath9k_hw_set_delta_slope(ah, chan);
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ath9k_hw_9280_spur_mitigate(ah, chan);
+ else
+ ath9k_hw_spur_mitigate(ah, chan);
+
+ if (!chan->oneTimeCalsDone)
+ chan->oneTimeCalsDone = true;
+
+ return true;
+}
+
+static bool ath9k_hw_chip_reset(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
+ return false;
+
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
+ return false;
+
+ ahp->ah_chipFullSleep = false;
+
+ ath9k_hw_init_pll(ah, chan);
+
+ ath9k_hw_set_rfmode(ah, chan);
+
+ return true;
+}
+
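+/*
+ * Set up DMA parameters: AHB prefetch, 128-byte TX/RX DMA burst sizes,
+ * the TX trigger level and the usable TX buffer size.
+ */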
+static inline void ath9k_hw_set_dma(struct ath_hal *ah)
+{
+ u32 regval;
+
+ regval = REG_READ(ah, AR_AHB_MODE);
+ REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+
+ regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
+ REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
+
+ REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel);
+
+ regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
+ REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
+
+ REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
+
+ if (AR_SREV_9285(ah)) {
+ REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
+ AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
+ } else {
+ REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
+ AR_PCU_TXBUF_CTRL_USABLE_SIZE);
+ }
+}
+
+bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+ "%s: dma failed to stop in 10ms\n"
+ "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
+ __func__,
+ REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
+ return false;
+ } else {
+ return true;
+ }
+}
+
+void ath9k_hw_startpcureceive(struct ath_hal *ah)
+{
+ REG_CLR_BIT(ah, AR_DIAG_SW,
+ (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ ath9k_enable_mib_counters(ah);
+
+ ath9k_ani_reset(ah);
+}
+
+void ath9k_hw_stoppcurecv(struct ath_hal *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+
+ ath9k_hw_disable_mib_counters(ah);
+}
+
+static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ enum hal_cal_types calType)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ bool retval = false;
+
+ switch (calType & ahp->ah_suppCals) {
+ case IQ_MISMATCH_CAL:
+ if (!IS_CHAN_B(chan))
+ retval = true;
+ break;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ if (!IS_CHAN_B(chan)
+ && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
+ retval = true;
+ break;
+ }
+
+ return retval;
+}
+
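+/*
+ * Run the offset calibration, start a noise floor calibration and, on
+ * AR9100/AR9160 and later, queue the periodic ADC gain, ADC DC and IQ
+ * mismatch calibrations supported on this channel.
+ */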
+static inline bool ath9k_hw_init_cal(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel *ichan =
+ ath9k_regd_check_channel(ah, chan);
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ if (!ath9k_hw_wait
+ (ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: offset calibration failed to complete in 1ms; "
+ "noisy environment?\n", __func__);
+ return false;
+ }
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_NF);
+
+ ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr =
+ NULL;
+
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
+ if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) {
+ INIT_CAL(&ahp->ah_adcGainCalData);
+ INSERT_CAL(ahp, &ahp->ah_adcGainCalData);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: enabling ADC Gain Calibration.\n",
+ __func__);
+ }
+ if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) {
+ INIT_CAL(&ahp->ah_adcDcCalData);
+ INSERT_CAL(ahp, &ahp->ah_adcDcCalData);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: enabling ADC DC Calibration.\n",
+ __func__);
+ }
+ if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ahp->ah_iqCalData);
+ INSERT_CAL(ahp, &ahp->ah_iqCalData);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: enabling IQ Calibration.\n",
+ __func__);
+ }
+
+ ahp->ah_cal_list_curr = ahp->ah_cal_list;
+
+ if (ahp->ah_cal_list_curr)
+ ath9k_hw_reset_calibration(ah,
+ ahp->ah_cal_list_curr);
+ }
+
+ ichan->CalValid = 0;
+
+ return true;
+}
+
+
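+/*
+ * Full hardware reset: bring the chip out of sleep, (re)program the INI
+ * tables for the target channel, restore MAC/BSSID state, reset the TX
+ * queues and interrupt masks, and run the initial calibrations.  A fast
+ * channel change is attempted instead when only the channel differs.
+ */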
+bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
+ struct ath9k_channel *chan,
+ enum ath9k_ht_macmode macmode,
+ u8 txchainmask, u8 rxchainmask,
+ enum ath9k_ht_extprotspacing extprotspacing,
+ bool bChannelChange,
+ int *status)
+{
+#define FAIL(_code) do { ecode = _code; goto bad; } while (0)
+ u32 saveLedState;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel *curchan = ah->ah_curchan;
+ u32 saveDefAntenna;
+ u32 macStaId1;
+ int ecode;
+ int i, rx_chainmask;
+
+ ahp->ah_extprotspacing = extprotspacing;
+ ahp->ah_txchainmask = txchainmask;
+ ahp->ah_rxchainmask = rxchainmask;
+
+ if (AR_SREV_9280(ah)) {
+ ahp->ah_txchainmask &= 0x3;
+ ahp->ah_rxchainmask &= 0x3;
+ }
+
+ if (ath9k_hw_check_chan(ah, chan) == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: invalid channel %u/0x%x; no mapping\n",
+ __func__, chan->channel, chan->channelFlags);
+ FAIL(-EINVAL);
+ }
+
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
+ return false;
+
+ if (curchan)
+ ath9k_hw_getnf(ah, curchan);
+
+ if (bChannelChange &&
+ (ahp->ah_chipFullSleep != true) &&
+ (ah->ah_curchan != NULL) &&
+ (chan->channel != ah->ah_curchan->channel) &&
+ ((chan->channelFlags & CHANNEL_ALL) ==
+ (ah->ah_curchan->channelFlags & CHANNEL_ALL)) &&
+ (!AR_SREV_9280(ah) ||
+ (!IS_CHAN_A_5MHZ_SPACED(chan) &&
+ !IS_CHAN_A_5MHZ_SPACED(ah->ah_curchan)))) {
+
+ if (ath9k_hw_channel_change(ah, chan, macmode)) {
+ ath9k_hw_loadnf(ah, ah->ah_curchan);
+ ath9k_hw_start_nfcal(ah);
+ return true;
+ }
+ }
+
+ saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
+ if (saveDefAntenna == 0)
+ saveDefAntenna = 1;
+
+ macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
+
+ saveLedState = REG_READ(ah, AR_CFG_LED) &
+ (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
+ AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
+
+ ath9k_hw_mark_phy_inactive(ah);
+
+ if (!ath9k_hw_chip_reset(ah, chan)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: chip reset failed\n",
+ __func__);
+ FAIL(-EIO);
+ }
+
+ if (AR_SREV_9280(ah)) {
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_JTAG_DISABLE);
+
+ if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes)) {
+ if (IS_CHAN_5GHZ(chan))
+ ath9k_hw_set_gpio(ah, 9, 0);
+ else
+ ath9k_hw_set_gpio(ah, 9, 1);
+ }
+ ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ }
+
+ ecode = ath9k_hw_process_ini(ah, chan, macmode);
+ if (ecode != 0)
+ goto bad;
+
+ if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
+ ath9k_hw_set_delta_slope(ah, chan);
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ath9k_hw_9280_spur_mitigate(ah, chan);
+ else
+ ath9k_hw_spur_mitigate(ah, chan);
+
+ if (!ath9k_hw_eeprom_set_board_values(ah, chan)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
+ "%s: error setting board options\n", __func__);
+ FAIL(-EIO);
+ }
+
+ ath9k_hw_decrease_chain_power(ah, chan);
+
+ REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ahp->ah_macaddr));
+ REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ahp->ah_macaddr + 4)
+ | macStaId1
+ | AR_STA_ID1_RTS_USE_DEF
+ | (ah->ah_config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
+ | ahp->ah_staId1Defaults);
+ ath9k_hw_set_operating_mode(ah, opmode);
+
+ REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
+ REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
+
+ REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
+
+ REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
+ REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
+ ((ahp->ah_assocId & 0x3fff) << AR_BSS_ID1_AID_S));
+
+ REG_WRITE(ah, AR_ISR, ~0);
+
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!(ath9k_hw_ar9280_set_channel(ah, chan)))
+ FAIL(-EIO);
+ } else {
+ if (!(ath9k_hw_set_channel(ah, chan)))
+ FAIL(-EIO);
+ }
+
+ for (i = 0; i < AR_NUM_DCU; i++)
+ REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
+
+ ahp->ah_intrTxqs = 0;
+ for (i = 0; i < ah->ah_caps.total_queues; i++)
+ ath9k_hw_resettxqueue(ah, i);
+
+ ath9k_hw_init_interrupt_masks(ah, opmode);
+ ath9k_hw_init_qos(ah);
+
+ ath9k_hw_init_user_settings(ah);
+
+ ah->ah_opmode = opmode;
+
+ REG_WRITE(ah, AR_STA_ID1,
+ REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
+
+ ath9k_hw_set_dma(ah);
+
+ REG_WRITE(ah, AR_OBS, 8);
+
+ if (ahp->ah_intrMitigation) {
+
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
+ }
+
+ ath9k_hw_init_bb(ah, chan);
+
+ if (!ath9k_hw_init_cal(ah, chan))
+ FAIL(-ENODEV);
+
+ rx_chainmask = ahp->ah_rxchainmask;
+ if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ }
+
+ REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
+
+ if (AR_SREV_9100(ah)) {
+ u32 mask;
+ mask = REG_READ(ah, AR_CFG);
+ if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s CFG Byte Swap Set 0x%x\n", __func__,
+ mask);
+ } else {
+ mask =
+ INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
+ REG_WRITE(ah, AR_CFG, mask);
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s Setting CFG 0x%x\n", __func__,
+ REG_READ(ah, AR_CFG));
+ }
+ } else {
+#ifdef __BIG_ENDIAN
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
+#endif
+ }
+
+ return true;
+bad:
+ if (status)
+ *status = ecode;
+ return false;
+#undef FAIL
+}
+
+bool ath9k_hw_phy_disable(struct ath_hal *ah)
+{
+ return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM);
+}
+
+bool ath9k_hw_disable(struct ath_hal *ah)
+{
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
+ return false;
+
+ return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
+}
+
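+/*
+ * Periodic calibration entry point: advance the currently scheduled
+ * calibration and, when a long calibration is requested, refresh the
+ * noise floor history and restart the NF calibration.
+ */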
+bool
+ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal,
+ bool *isCalDone)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
+ struct ath9k_channel *ichan =
+ ath9k_regd_check_channel(ah, chan);
+
+ *isCalDone = true;
+
+ if (ichan == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: invalid channel %u/0x%x; no mapping\n",
+ __func__, chan->channel, chan->channelFlags);
+ return false;
+ }
+
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal,
+ isCalDone);
+ if (*isCalDone) {
+ ahp->ah_cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ *isCalDone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ if (longcal) {
+ ath9k_hw_getnf(ah, ichan);
+ ath9k_hw_loadnf(ah, ah->ah_curchan);
+ ath9k_hw_start_nfcal(ah);
+
+ if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) {
+
+ chan->channelFlags |= CHANNEL_CW_INT;
+ ichan->channelFlags &= ~CHANNEL_CW_INT;
+ }
+ }
+
+ return true;
+}
+
+static void ath9k_hw_iqcal_collect(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ahp->ah_totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ahp->ah_totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ahp->ah_totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i],
+ ahp->ah_totalPowerMeasQ[i],
+ ahp->ah_totalIqCorrMeas[i]);
+ }
+}
+
+static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ahp->ah_totalAdcIOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ahp->ah_totalAdcIEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ahp->ah_totalAdcQOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ahp->ah_totalAdcQEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ahp->ah_CalSamples, i,
+ ahp->ah_totalAdcIOddPhase[i],
+ ahp->ah_totalAdcIEvenPhase[i],
+ ahp->ah_totalAdcQOddPhase[i],
+ ahp->ah_totalAdcQEvenPhase[i]);
+ }
+}
+
+static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ahp->ah_totalAdcDcOffsetIOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ahp->ah_totalAdcDcOffsetIEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ahp->ah_totalAdcDcOffsetQOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ahp->ah_totalAdcDcOffsetQEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ahp->ah_CalSamples, i,
+ ahp->ah_totalAdcDcOffsetIOddPhase[i],
+ ahp->ah_totalAdcDcOffsetIEvenPhase[i],
+ ahp->ah_totalAdcDcOffsetQOddPhase[i],
+ ahp->ah_totalAdcDcOffsetQEvenPhase[i]);
+ }
+}
+
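+/*
+ * Post-process the collected IQ mismatch measurements and write the
+ * resulting I/Q correction coefficients for each chain.
+ */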
+static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ahp->ah_totalPowerMeasI[i];
+ powerMeasQ = ahp->ah_totalPowerMeasQ[i];
+ iqCorrMeas = ahp->ah_totalIqCorrMeas[i];
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ i, ahp->ah_totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
+ qCoffDenom = powerMeasQ / 64;
+
+ if (powerMeasQ != 0) {
+
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+
+ iCoff = iCoff & 0x3f;
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+ if (iqCorrNeg == 0x0)
+ iCoff = 0x40 - iCoff;
+
+ if (qCoff > 15)
+ qCoff = 15;
+ else if (qCoff <= -16)
+ qCoff = -16; /* clamp low, matching the +15 clamp above */
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
+ qCoff);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
+}
+
+static void
+ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset,
+ qEvenMeasOffset;
+ u32 qGainMismatch, iGainMismatch, val, i;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i];
+ iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i];
+ qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i];
+ qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i];
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Starting ADC Gain Cal for Chain %d\n", i);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+ iOddMeasOffset);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+ iEvenMeasOffset);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+ qOddMeasOffset);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+ qEvenMeasOffset);
+
+ if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
+ iGainMismatch =
+ ((iEvenMeasOffset * 32) /
+ iOddMeasOffset) & 0x3f;
+ qGainMismatch =
+ ((qOddMeasOffset * 32) /
+ qEvenMeasOffset) & 0x3f;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n", i,
+ iGainMismatch);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n", i,
+ qGainMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xfffff000;
+ val |= (qGainMismatch) | (iGainMismatch << 6);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "ADC Gain Cal done for Chain %d\n", i);
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
+}
+
+static void
+ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, val, i;
+ int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
+ const struct hal_percal_data *calData =
+ ahp->ah_cal_list_curr->calData;
+ u32 numSamples =
+ (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i];
+ iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i];
+ qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i];
+ qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i];
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = %d\n", i,
+ iOddMeasOffset);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = %d\n", i,
+ iEvenMeasOffset);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = %d\n", i,
+ qOddMeasOffset);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = %d\n", i,
+ qEvenMeasOffset);
+
+ iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+ qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
+ iDcMismatch);
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
+ qDcMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xc0000fff;
+ val |= (qDcMismatch << 12) | (iDcMismatch << 21);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "ADC DC Offset Cal done for Chain %d\n", i);
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
+}
+
+bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel *chan = ah->ah_curchan;
+
+ ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER);
+
+ if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
+ ath9k_regd_get_ctl(ah, chan),
+ ath9k_regd_get_antenna_allowed(ah,
+ chan),
+ chan->maxRegTxPower * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) ah->ah_powerLimit)) != 0)
+ return false;
+
+ return true;
+}
+
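+/*
+ * Work out the synthesizer, control and extension channel center
+ * frequencies for the given channel; HT40+/HT40- shift the synthesizer
+ * by half a channel relative to the control channel.
+ */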
+void
+ath9k_hw_get_channel_centers(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ struct chan_centers *centers)
+{
+ int8_t extoff;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (!IS_CHAN_HT40(chan)) {
+ centers->ctl_center = centers->ext_center =
+ centers->synth_center = chan->channel;
+ return;
+ }
+
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS)) {
+ centers->synth_center =
+ chan->channel + HT40_CHANNEL_CENTER_SHIFT;
+ extoff = 1;
+ } else {
+ centers->synth_center =
+ chan->channel - HT40_CHANNEL_CENTER_SHIFT;
+ extoff = -1;
+ }
+
+ centers->ctl_center = centers->synth_center -
+ (extoff * HT40_CHANNEL_CENTER_SHIFT);
+ centers->ext_center = centers->synth_center +
+ (extoff * ((ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ?
+ HT40_CHANNEL_CENTER_SHIFT : 15));
+}
+
+void
+ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
+ bool *isCalDone)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel *ichan =
+ ath9k_regd_check_channel(ah, chan);
+ struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
+
+ *isCalDone = true;
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
+ return;
+
+ if (currCal == NULL)
+ return;
+
+ if (ichan == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: invalid channel %u/0x%x; no mapping\n",
+ __func__, chan->channel, chan->channelFlags);
+ return;
+ }
+
+
+ if (currCal->calState != CAL_DONE) {
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: Calibration state incorrect, %d\n",
+ __func__, currCal->calState);
+ return;
+ }
+
+
+ if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType))
+ return;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+ "%s: Resetting Cal %d state for channel %u/0x%x\n",
+ __func__, currCal->calData->calType, chan->channel,
+ chan->channelFlags);
+
+ ichan->CalValid &= ~currCal->calData->calType;
+ currCal->calState = CAL_WAITING;
+
+ *isCalDone = false;
+}
+
+void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ memcpy(mac, ahp->ah_macaddr, ETH_ALEN);
+}
+
+bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ memcpy(ahp->ah_macaddr, mac, ETH_ALEN);
+ return true;
+}
+
+void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ memcpy(mask, ahp->ah_bssidmask, ETH_ALEN);
+}
+
+bool
+ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ memcpy(ahp->ah_bssidmask, mask, ETH_ALEN);
+
+ REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
+ REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
+
+ return true;
+}
+
+#ifdef CONFIG_ATH9K_RFKILL
+static void ath9k_enable_rfkill(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+
+ if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) {
+
+ ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
+ !ahp->ah_gpioBit);
+ } else {
+ ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
+ ahp->ah_gpioBit);
+ }
+}
+#endif
+
+void
+ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
+ u16 assocId)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ memcpy(ahp->ah_bssid, bssid, ETH_ALEN);
+ ahp->ah_assocId = assocId;
+
+ REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
+ REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
+ ((assocId & 0x3fff) << AR_BSS_ID1_AID_S));
+}
+
+u64 ath9k_hw_gettsf64(struct ath_hal *ah)
+{
+ u64 tsf;
+
+ tsf = REG_READ(ah, AR_TSF_U32);
+ tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
+ return tsf;
+}
+
+void ath9k_hw_reset_tsf(struct ath_hal *ah)
+{
+ int count;
+
+ count = 0;
+ while (REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) {
+ count++;
+ if (count > 10) {
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET,
+ "%s: AR_SLP32_TSF_WRITE_STATUS limit exceeded\n",
+ __func__);
+ break;
+ }
+ udelay(10);
+ }
+ REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
+}
+
+u32 ath9k_hw_getdefantenna(struct ath_hal *ah)
+{
+ return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
+}
+
+void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna)
+{
+ REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
+}
+
+bool
+ath9k_hw_setantennaswitch(struct ath_hal *ah,
+ enum ath9k_ant_setting settings,
+ struct ath9k_channel *chan,
+ u8 *tx_chainmask,
+ u8 *rx_chainmask,
+ u8 *antenna_cfgd)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ static u8 tx_chainmask_cfg, rx_chainmask_cfg;
+
+ if (AR_SREV_9280(ah)) {
+ if (!tx_chainmask_cfg) {
+
+ tx_chainmask_cfg = *tx_chainmask;
+ rx_chainmask_cfg = *rx_chainmask;
+ }
+
+ switch (settings) {
+ case ATH9K_ANT_FIXED_A:
+ *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
+ *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
+ *antenna_cfgd = true;
+ break;
+ case ATH9K_ANT_FIXED_B:
+ if (ah->ah_caps.tx_chainmask >
+ ATH9K_ANTENNA1_CHAINMASK) {
+ *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
+ }
+ *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
+ *antenna_cfgd = true;
+ break;
+ case ATH9K_ANT_VARIABLE:
+ *tx_chainmask = tx_chainmask_cfg;
+ *rx_chainmask = rx_chainmask_cfg;
+ *antenna_cfgd = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ ahp->ah_diversityControl = settings;
+ }
+
+ return true;
+}
+
+void ath9k_hw_setopmode(struct ath_hal *ah)
+{
+ ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
+}
+
+bool
+ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
+ u32 capability, u32 *result)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+
+ switch (type) {
+ case ATH9K_CAP_CIPHER:
+ switch (capability) {
+ case ATH9K_CIPHER_AES_CCM:
+ case ATH9K_CIPHER_AES_OCB:
+ case ATH9K_CIPHER_TKIP:
+ case ATH9K_CIPHER_WEP:
+ case ATH9K_CIPHER_MIC:
+ case ATH9K_CIPHER_CLR:
+ return true;
+ default:
+ return false;
+ }
+ case ATH9K_CAP_TKIP_MIC:
+ switch (capability) {
+ case 0:
+ return true;
+ case 1:
+ return (ahp->ah_staId1Defaults &
+ AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
+ false;
+ }
+ case ATH9K_CAP_TKIP_SPLIT:
+ return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ?
+ false : true;
+ case ATH9K_CAP_WME_TKIPMIC:
+ return 0;
+ case ATH9K_CAP_PHYCOUNTERS:
+ return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO;
+ case ATH9K_CAP_DIVERSITY:
+ return (REG_READ(ah, AR_PHY_CCK_DETECT) &
+ AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
+ true : false;
+ case ATH9K_CAP_PHYDIAG:
+ return true;
+ case ATH9K_CAP_MCAST_KEYSRCH:
+ switch (capability) {
+ case 0:
+ return true;
+ case 1:
+ if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
+ return false;
+ } else {
+ return (ahp->ah_staId1Defaults &
+ AR_STA_ID1_MCAST_KSRCH) ? true :
+ false;
+ }
+ }
+ return false;
+ case ATH9K_CAP_TSF_ADJUST:
+ return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ?
+ true : false;
+ case ATH9K_CAP_RFSILENT:
+ if (capability == 3)
+ return false;
+ case ATH9K_CAP_ANT_CFG_2GHZ:
+ *result = pCap->num_antcfg_2ghz;
+ return true;
+ case ATH9K_CAP_ANT_CFG_5GHZ:
+ *result = pCap->num_antcfg_5ghz;
+ return true;
+ case ATH9K_CAP_TXPOW:
+ switch (capability) {
+ case 0:
+ return 0;
+ case 1:
+ *result = ah->ah_powerLimit;
+ return 0;
+ case 2:
+ *result = ah->ah_maxPowerLevel;
+ return 0;
+ case 3:
+ *result = ah->ah_tpScale;
+ return 0;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+int
+ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_channel *chan = ah->ah_curchan;
+ const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ u16 ant_config;
+ u32 halNumAntConfig;
+
+ halNumAntConfig = IS_CHAN_2GHZ(chan) ?
+ pCap->num_antcfg_2ghz : pCap->num_antcfg_5ghz;
+
+ if (cfg < halNumAntConfig) {
+ if (!ath9k_hw_get_eeprom_antenna_cfg(ahp, chan,
+ cfg, &ant_config)) {
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+bool ath9k_hw_intrpend(struct ath_hal *ah)
+{
+ u32 host_isr;
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if ((host_isr & AR_INTR_SYNC_DEFAULT)
+ && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ return false;
+}
+
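+/*
+ * Read and decode the interrupt status registers into ATH9K_INT_* bits,
+ * handling both the MAC ISR and the sync cause register (fatal host
+ * interface errors and completion timeouts).
+ */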
+bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ u32 sync_cause = 0;
+ bool fatal_int = false;
+
+ if (!AR_SREV_9100(ah)) {
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON) {
+ isr = REG_READ(ah, AR_ISR);
+ }
+ }
+
+ sync_cause =
+ REG_READ(ah,
+ AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+ } else {
+ *masked = 0;
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ if (isr) {
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+ if (isr2 & AR_ISR_S2_TIM)
+ mask2 |= ATH9K_INT_TIM;
+ if (isr2 & AR_ISR_S2_DTIM)
+ mask2 |= ATH9K_INT_DTIM;
+ if (isr2 & AR_ISR_S2_DTIMSYNC)
+ mask2 |= ATH9K_INT_DTIMSYNC;
+ if (isr2 & (AR_ISR_S2_CABEND))
+ mask2 |= ATH9K_INT_CABEND;
+ if (isr2 & AR_ISR_S2_GTT)
+ mask2 |= ATH9K_INT_GTT;
+ if (isr2 & AR_ISR_S2_CST)
+ mask2 |= ATH9K_INT_CST;
+ }
+
+ isr = REG_READ(ah, AR_ISR_RAC);
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ahp->ah_intrMitigation) {
+
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RX;
+ }
+
+ if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RX;
+ if (isr &
+ (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
+ AR_ISR_TXEOL)) {
+ u32 s0_s, s1_s;
+
+ *masked |= ATH9K_INT_TX;
+
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+
+ if (isr & AR_ISR_RXORN) {
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
+ "%s: receive FIFO overrun interrupt\n",
+ __func__);
+ }
+
+ if (!AR_SREV_9100(ah)) {
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
+ if (isr5 & AR_ISR_S5_TIM_TIMER)
+ *masked |= ATH9K_INT_TIM_TIMER;
+ }
+ }
+
+ *masked |= mask2;
+ }
+ if (AR_SREV_9100(ah))
+ return true;
+ if (sync_cause) {
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ "%s: received PCI FATAL interrupt\n",
+ __func__);
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ "%s: received PCI PERR interrupt\n",
+ __func__);
+ }
+ }
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
+ "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
+ __func__);
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
+ "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
+ __func__);
+ }
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ }
+ return true;
+}
+
+enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah)
+{
+ return AH5416(ah)->ah_maskReg;
+}
+
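+/*
+ * Install a new interrupt mask.  The global enable is dropped while the
+ * IMR registers are reprogrammed and re-enabled afterwards; the previous
+ * mask is returned so callers can restore it.
+ */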
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 omask = ahp->ah_maskReg;
+ u32 mask, mask2;
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: 0x%x => 0x%x\n", __func__,
+ omask, ints);
+
+ if (omask & ATH9K_INT_GLOBAL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: disable IER\n",
+ __func__);
+ REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+ (void) REG_READ(ah, AR_IER);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ }
+ }
+
+ mask = ints & ATH9K_INT_COMMON;
+ mask2 = 0;
+
+ if (ints & ATH9K_INT_TX) {
+ if (ahp->ah_txOkInterruptMask)
+ mask |= AR_IMR_TXOK;
+ if (ahp->ah_txDescInterruptMask)
+ mask |= AR_IMR_TXDESC;
+ if (ahp->ah_txErrInterruptMask)
+ mask |= AR_IMR_TXERR;
+ if (ahp->ah_txEolInterruptMask)
+ mask |= AR_IMR_TXEOL;
+ }
+ if (ints & ATH9K_INT_RX) {
+ mask |= AR_IMR_RXERR;
+ if (ahp->ah_intrMitigation)
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ else
+ mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ mask |= AR_IMR_GENTMR;
+ }
+
+ if (ints & (ATH9K_INT_BMISC)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_TIM)
+ mask2 |= AR_IMR_S2_TIM;
+ if (ints & ATH9K_INT_DTIM)
+ mask2 |= AR_IMR_S2_DTIM;
+ if (ints & ATH9K_INT_DTIMSYNC)
+ mask2 |= AR_IMR_S2_DTIMSYNC;
+ if (ints & ATH9K_INT_CABEND)
+ mask2 |= (AR_IMR_S2_CABEND);
+ }
+
+ if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_GTT)
+ mask2 |= AR_IMR_S2_GTT;
+ if (ints & ATH9K_INT_CST)
+ mask2 |= AR_IMR_S2_CST;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: new IMR 0x%x\n", __func__,
+ mask);
+ REG_WRITE(ah, AR_IMR, mask);
+ mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
+ AR_IMR_S2_DTIM |
+ AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND |
+ AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR |
+ AR_IMR_S2_GTT | AR_IMR_S2_CST);
+ REG_WRITE(ah, AR_IMR_S2, mask | mask2);
+ ahp->ah_maskReg = ints;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if (ints & ATH9K_INT_TIM_TIMER)
+ REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ else
+ REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ }
+
+ if (ints & ATH9K_INT_GLOBAL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: enable IER\n",
+ __func__);
+ REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
+ AR_INTR_MAC_IRQ);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
+
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
+ AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK,
+ AR_INTR_SYNC_DEFAULT);
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+ }
+
+ return omask;
+}
+
+void
+ath9k_hw_beaconinit(struct ath_hal *ah,
+ u32 next_beacon, u32 beacon_period)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ int flags = 0;
+
+ ahp->ah_beaconInterval = beacon_period;
+
+ switch (ah->ah_opmode) {
+ case ATH9K_M_STA:
+ case ATH9K_M_MONITOR:
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
+ REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
+ REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
+ flags |= AR_TBTT_TIMER_EN;
+ break;
+ case ATH9K_M_IBSS:
+ REG_SET_BIT(ah, AR_TXCFG,
+ AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
+ REG_WRITE(ah, AR_NEXT_NDP_TIMER,
+ TU_TO_USEC(next_beacon +
+ (ahp->ah_atimWindow ? ahp->ah_atimWindow : 1)));
+ flags |= AR_NDP_TIMER_EN;
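+ /* fall through: IBSS also programs the TBTT/DBA/SWBA timers */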
+ case ATH9K_M_HOSTAP:
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
+ REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
+ TU_TO_USEC(next_beacon -
+ ah->ah_config.dma_beacon_response_time));
+ REG_WRITE(ah, AR_NEXT_SWBA,
+ TU_TO_USEC(next_beacon -
+ ah->ah_config.sw_beacon_response_time));
+ flags |=
+ AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
+ break;
+ }
+
+ REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
+ REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
+ REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
+
+ beacon_period &= ~ATH9K_BEACON_ENA;
+ if (beacon_period & ATH9K_BEACON_RESET_TSF) {
+ beacon_period &= ~ATH9K_BEACON_RESET_TSF;
+ ath9k_hw_reset_tsf(ah);
+ }
+
+ REG_SET_BIT(ah, AR_TIMER_MODE, flags);
+}
+
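+/*
+ * Program the station beacon timers (TBTT, TIM, DTIM and the sleep
+ * registers) from the beacon state supplied by the stack.
+ */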
+void
+ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
+ const struct ath9k_beacon_state *bs)
+{
+ u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
+
+ REG_WRITE(ah, AR_BEACON_PERIOD,
+ TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
+ TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+
+ REG_RMW_FIELD(ah, AR_RSSI_THR,
+ AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
+
+ beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD;
+
+ if (bs->bs_sleepduration > beaconintval)
+ beaconintval = bs->bs_sleepduration;
+
+ dtimperiod = bs->bs_dtimperiod;
+ if (bs->bs_sleepduration > dtimperiod)
+ dtimperiod = bs->bs_sleepduration;
+
+ if (beaconintval == dtimperiod)
+ nextTbtt = bs->bs_nextdtim;
+ else
+ nextTbtt = bs->bs_nexttbtt;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next DTIM %d\n", __func__,
+ bs->bs_nextdtim);
+ DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next beacon %d\n", __func__,
+ nextTbtt);
+ DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: beacon period %d\n", __func__,
+ beaconintval);
+ DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: DTIM period %d\n", __func__,
+ dtimperiod);
+
+ REG_WRITE(ah, AR_NEXT_DTIM,
+ TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
+ REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
+
+ REG_WRITE(ah, AR_SLEEP1,
+ SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
+ | AR_SLEEP1_ASSUME_DTIM);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
+ beacontimeout = (BEACON_TIMEOUT_VAL << 3);
+ else
+ beacontimeout = MIN_BEACON_TIMEOUT_VAL;
+
+ REG_WRITE(ah, AR_SLEEP2,
+ SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
+
+ REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
+ REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+
+ REG_SET_BIT(ah, AR_TIMER_MODE,
+ AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
+ AR_DTIM_TIMER_EN);
+
+}
+
+bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry)
+{
+ if (entry < ah->ah_caps.keycache_size) {
+ u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
+ if (val & AR_KEYTABLE_VALID)
+ return true;
+ }
+ return false;
+}
+
+bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry)
+{
+ u32 keyType;
+
+ if (entry >= ah->ah_caps.keycache_size) {
+ DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ "%s: entry %u out of range\n", __func__, entry);
+ return false;
+ }
+ keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
+
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
+ REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
+
+ if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
+ u16 micentry = entry + 64;
+
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
+
+ }
+
+ if (ah->ah_curchan == NULL)
+ return true;
+
+ return true;
+}
+
+bool
+ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
+ const u8 *mac)
+{
+ u32 macHi, macLo;
+
+ if (entry >= ah->ah_caps.keycache_size) {
+ DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ "%s: entry %u out of range\n", __func__, entry);
+ return false;
+ }
+
+ if (mac != NULL) {
+ macHi = (mac[5] << 8) | mac[4];
+ macLo = (mac[3] << 24) | (mac[2] << 16)
+ | (mac[1] << 8) | mac[0];
+ macLo >>= 1;
+ macLo |= (macHi & 1) << 31;
+ macHi >>= 1;
+ } else {
+ macLo = macHi = 0;
+ }
+ REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
+ REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
+
+ return true;
+}
+
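+/*
+ * Write a key cache entry.  TKIP keys with hardware MIC support consume
+ * a second entry (entry + 64) for the MIC key; the combined RX/TX MIC
+ * layout is used when AR_PCU_MIC_NEW_LOC_ENA is set.
+ */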
+bool
+ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
+ const struct ath9k_keyval *k,
+ const u8 *mac, int xorKey)
+{
+ const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ u32 key0, key1, key2, key3, key4;
+ u32 keyType;
+ u32 xorMask = xorKey ?
+ (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8
+ | ATH9K_KEY_XOR) : 0;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (entry >= pCap->keycache_size) {
+ DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ "%s: entry %u out of range\n", __func__, entry);
+ return false;
+ }
+ switch (k->kv_type) {
+ case ATH9K_CIPHER_AES_OCB:
+ keyType = AR_KEYTABLE_TYPE_AES;
+ break;
+ case ATH9K_CIPHER_AES_CCM:
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ "%s: AES-CCM not supported by "
+ "mac rev 0x%x\n", __func__,
+ ah->ah_macRev);
+ return false;
+ }
+ keyType = AR_KEYTABLE_TYPE_CCM;
+ break;
+ case ATH9K_CIPHER_TKIP:
+ keyType = AR_KEYTABLE_TYPE_TKIP;
+ if (ATH9K_IS_MIC_ENABLED(ah)
+ && entry + 64 >= pCap->keycache_size) {
+ DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ "%s: entry %u inappropriate for TKIP\n",
+ __func__, entry);
+ return false;
+ }
+ break;
+ case ATH9K_CIPHER_WEP:
+ if (k->kv_len < 40 / NBBY) {
+ DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ "%s: WEP key length %u too small\n",
+ __func__, k->kv_len);
+ return false;
+ }
+ if (k->kv_len <= 40 / NBBY)
+ keyType = AR_KEYTABLE_TYPE_40;
+ else if (k->kv_len <= 104 / NBBY)
+ keyType = AR_KEYTABLE_TYPE_104;
+ else
+ keyType = AR_KEYTABLE_TYPE_128;
+ break;
+ case ATH9K_CIPHER_CLR:
+ keyType = AR_KEYTABLE_TYPE_CLR;
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ "%s: cipher %u not supported\n", __func__,
+ k->kv_type);
+ return false;
+ }
+
+ key0 = get_unaligned_le32(k->kv_val + 0) ^ xorMask;
+ key1 = (get_unaligned_le16(k->kv_val + 4) ^ xorMask) & 0xffff;
+ key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask;
+ key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff;
+ key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask;
+ if (k->kv_len <= 104 / NBBY)
+ key4 &= 0xff;
+
+ if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
+ u16 micentry = entry + 64;
+
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
+ (void) ath9k_hw_keysetmac(ah, entry, mac);
+
+ if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) {
+ u32 mic0, mic1, mic2, mic3, mic4;
+
+ mic0 = get_unaligned_le32(k->kv_mic + 0);
+ mic2 = get_unaligned_le32(k->kv_mic + 4);
+ mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
+ mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
+ mic4 = get_unaligned_le32(k->kv_txmic + 4);
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+
+ } else {
+ u32 mic0, mic2;
+
+ mic0 = get_unaligned_le32(k->kv_mic + 0);
+ mic2 = get_unaligned_le32(k->kv_mic + 4);
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+ }
+ REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
+ } else {
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
+
+ (void) ath9k_hw_keysetmac(ah, entry, mac);
+ }
+
+ if (ah->ah_curchan == NULL)
+ return true;
+
+ return true;
+}
+
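+/*
+ * Raise or lower the TX FIFO trigger level by one step, with interrupts
+ * masked while AR_TXCFG is updated.  Returns true if the level changed.
+ */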
+bool
+ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 txcfg, curLevel, newLevel;
+ enum ath9k_int omask;
+
+ if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
+ return false;
+
+ omask = ath9k_hw_set_interrupts(ah,
+ ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);
+
+ txcfg = REG_READ(ah, AR_TXCFG);
+ curLevel = MS(txcfg, AR_FTRIG);
+ newLevel = curLevel;
+ if (bIncTrigLevel) {
+ if (curLevel < MAX_TX_FIFO_THRESHOLD)
+ newLevel++;
+ } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
+ newLevel--;
+ if (newLevel != curLevel)
+ REG_WRITE(ah, AR_TXCFG,
+ (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
+
+ ath9k_hw_set_interrupts(ah, omask);
+
+ ah->ah_txTrigLevel = newLevel;
+
+ return newLevel != curLevel;
+}
+
+bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
+ const struct ath9k_tx_queue_info *qinfo)
+{
+ u32 cw;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ struct ath9k_tx_queue_info *qi;
+
+ if (q >= pCap->total_queues) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
+ __func__, q);
+ return false;
+ }
+
+ qi = &ahp->ah_txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
+ __func__);
+ return false;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi);
+
+ qi->tqi_ver = qinfo->tqi_ver;
+ qi->tqi_subtype = qinfo->tqi_subtype;
+ qi->tqi_qflags = qinfo->tqi_qflags;
+ qi->tqi_priority = qinfo->tqi_priority;
+ if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
+ qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
+ else
+ qi->tqi_aifs = INIT_AIFS;
+ if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
+ cw = min(qinfo->tqi_cwmin, 1024U);
+ qi->tqi_cwmin = 1;
+ while (qi->tqi_cwmin < cw)
+ qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
+ } else
+ qi->tqi_cwmin = qinfo->tqi_cwmin;
+ if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
+ cw = min(qinfo->tqi_cwmax, 1024U);
+ qi->tqi_cwmax = 1;
+ while (qi->tqi_cwmax < cw)
+ qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
+ } else
+ qi->tqi_cwmax = INIT_CWMAX;
+
+ if (qinfo->tqi_shretry != 0)
+ qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
+ else
+ qi->tqi_shretry = INIT_SH_RETRY;
+ if (qinfo->tqi_lgretry != 0)
+ qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
+ else
+ qi->tqi_lgretry = INIT_LG_RETRY;
+ qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
+ qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
+ qi->tqi_burstTime = qinfo->tqi_burstTime;
+ qi->tqi_readyTime = qinfo->tqi_readyTime;
+
+ switch (qinfo->tqi_subtype) {
+ case ATH9K_WME_UPSD:
+ if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
+ qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
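+/* Read back the software copy of the queue parameters set above. */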
+bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ struct ath9k_tx_queue_info *qi;
+
+ if (q >= pCap->total_queues) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
+ __func__, q);
+ return false;
+ }
+
+ qi = &ahp->ah_txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
+ __func__);
+ return false;
+ }
+
+ qinfo->tqi_ver = qi->tqi_ver;
+ qinfo->tqi_subtype = qi->tqi_subtype;
+ qinfo->tqi_qflags = qi->tqi_qflags;
+ qinfo->tqi_priority = qi->tqi_priority;
+ qinfo->tqi_aifs = qi->tqi_aifs;
+ qinfo->tqi_cwmin = qi->tqi_cwmin;
+ qinfo->tqi_cwmax = qi->tqi_cwmax;
+ qinfo->tqi_shretry = qi->tqi_shretry;
+ qinfo->tqi_lgretry = qi->tqi_lgretry;
+ qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
+ qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
+ qinfo->tqi_burstTime = qi->tqi_burstTime;
+ qinfo->tqi_readyTime = qi->tqi_readyTime;
+
+ return true;
+}
+
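+/*
+ * Allocate a hardware TX queue of the requested type.  Beacon, CAB and
+ * UAPSD map to fixed slots at the top of the queue range, PS-poll uses
+ * queue 1, and data queues take the first inactive slot.  Returns the
+ * queue number, or -1 on failure.
+ */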
+int
+ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
+ const struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_tx_queue_info *qi;
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ int q;
+
+ switch (type) {
+ case ATH9K_TX_QUEUE_BEACON:
+ q = pCap->total_queues - 1;
+ break;
+ case ATH9K_TX_QUEUE_CAB:
+ q = pCap->total_queues - 2;
+ break;
+ case ATH9K_TX_QUEUE_PSPOLL:
+ q = 1;
+ break;
+ case ATH9K_TX_QUEUE_UAPSD:
+ q = pCap->total_queues - 3;
+ break;
+ case ATH9K_TX_QUEUE_DATA:
+ for (q = 0; q < pCap->total_queues; q++)
+ if (ahp->ah_txq[q].tqi_type ==
+ ATH9K_TX_QUEUE_INACTIVE)
+ break;
+ if (q == pCap->total_queues) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+ "%s: no available tx queue\n", __func__);
+ return -1;
+ }
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n",
+ __func__, type);
+ return -1;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);
+
+ qi = &ahp->ah_txq[q];
+ if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+ "%s: tx queue %u already active\n", __func__, q);
+ return -1;
+ }
+ memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
+ qi->tqi_type = type;
+ if (qinfo == NULL) {
+ qi->tqi_qflags =
+ TXQ_FLAG_TXOKINT_ENABLE
+ | TXQ_FLAG_TXERRINT_ENABLE
+ | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
+ qi->tqi_aifs = INIT_AIFS;
+ qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
+ qi->tqi_cwmax = INIT_CWMAX;
+ qi->tqi_shretry = INIT_SH_RETRY;
+ qi->tqi_lgretry = INIT_LG_RETRY;
+ qi->tqi_physCompBuf = 0;
+ } else {
+ qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
+ (void) ath9k_hw_set_txq_props(ah, q, qinfo);
+ }
+
+ return q;
+}
+
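+/*
+ * Push the cached per-queue TX interrupt masks into the secondary
+ * interrupt mask registers (AR_IMR_S0/S1/S2).
+ */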
+static void
+ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
+ struct ath9k_tx_queue_info *qi)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
+ "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
+ __func__, ahp->ah_txOkInterruptMask,
+ ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
+ ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask);
+
+ REG_WRITE(ah, AR_IMR_S0,
+ SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
+ | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
+ REG_WRITE(ah, AR_IMR_S1,
+ SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
+ | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
+ REG_RMW_FIELD(ah, AR_IMR_S2,
+ AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
+}
+
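+/* Mark a TX queue inactive and drop it from all TX interrupt masks. */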
+bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ struct ath9k_tx_queue_info *qi;
+
+ if (q >= pCap->total_queues) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
+ __func__, q);
+ return false;
+ }
+ qi = &ahp->ah_txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
+ __func__, q);
+ return false;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n",
+ __func__, q);
+
+ qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
+ ahp->ah_txOkInterruptMask &= ~(1 << q);
+ ahp->ah_txErrInterruptMask &= ~(1 << q);
+ ahp->ah_txDescInterruptMask &= ~(1 << q);
+ ahp->ah_txEolInterruptMask &= ~(1 << q);
+ ahp->ah_txUrnInterruptMask &= ~(1 << q);
+ ath9k_hw_set_txq_interrupts(ah, qi);
+
+ return true;
+}
+
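+/*
+ * Program the DCU/QCU registers for a queue from its software state:
+ * AIFS/CWmin/CWmax, retry limits, CBR and ready-time settings, plus
+ * type-specific handling for beacon, CAB, PS-poll and UAPSD queues.
+ * Finally recompute which queues contribute to each TX interrupt.
+ */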
+bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+ struct ath9k_channel *chan = ah->ah_curchan;
+ struct ath9k_tx_queue_info *qi;
+ u32 cwMin, chanCwMin, value;
+
+ if (q >= pCap->total_queues) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
+ __func__, q);
+ return false;
+ }
+ qi = &ahp->ah_txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
+ __func__, q);
+ return true;
+ }
+
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q);
+
+ if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
+ if (chan && IS_CHAN_B(chan))
+ chanCwMin = INIT_CWMIN_11B;
+ else
+ chanCwMin = INIT_CWMIN;
+
+ for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
+ } else
+ cwMin = qi->tqi_cwmin;
+
+ REG_WRITE(ah, AR_DLCL_IFS(q), SM(cwMin, AR_D_LCL_IFS_CWMIN)
+ | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
+ | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
+
+ REG_WRITE(ah, AR_DRETRY_LIMIT(q),
+ SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
+ | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
+ | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
+ );
+
+ REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+ REG_WRITE(ah, AR_DMISC(q),
+ AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
+
+ if (qi->tqi_cbrPeriod) {
+ REG_WRITE(ah, AR_QCBRCFG(q),
+ SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL)
+ | SM(qi->tqi_cbrOverflowLimit,
+ AR_Q_CBRCFG_OVF_THRESH));
+		REG_WRITE(ah, AR_QMISC(q),
+			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
+			  (qi->tqi_cbrOverflowLimit ?
+			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
+ }
+ if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
+ REG_WRITE(ah, AR_QRDYTIMECFG(q),
+ SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
+ AR_Q_RDYTIMECFG_EN);
+ }
+
+ REG_WRITE(ah, AR_DCHNTIME(q),
+ SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
+ (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
+
+ if (qi->tqi_burstTime
+ && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
+		REG_WRITE(ah, AR_QMISC(q),
+			  REG_READ(ah, AR_QMISC(q)) |
+			  AR_Q_MISC_RDYTIME_EXP_POLICY);
+	}
+
+ if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
+ REG_WRITE(ah, AR_DMISC(q),
+ REG_READ(ah, AR_DMISC(q)) |
+ AR_D_MISC_POST_FR_BKOFF_DIS);
+ }
+ if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
+ REG_WRITE(ah, AR_DMISC(q),
+ REG_READ(ah, AR_DMISC(q)) |
+ AR_D_MISC_FRAG_BKOFF_EN);
+ }
+ switch (qi->tqi_type) {
+ case ATH9K_TX_QUEUE_BEACON:
+ REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
+ | AR_Q_MISC_FSP_DBA_GATED
+ | AR_Q_MISC_BEACON_USE
+ | AR_Q_MISC_CBR_INCR_DIS1);
+
+ REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
+ | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+ AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
+ | AR_D_MISC_BEACON_USE
+ | AR_D_MISC_POST_FR_BKOFF_DIS);
+ break;
+ case ATH9K_TX_QUEUE_CAB:
+ REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
+ | AR_Q_MISC_FSP_DBA_GATED
+ | AR_Q_MISC_CBR_INCR_DIS1
+ | AR_Q_MISC_CBR_INCR_DIS0);
+		value = (qi->tqi_readyTime -
+			 (ah->ah_config.sw_beacon_response_time -
+			  ah->ah_config.dma_beacon_response_time) -
+			 ah->ah_config.additional_swba_backoff) * 1024;
+ REG_WRITE(ah, AR_QRDYTIMECFG(q),
+ value | AR_Q_RDYTIMECFG_EN);
+ REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
+ | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+ AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
+ break;
+ case ATH9K_TX_QUEUE_PSPOLL:
+		REG_WRITE(ah, AR_QMISC(q),
+			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
+ break;
+ case ATH9K_TX_QUEUE_UAPSD:
+ REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
+ | AR_D_MISC_POST_FR_BKOFF_DIS);
+ break;
+ default:
+ break;
+ }
+
+ if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
+ REG_WRITE(ah, AR_DMISC(q),
+ REG_READ(ah, AR_DMISC(q)) |
+ SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
+ AR_D_MISC_ARB_LOCKOUT_CNTRL) |
+ AR_D_MISC_POST_FR_BKOFF_DIS);
+ }
+
+ if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
+ ahp->ah_txOkInterruptMask |= 1 << q;
+ else
+ ahp->ah_txOkInterruptMask &= ~(1 << q);
+ if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
+ ahp->ah_txErrInterruptMask |= 1 << q;
+ else
+ ahp->ah_txErrInterruptMask &= ~(1 << q);
+ if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
+ ahp->ah_txDescInterruptMask |= 1 << q;
+ else
+ ahp->ah_txDescInterruptMask &= ~(1 << q);
+ if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
+ ahp->ah_txEolInterruptMask |= 1 << q;
+ else
+ ahp->ah_txEolInterruptMask &= ~(1 << q);
+ if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
+ ahp->ah_txUrnInterruptMask |= 1 << q;
+ else
+ ahp->ah_txUrnInterruptMask &= ~(1 << q);
+ ath9k_hw_set_txq_interrupts(ah, qi);
+
+ return true;
+}
+
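+/*
+ * Hand back the subset of *txqs whose TX interrupts are pending and
+ * clear those bits from the pending mask.
+ */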
+void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ *txqs &= ahp->ah_intrTxqs;
+ ahp->ah_intrTxqs &= ~(*txqs);
+}
+
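+/*
+ * Chain a buffer segment into a TX descriptor.  The first segment keeps
+ * the control words set up by ath9k_hw_set11n_txdesc(); the last segment
+ * copies ctl2/ctl3 from the first descriptor (ds0); every descriptor
+ * gets its TX status words cleared for reuse.
+ */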
+bool
+ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u32 segLen, bool firstSeg,
+ bool lastSeg, const struct ath_desc *ds0)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if (firstSeg) {
+ ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
+ } else if (lastSeg) {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = segLen;
+ ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
+ ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
+ } else {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = segLen | AR_TxMore;
+ ads->ds_ctl2 = 0;
+ ads->ds_ctl3 = 0;
+ }
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+ return true;
+}
+
+void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+
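+/*
+ * Decode the completion status of a TX descriptor into ds_txstat
+ * (errors, retries, RSSI, final rate index).  Returns -EINPROGRESS
+ * while the hardware has not yet set AR_TxDone; data/delimiter
+ * underruns also bump the TX FIFO trigger level.
+ */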
+int
+ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if ((ads->ds_txstatus9 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
+ ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
+ ds->ds_txstat.ts_status = 0;
+ ds->ds_txstat.ts_flags = 0;
+
+ if (ads->ds_txstatus1 & AR_ExcessiveRetries)
+ ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->ds_txstatus1 & AR_Filtered)
+ ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
+ if (ads->ds_txstatus1 & AR_FIFOUnderrun)
+ ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
+ if (ads->ds_txstatus9 & AR_TxOpExceeded)
+ ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->ds_txstatus1 & AR_TxTimerExpired)
+ ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->ds_txstatus1 & AR_DescCfgErr)
+ ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
+ ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
+ ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus0 & AR_TxBaStatus) {
+ ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
+ ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
+ ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
+ }
+
+ ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
+ switch (ds->ds_txstat.ts_rateindex) {
+ case 0:
+ ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
+ break;
+ case 1:
+ ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
+ break;
+ case 2:
+ ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
+ break;
+ case 3:
+ ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
+ break;
+ }
+
+ ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
+ ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
+ ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
+ ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
+ ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
+ ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
+ ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
+ ds->ds_txstat.evm0 = ads->AR_TxEVM0;
+ ds->ds_txstat.evm1 = ads->AR_TxEVM1;
+ ds->ds_txstat.evm2 = ads->AR_TxEVM2;
+ ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
+ ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
+ ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
+ ds->ds_txstat.ts_antenna = 1;
+
+ return 0;
+}
+
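+/*
+ * Build the first control words of an 11n TX descriptor: frame length,
+ * TX power (clamped to 63 after applying the power index offset), key
+ * index/type and the assorted flag bits.
+ */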
+void
+ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
+ u32 keyIx, enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ txPower += ahp->ah_txPowerIndexOffset;
+ if (txPower > 63)
+ txPower = 63;
+
+ ads->ds_ctl0 = (pktLen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txPower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
+
+ ads->ds_ctl1 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ds_ctl6 = SM(keyType, AR_EncrType);
+
+ if (AR_SREV_9285(ah)) {
+
+ ads->ds_ctl8 = 0;
+ ads->ds_ctl9 = 0;
+ ads->ds_ctl10 = 0;
+ ads->ds_ctl11 = 0;
+ }
+}
+
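+/*
+ * Program the multi-rate-retry scenario: RTS/CTS selection (mutually
+ * exclusive), per-series tries, rates, packet durations and rate flags.
+ * ctl2/ctl3 are mirrored into the last descriptor of the frame so the
+ * hardware sees consistent rate information.
+ */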
+void
+ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
+ struct ath_desc *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ar5416_desc *last_ads = AR5416DESC(lastds);
+ u32 ds_ctl0;
+
+ (void) nseries;
+ (void) rtsctsDuration;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ds_ctl0 = ads->ds_ctl0;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ds_ctl0 &= ~AR_CTSEnable;
+ ds_ctl0 |= AR_RTSEnable;
+ } else {
+ ds_ctl0 &= ~AR_RTSEnable;
+ ds_ctl0 |= AR_CTSEnable;
+ }
+
+ ads->ds_ctl0 = ds_ctl0;
+ } else {
+ ads->ds_ctl0 =
+ (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ds_ctl2 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ds_ctl3 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ds_ctl7 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ last_ads->ds_ctl2 = ads->ds_ctl2;
+ last_ads->ds_ctl3 = ads->ds_ctl3;
+}
+
+void
+ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
+ u32 aggrLen)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+
+ ads->ds_ctl6 &= ~AR_AggrLen;
+ ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
+}
+
+void
+ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
+ u32 numDelims)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ unsigned int ctl6;
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+
+ ctl6 = ads->ds_ctl6;
+ ctl6 &= ~AR_PadDelim;
+ ctl6 |= SM(numDelims, AR_PadDelim);
+ ads->ds_ctl6 = ctl6;
+}
+
+void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= AR_IsAggr;
+ ads->ds_ctl1 &= ~AR_MoreAggr;
+ ads->ds_ctl6 &= ~AR_PadDelim;
+}
+
+void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+void
+ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
+ u32 burstDuration)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl2 &= ~AR_BurstDur;
+ ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
+}
+
+void
+ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
+ u32 vmf)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if (vmf)
+ ads->ds_ctl0 |= AR_VirtMoreFrag;
+ else
+ ads->ds_ctl0 &= ~AR_VirtMoreFrag;
+}
+
+void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
+{
+ REG_WRITE(ah, AR_RXDP, rxdp);
+}
+
+void ath9k_hw_rxena(struct ath_hal *ah)
+{
+ REG_WRITE(ah, AR_CR, AR_CR_RXE);
+}
+
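+/*
+ * Enable or disable RX abort.  When enabling, wait for the RX state
+ * machine to go idle; if it does not, back out the abort bits and
+ * report failure.
+ */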
+bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
+{
+ if (set) {
+
+ REG_SET_BIT(ah, AR_DIAG_SW,
+ (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1,
+				   AR_OBS_BUS_1_RX_STATE, 0)) {
+ u32 reg;
+
+ REG_CLR_BIT(ah, AR_DIAG_SW,
+ (AR_DIAG_RX_DIS |
+ AR_DIAG_RX_ABORT));
+
+ reg = REG_READ(ah, AR_OBS_BUS_1);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "%s: rx failed to go idle in 10 ms RXSM=0x%x\n",
+ __func__, reg);
+
+ return false;
+ }
+ } else {
+ REG_CLR_BIT(ah, AR_DIAG_SW,
+ (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+ }
+
+ return true;
+}
+
+void
+ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
+ u32 filter1)
+{
+ REG_WRITE(ah, AR_MCAST_FIL0, filter0);
+ REG_WRITE(ah, AR_MCAST_FIL1, filter1);
+}
+
+bool
+ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u32 size, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
+
+ ads->ds_ctl1 = size & AR_BufLen;
+ if (flags & ATH9K_RXDESC_INTREQ)
+ ads->ds_ctl1 |= AR_RxIntrReq;
+
+ ads->ds_rxstatus8 &= ~AR_RxDone;
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ memset(&(ads->u), 0, sizeof(ads->u));
+ return true;
+}
+
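+/*
+ * Decode a completed RX descriptor into ds_rxstat, working on a local
+ * snapshot of the DMA'd status words.  Returns -EINPROGRESS until the
+ * hardware has set AR_RxDone.
+ */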
+int
+ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
+ u32 pa, struct ath_desc *nds, u64 tsf)
+{
+ struct ar5416_desc ads;
+ struct ar5416_desc *adsp = AR5416DESC(ds);
+
+ if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
+ return -EINPROGRESS;
+
+ ads.u.rx = adsp->u.rx;
+
+ ds->ds_rxstat.rs_status = 0;
+ ds->ds_rxstat.rs_flags = 0;
+
+ ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
+ ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
+
+ ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
+ ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
+ ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
+ ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
+ ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
+ ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
+ ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
+ if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
+ ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
+ else
+ ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
+
+ ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
+ ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+
+ ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
+ ds->ds_rxstat.rs_moreaggr =
+ (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
+ ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
+ ds->ds_rxstat.rs_flags =
+ (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
+ ds->ds_rxstat.rs_flags |=
+ (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
+
+ if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
+ ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+ if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
+ ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+ if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
+ ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+
+ if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
+
+ if (ads.ds_rxstatus8 & AR_CRCErr)
+ ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
+ else if (ads.ds_rxstatus8 & AR_PHYErr) {
+ u32 phyerr;
+
+ ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
+ phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
+ ds->ds_rxstat.rs_phyerr = phyerr;
+ } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+ ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
+ ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
+ }
+
+ return 0;
+}
+
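+/*
+ * Build the rateCodeToIndex reverse map and precompute long/short
+ * preamble ACK durations the first time a rate table is handed out.
+ */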
+static void ath9k_hw_setup_rate_table(struct ath_hal *ah,
+ struct ath9k_rate_table *rt)
+{
+ int i;
+
+ if (rt->rateCodeToIndex[0] != 0)
+ return;
+ for (i = 0; i < 256; i++)
+ rt->rateCodeToIndex[i] = (u8) -1;
+ for (i = 0; i < rt->rateCount; i++) {
+ u8 code = rt->info[i].rateCode;
+ u8 cix = rt->info[i].controlRate;
+
+ rt->rateCodeToIndex[code] = i;
+ rt->rateCodeToIndex[code | rt->info[i].shortPreamble] = i;
+
+ rt->info[i].lpAckDuration =
+ ath9k_hw_computetxtime(ah, rt,
+ WLAN_CTRL_FRAME_SIZE,
+ cix,
+ false);
+ rt->info[i].spAckDuration =
+ ath9k_hw_computetxtime(ah, rt,
+ WLAN_CTRL_FRAME_SIZE,
+ cix,
+ true);
+ }
+}
+
+const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
+ u32 mode)
+{
+ struct ath9k_rate_table *rt;
+ switch (mode) {
+ case ATH9K_MODE_11A:
+ rt = &ar5416_11a_table;
+ break;
+ case ATH9K_MODE_11B:
+ rt = &ar5416_11b_table;
+ break;
+ case ATH9K_MODE_11G:
+ rt = &ar5416_11g_table;
+ break;
+ case ATH9K_MODE_11NG_HT20:
+ case ATH9K_MODE_11NG_HT40PLUS:
+ case ATH9K_MODE_11NG_HT40MINUS:
+ rt = &ar5416_11ng_table;
+ break;
+ case ATH9K_MODE_11NA_HT20:
+ case ATH9K_MODE_11NA_HT40PLUS:
+ case ATH9K_MODE_11NA_HT40MINUS:
+ rt = &ar5416_11na_table;
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "%s: invalid mode 0x%x\n",
+ __func__, mode);
+ return NULL;
+ }
+ ath9k_hw_setup_rate_table(ah, rt);
+ return rt;
+}
+
+static const char *ath9k_hw_devname(u16 devid)
+{
+ switch (devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ return "Atheros 5416";
+ case AR9160_DEVID_PCI:
+ return "Atheros 9160";
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ return "Atheros 9280";
+ }
+ return NULL;
+}
+
+const char *ath9k_hw_probe(u16 vendorid, u16 devid)
+{
+ return vendorid == ATHEROS_VENDOR_ID ?
+ ath9k_hw_devname(devid) : NULL;
+}
+
+struct ath_hal *ath9k_hw_attach(u16 devid,
+ struct ath_softc *sc,
+ void __iomem *mem,
+ int *error)
+{
+ struct ath_hal *ah = NULL;
+
+ switch (devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ case AR9160_DEVID_PCI:
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ ah = ath9k_hw_do_attach(devid, sc, mem, error);
+ break;
+ default:
+		/* ah is still NULL here, so don't go through DPRINTF/ah_sc */
+		printk(KERN_ERR "ath9k: devid=0x%x not supported.\n",
+		       devid);
+ ah = NULL;
+ *error = -ENXIO;
+ break;
+ }
+ return ah;
+}
+
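+/*
+ * Frame transmit time in microseconds for a given rate table entry:
+ *   CCK:  SIFS + preamble/PLCP (halved for short preamble)
+ *         + (8 * frameLen * 1000) / rate_kbps
+ *   OFDM: SIFS + preamble + numSymbols * symbolTime, where
+ *         numSymbols = ceil((PLCP_BITS + 8 * frameLen) / bitsPerSymbol),
+ * with quarter- and half-rate variants of the OFDM timing constants.
+ */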
+u16
+ath9k_hw_computetxtime(struct ath_hal *ah,
+ const struct ath9k_rate_table *rates,
+ u32 frameLen, u16 rateix,
+ bool shortPreamble)
+{
+ u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
+ u32 kbps;
+
+ kbps = rates->info[rateix].rateKbps;
+
+ if (kbps == 0)
+ return 0;
+ switch (rates->info[rateix].phy) {
+
+ case PHY_CCK:
+ phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
+ if (shortPreamble && rates->info[rateix].shortPreamble)
+ phyTime >>= 1;
+ numBits = frameLen << 3;
+ txTime = CCK_SIFS_TIME + phyTime
+ + ((numBits * 1000) / kbps);
+ break;
+ case PHY_OFDM:
+ if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) {
+ bitsPerSymbol =
+ (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
+
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
+ txTime = OFDM_SIFS_TIME_QUARTER
+ + OFDM_PREAMBLE_TIME_QUARTER
+ + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
+ } else if (ah->ah_curchan &&
+ IS_CHAN_HALF_RATE(ah->ah_curchan)) {
+ bitsPerSymbol =
+ (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
+
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
+ txTime = OFDM_SIFS_TIME_HALF +
+ OFDM_PREAMBLE_TIME_HALF
+ + (numSymbols * OFDM_SYMBOL_TIME_HALF);
+ } else {
+ bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
+
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
+ txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
+ + (numSymbols * OFDM_SYMBOL_TIME);
+ }
+ break;
+
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
+ "%s: unknown phy %u (rate ix %u)\n", __func__,
+ rates->info[rateix].phy, rateix);
+ txTime = 0;
+ break;
+ }
+ return txTime;
+}
+
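+/*
+ * Map a channel centre frequency in MHz (plus channel flags) to an IEEE
+ * channel number, with special cases for channel 14 and the 4.9 GHz
+ * public-safety band.
+ */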
+u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags)
+{
+ if (flags & CHANNEL_2GHZ) {
+ if (freq == 2484)
+ return 14;
+ if (freq < 2484)
+ return (freq - 2407) / 5;
+ else
+ return 15 + ((freq - 2512) / 20);
+ } else if (flags & CHANNEL_5GHZ) {
+ if (ath9k_regd_is_public_safety_sku(ah) &&
+ IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
+ return ((freq * 10) +
+ (((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
+ } else if ((flags & CHANNEL_A) && (freq <= 5000)) {
+ return (freq - 4000) / 5;
+ } else {
+ return (freq - 5000) / 5;
+ }
+ } else {
+ if (freq == 2484)
+ return 14;
+ if (freq < 2484)
+ return (freq - 2407) / 5;
+ if (freq < 5000) {
+ if (ath9k_regd_is_public_safety_sku(ah)
+ && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
+				return ((freq * 10) +
+					(((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
+ } else if (freq > 4900) {
+ return (freq - 4000) / 5;
+ } else {
+ return 15 + ((freq - 2512) / 20);
+ }
+ }
+ return (freq - 5000) / 5;
+ }
+}
+
+int16_t
+ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_channel *ichan;
+
+ ichan = ath9k_regd_check_channel(ah, chan);
+ if (ichan == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
+ "%s: invalid channel %u/0x%x; no mapping\n",
+ __func__, chan->channel, chan->channelFlags);
+ return 0;
+ }
+ if (ichan->rawNoiseFloor == 0) {
+ enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan);
+ return NOISE_FLOOR[mode];
+ } else
+ return ichan->rawNoiseFloor;
+}
+
+bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (setting)
+ ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
+ else
+ ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
+ return true;
+}
+
+bool ath9k_hw_phycounters(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ return ahp->ah_hasHwPhyCounters ? true : false;
+}
+
+u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
+{
+ return REG_READ(ah, AR_QTXDP(q));
+}
+
+bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
+ u32 txdp)
+{
+ REG_WRITE(ah, AR_QTXDP(q), txdp);
+
+ return true;
+}
+
+bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
+{
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);
+
+ REG_WRITE(ah, AR_Q_TXE, 1 << q);
+
+ return true;
+}
+
+u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
+{
+ u32 npend;
+
+ npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
+ if (npend == 0) {
+
+ if (REG_READ(ah, AR_Q_TXE) & (1 << q))
+ npend = 1;
+ }
+ return npend;
+}
+
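+/*
+ * Stop TX DMA on a queue.  If frames are still pending after 100 ms,
+ * schedule a brief quiet period and force the channel idle to kill the
+ * current frame, then wait again for the queue to drain.
+ */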
+bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
+{
+ u32 wait;
+
+ REG_WRITE(ah, AR_Q_TXD, 1 << q);
+
+ for (wait = 1000; wait != 0; wait--) {
+ if (ath9k_hw_numtxpending(ah, q) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (ath9k_hw_numtxpending(ah, q)) {
+ u32 tsfLow, j;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+ "%s: Num of pending TX Frames %d on Q %d\n",
+ __func__, ath9k_hw_numtxpending(ah, q), q);
+
+ for (j = 0; j < 2; j++) {
+ tsfLow = REG_READ(ah, AR_TSF_L32);
+ REG_WRITE(ah, AR_QUIET2,
+ SM(10, AR_QUIET2_QUIET_DUR));
+ REG_WRITE(ah, AR_QUIET_PERIOD, 100);
+ REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
+ REG_SET_BIT(ah, AR_TIMER_MODE,
+ AR_QUIET_TIMER_EN);
+
+ if ((REG_READ(ah, AR_TSF_L32) >> 10) ==
+ (tsfLow >> 10)) {
+ break;
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+ "%s: TSF have moved while trying to set "
+ "quiet time TSF: 0x%08x\n",
+ __func__, tsfLow);
+ }
+
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+
+ udelay(200);
+ REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
+
+ wait = 1000;
+
+ while (ath9k_hw_numtxpending(ah, q)) {
+ if ((--wait) == 0) {
+ DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
+ "%s: Failed to stop Tx DMA in 100 "
+ "msec after killing last frame\n",
+ __func__);
+ break;
+ }
+ udelay(100);
+ }
+
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ }
+
+ REG_WRITE(ah, AR_Q_TXD, 0);
+ return wait != 0;
+}
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
new file mode 100644
index 00000000000..ae680f21ba7
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -0,0 +1,969 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HW_H
+#define HW_H
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
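+/*
+ * Hardware DMA descriptor: link and buffer pointers, two common control
+ * words, then either ten TX control plus ten TX status words or nine RX
+ * status words, overlaid through a union and accessed via the ds_ctlN,
+ * ds_txstatusN and ds_rxstatusN macros below.
+ */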
+struct ar5416_desc {
+ u32 ds_link;
+ u32 ds_data;
+ u32 ds_ctl0;
+ u32 ds_ctl1;
+ union {
+ struct {
+ u32 ctl2;
+ u32 ctl3;
+ u32 ctl4;
+ u32 ctl5;
+ u32 ctl6;
+ u32 ctl7;
+ u32 ctl8;
+ u32 ctl9;
+ u32 ctl10;
+ u32 ctl11;
+ u32 status0;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ u32 status9;
+ } tx;
+ struct {
+ u32 status0;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ } rx;
+ } u;
+} __packed;
+
+#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
+#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
+
+#define ds_ctl2 u.tx.ctl2
+#define ds_ctl3 u.tx.ctl3
+#define ds_ctl4 u.tx.ctl4
+#define ds_ctl5 u.tx.ctl5
+#define ds_ctl6 u.tx.ctl6
+#define ds_ctl7 u.tx.ctl7
+#define ds_ctl8 u.tx.ctl8
+#define ds_ctl9 u.tx.ctl9
+#define ds_ctl10 u.tx.ctl10
+#define ds_ctl11 u.tx.ctl11
+
+#define ds_txstatus0 u.tx.status0
+#define ds_txstatus1 u.tx.status1
+#define ds_txstatus2 u.tx.status2
+#define ds_txstatus3 u.tx.status3
+#define ds_txstatus4 u.tx.status4
+#define ds_txstatus5 u.tx.status5
+#define ds_txstatus6 u.tx.status6
+#define ds_txstatus7 u.tx.status7
+#define ds_txstatus8 u.tx.status8
+#define ds_txstatus9 u.tx.status9
+
+#define ds_rxstatus0 u.rx.status0
+#define ds_rxstatus1 u.rx.status1
+#define ds_rxstatus2 u.rx.status2
+#define ds_rxstatus3 u.rx.status3
+#define ds_rxstatus4 u.rx.status4
+#define ds_rxstatus5 u.rx.status5
+#define ds_rxstatus6 u.rx.status6
+#define ds_rxstatus7 u.rx.status7
+#define ds_rxstatus8 u.rx.status8
+
+#define AR_FrameLen 0x00000fff
+#define AR_VirtMoreFrag 0x00001000
+#define AR_TxCtlRsvd00 0x0000e000
+#define AR_XmitPower 0x003f0000
+#define AR_XmitPower_S 16
+#define AR_RTSEnable 0x00400000
+#define AR_VEOL 0x00800000
+#define AR_ClrDestMask 0x01000000
+#define AR_TxCtlRsvd01 0x1e000000
+#define AR_TxIntrReq 0x20000000
+#define AR_DestIdxValid 0x40000000
+#define AR_CTSEnable 0x80000000
+
+#define AR_BufLen 0x00000fff
+#define AR_TxMore 0x00001000
+#define AR_DestIdx 0x000fe000
+#define AR_DestIdx_S 13
+#define AR_FrameType 0x00f00000
+#define AR_FrameType_S 20
+#define AR_NoAck 0x01000000
+#define AR_InsertTS 0x02000000
+#define AR_CorruptFCS 0x04000000
+#define AR_ExtOnly 0x08000000
+#define AR_ExtAndCtl 0x10000000
+#define AR_MoreAggr 0x20000000
+#define AR_IsAggr 0x40000000
+
+#define AR_BurstDur 0x00007fff
+#define AR_BurstDur_S 0
+#define AR_DurUpdateEna 0x00008000
+#define AR_XmitDataTries0 0x000f0000
+#define AR_XmitDataTries0_S 16
+#define AR_XmitDataTries1 0x00f00000
+#define AR_XmitDataTries1_S 20
+#define AR_XmitDataTries2 0x0f000000
+#define AR_XmitDataTries2_S 24
+#define AR_XmitDataTries3 0xf0000000
+#define AR_XmitDataTries3_S 28
+
+#define AR_XmitRate0 0x000000ff
+#define AR_XmitRate0_S 0
+#define AR_XmitRate1 0x0000ff00
+#define AR_XmitRate1_S 8
+#define AR_XmitRate2 0x00ff0000
+#define AR_XmitRate2_S 16
+#define AR_XmitRate3 0xff000000
+#define AR_XmitRate3_S 24
+
+#define AR_PacketDur0 0x00007fff
+#define AR_PacketDur0_S 0
+#define AR_RTSCTSQual0 0x00008000
+#define AR_PacketDur1 0x7fff0000
+#define AR_PacketDur1_S 16
+#define AR_RTSCTSQual1 0x80000000
+
+#define AR_PacketDur2 0x00007fff
+#define AR_PacketDur2_S 0
+#define AR_RTSCTSQual2 0x00008000
+#define AR_PacketDur3 0x7fff0000
+#define AR_PacketDur3_S 16
+#define AR_RTSCTSQual3 0x80000000
+
+#define AR_AggrLen 0x0000ffff
+#define AR_AggrLen_S 0
+#define AR_TxCtlRsvd60 0x00030000
+#define AR_PadDelim 0x03fc0000
+#define AR_PadDelim_S 18
+#define AR_EncrType 0x0c000000
+#define AR_EncrType_S 26
+#define AR_TxCtlRsvd61 0xf0000000
+
+#define AR_2040_0 0x00000001
+#define AR_GI0 0x00000002
+#define AR_ChainSel0 0x0000001c
+#define AR_ChainSel0_S 2
+#define AR_2040_1 0x00000020
+#define AR_GI1 0x00000040
+#define AR_ChainSel1 0x00000380
+#define AR_ChainSel1_S 7
+#define AR_2040_2 0x00000400
+#define AR_GI2 0x00000800
+#define AR_ChainSel2 0x00007000
+#define AR_ChainSel2_S 12
+#define AR_2040_3 0x00008000
+#define AR_GI3 0x00010000
+#define AR_ChainSel3 0x000e0000
+#define AR_ChainSel3_S 17
+#define AR_RTSCTSRate 0x0ff00000
+#define AR_RTSCTSRate_S 20
+#define AR_TxCtlRsvd70 0xf0000000
+
+#define AR_TxRSSIAnt00 0x000000ff
+#define AR_TxRSSIAnt00_S 0
+#define AR_TxRSSIAnt01 0x0000ff00
+#define AR_TxRSSIAnt01_S 8
+#define AR_TxRSSIAnt02 0x00ff0000
+#define AR_TxRSSIAnt02_S 16
+#define AR_TxStatusRsvd00 0x3f000000
+#define AR_TxBaStatus 0x40000000
+#define AR_TxStatusRsvd01 0x80000000
+
+#define AR_FrmXmitOK 0x00000001
+#define AR_ExcessiveRetries 0x00000002
+#define AR_FIFOUnderrun 0x00000004
+#define AR_Filtered 0x00000008
+#define AR_RTSFailCnt 0x000000f0
+#define AR_RTSFailCnt_S 4
+#define AR_DataFailCnt 0x00000f00
+#define AR_DataFailCnt_S 8
+#define AR_VirtRetryCnt 0x0000f000
+#define AR_VirtRetryCnt_S 12
+#define AR_TxDelimUnderrun 0x00010000
+#define AR_TxDataUnderrun 0x00020000
+#define AR_DescCfgErr 0x00040000
+#define AR_TxTimerExpired 0x00080000
+#define AR_TxStatusRsvd10 0xfff00000
+
+#define AR_SendTimestamp ds_txstatus2
+#define AR_BaBitmapLow ds_txstatus3
+#define AR_BaBitmapHigh ds_txstatus4
+
+#define AR_TxRSSIAnt10 0x000000ff
+#define AR_TxRSSIAnt10_S 0
+#define AR_TxRSSIAnt11 0x0000ff00
+#define AR_TxRSSIAnt11_S 8
+#define AR_TxRSSIAnt12 0x00ff0000
+#define AR_TxRSSIAnt12_S 16
+#define AR_TxRSSICombined 0xff000000
+#define AR_TxRSSICombined_S 24
+
+#define AR_TxEVM0 ds_txstatus5
+#define AR_TxEVM1 ds_txstatus6
+#define AR_TxEVM2 ds_txstatus7
+
+#define AR_TxDone 0x00000001
+#define AR_SeqNum 0x00001ffe
+#define AR_SeqNum_S 1
+#define AR_TxStatusRsvd80 0x0001e000
+#define AR_TxOpExceeded 0x00020000
+#define AR_TxStatusRsvd81 0x001c0000
+#define AR_FinalTxIdx 0x00600000
+#define AR_FinalTxIdx_S 21
+#define AR_TxStatusRsvd82 0x01800000
+#define AR_PowerMgmt 0x02000000
+#define AR_TxStatusRsvd83 0xfc000000
+
+#define AR_RxCTLRsvd00 0xffffffff
+
+#define AR_BufLen 0x00000fff
+#define AR_RxCtlRsvd00 0x00001000
+#define AR_RxIntrReq 0x00002000
+#define AR_RxCtlRsvd01 0xffffc000
+
+#define AR_RxRSSIAnt00 0x000000ff
+#define AR_RxRSSIAnt00_S 0
+#define AR_RxRSSIAnt01 0x0000ff00
+#define AR_RxRSSIAnt01_S 8
+#define AR_RxRSSIAnt02 0x00ff0000
+#define AR_RxRSSIAnt02_S 16
+#define AR_RxRate 0xff000000
+#define AR_RxRate_S 24
+#define AR_RxStatusRsvd00 0xff000000
+
+#define AR_DataLen 0x00000fff
+#define AR_RxMore 0x00001000
+#define AR_NumDelim 0x003fc000
+#define AR_NumDelim_S 14
+#define AR_RxStatusRsvd10 0xff800000
+
+#define AR_RcvTimestamp ds_rxstatus2
+
+#define AR_GI 0x00000001
+#define AR_2040 0x00000002
+#define AR_Parallel40 0x00000004
+#define AR_Parallel40_S 2
+#define AR_RxStatusRsvd30 0x000000f8
+#define AR_RxAntenna 0xffffff00
+#define AR_RxAntenna_S 8
+
+#define AR_RxRSSIAnt10 0x000000ff
+#define AR_RxRSSIAnt10_S 0
+#define AR_RxRSSIAnt11 0x0000ff00
+#define AR_RxRSSIAnt11_S 8
+#define AR_RxRSSIAnt12 0x00ff0000
+#define AR_RxRSSIAnt12_S 16
+#define AR_RxRSSICombined 0xff000000
+#define AR_RxRSSICombined_S 24
+
+#define AR_RxEVM0 ds_rxstatus4
+#define AR_RxEVM1 ds_rxstatus5
+#define AR_RxEVM2 ds_rxstatus6
+
+#define AR_RxDone 0x00000001
+#define AR_RxFrameOK 0x00000002
+#define AR_CRCErr 0x00000004
+#define AR_DecryptCRCErr 0x00000008
+#define AR_PHYErr 0x00000010
+#define AR_MichaelErr 0x00000020
+#define AR_PreDelimCRCErr 0x00000040
+#define AR_RxStatusRsvd70 0x00000080
+#define AR_RxKeyIdxValid 0x00000100
+#define AR_KeyIdx 0x0000fe00
+#define AR_KeyIdx_S 9
+#define AR_PHYErrCode 0x0000ff00
+#define AR_PHYErrCode_S 8
+#define AR_RxMoreAggr 0x00010000
+#define AR_RxAggr 0x00020000
+#define AR_PostDelimCRCErr 0x00040000
+#define AR_RxStatusRsvd71 0x3ff80000
+#define AR_DecryptBusyErr 0x40000000
+#define AR_KeyMiss 0x80000000
+
+#define AR5416_MAGIC 0x19641014
+
+#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
+ MS(ads->ds_rxstatus0, AR_RxRate) : \
+ (ads->ds_rxstatus3 >> 2) & 0xFF)
+#define RXSTATUS_DUPLICATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
+ MS(ads->ds_rxstatus3, AR_Parallel40) : \
+ (ads->ds_rxstatus3 >> 10) & 0x1)
+
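+/*
+ * Per-series helpers used by ath9k_hw_set11n_ratescenario(): they shift
+ * each series' tries, rate, packet duration (with its RTS/CTS qualifier)
+ * and rate flags (20/40, half-GI, chain select) into the slot for
+ * series 0-3 of the descriptor control words.
+ */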
+#define set11nTries(_series, _index) \
+ (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
+
+#define set11nRate(_series, _index) \
+ (SM((_series)[_index].Rate, AR_XmitRate##_index))
+
+#define set11nPktDurRTSCTS(_series, _index) \
+ (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \
+ ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \
+ AR_RTSCTSQual##_index : 0))
+
+#define set11nRateFlags(_series, _index) \
+ (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
+ AR_2040_##_index : 0) \
+ |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
+ AR_GI##_index : 0) \
+ |SM((_series)[_index].ChSel, AR_ChainSel##_index))
+
+#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100)
+
+#define INIT_CONFIG_STATUS 0x00000000
+#define INIT_RSSI_THR 0x00000700
+#define INIT_BCON_CNTRL_REG 0x00000000
+
+#define MIN_TX_FIFO_THRESHOLD 0x1
+#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
+#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD
+
+#define NUM_CORNER_FIX_BITS_2133 7
+#define CCK_OFDM_GAIN_DELTA 15
+
+struct ar5416AniState {
+ struct ath9k_channel c;
+ u8 noiseImmunityLevel;
+ u8 spurImmunityLevel;
+ u8 firstepLevel;
+ u8 ofdmWeakSigDetectOff;
+ u8 cckWeakSigThreshold;
+ u32 listenTime;
+ u32 ofdmTrigHigh;
+ u32 ofdmTrigLow;
+ int32_t cckTrigHigh;
+ int32_t cckTrigLow;
+ int32_t rssiThrLow;
+ int32_t rssiThrHigh;
+ u32 noiseFloor;
+ u32 txFrameCount;
+ u32 rxFrameCount;
+ u32 cycleCount;
+ u32 ofdmPhyErrCount;
+ u32 cckPhyErrCount;
+ u32 ofdmPhyErrBase;
+ u32 cckPhyErrBase;
+ int16_t pktRssi[2];
+ int16_t ofdmErrRssi[2];
+ int16_t cckErrRssi[2];
+};
+
+#define HAL_PROCESS_ANI 0x00000001
+#define HAL_RADAR_EN 0x80000000
+#define HAL_AR_EN 0x40000000
+
+#define DO_ANI(ah) \
+ ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
+
+struct ar5416Stats {
+ u32 ast_ani_niup;
+ u32 ast_ani_nidown;
+ u32 ast_ani_spurup;
+ u32 ast_ani_spurdown;
+ u32 ast_ani_ofdmon;
+ u32 ast_ani_ofdmoff;
+ u32 ast_ani_cckhigh;
+ u32 ast_ani_ccklow;
+ u32 ast_ani_stepup;
+ u32 ast_ani_stepdown;
+ u32 ast_ani_ofdmerrs;
+ u32 ast_ani_cckerrs;
+ u32 ast_ani_reset;
+ u32 ast_ani_lzero;
+ u32 ast_ani_lneg;
+ struct ath9k_mib_stats ast_mibstats;
+ struct ath9k_node_stats ast_nodestats;
+};
+
+#define AR5416_OPFLAGS_11A 0x01
+#define AR5416_OPFLAGS_11G 0x02
+#define AR5416_OPFLAGS_N_5G_HT40 0x04
+#define AR5416_OPFLAGS_N_2G_HT40 0x08
+#define AR5416_OPFLAGS_N_5G_HT20 0x10
+#define AR5416_OPFLAGS_N_2G_HT20 0x20
+
+#define EEP_RFSILENT_ENABLED 0x0001
+#define EEP_RFSILENT_ENABLED_S 0
+#define EEP_RFSILENT_POLARITY 0x0002
+#define EEP_RFSILENT_POLARITY_S 1
+#define EEP_RFSILENT_GPIO_SEL 0x001c
+#define EEP_RFSILENT_GPIO_SEL_S 2
+
+#define AR5416_EEP_NO_BACK_VER 0x1
+#define AR5416_EEP_VER 0xE
+#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
+#define AR5416_EEP_MINOR_VER_2 0x2
+#define AR5416_EEP_MINOR_VER_3 0x3
+#define AR5416_EEP_MINOR_VER_7 0x7
+#define AR5416_EEP_MINOR_VER_9 0x9
+
+#define AR5416_EEP_START_LOC 256
+#define AR5416_NUM_5G_CAL_PIERS 8
+#define AR5416_NUM_2G_CAL_PIERS 4
+#define AR5416_NUM_5G_20_TARGET_POWERS 8
+#define AR5416_NUM_5G_40_TARGET_POWERS 8
+#define AR5416_NUM_2G_CCK_TARGET_POWERS 3
+#define AR5416_NUM_2G_20_TARGET_POWERS 4
+#define AR5416_NUM_2G_40_TARGET_POWERS 4
+#define AR5416_NUM_CTLS 24
+#define AR5416_NUM_BAND_EDGES 8
+#define AR5416_NUM_PD_GAINS 4
+#define AR5416_PD_GAINS_IN_MASK 4
+#define AR5416_PD_GAIN_ICEPTS 5
+#define AR5416_EEPROM_MODAL_SPURS 5
+#define AR5416_MAX_RATE_POWER 63
+#define AR5416_NUM_PDADC_VALUES 128
+#define AR5416_NUM_RATES 16
+#define AR5416_BCHAN_UNUSED 0xFF
+#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
+#define AR5416_EEPMISC_BIG_ENDIAN 0x01
+#define AR5416_MAX_CHAINS 3
+#define AR5416_ANT_16S 25
+
+#define AR5416_NUM_ANT_CHAIN_FIELDS 7
+#define AR5416_NUM_ANT_COMMON_FIELDS 4
+#define AR5416_SIZE_ANT_CHAIN_FIELD 3
+#define AR5416_SIZE_ANT_COMMON_FIELD 4
+#define AR5416_ANT_CHAIN_MASK 0x7
+#define AR5416_ANT_COMMON_MASK 0xf
+#define AR5416_CHAIN_0_IDX 0
+#define AR5416_CHAIN_1_IDX 1
+#define AR5416_CHAIN_2_IDX 2
+
+#define AR5416_PWR_TABLE_OFFSET -5
+#define AR5416_LEGACY_CHAINMASK 1
+
+enum eeprom_param {
+ EEP_NFTHRESH_5,
+ EEP_NFTHRESH_2,
+ EEP_MAC_MSW,
+ EEP_MAC_MID,
+ EEP_MAC_LSW,
+ EEP_REG_0,
+ EEP_REG_1,
+ EEP_OP_CAP,
+ EEP_OP_MODE,
+ EEP_RF_SILENT,
+ EEP_OB_5,
+ EEP_DB_5,
+ EEP_OB_2,
+ EEP_DB_2,
+ EEP_MINOR_REV,
+ EEP_TX_MASK,
+ EEP_RX_MASK,
+};
+
+enum ar5416_rates {
+ rate6mb, rate9mb, rate12mb, rate18mb,
+ rate24mb, rate36mb, rate48mb, rate54mb,
+ rate1l, rate2l, rate2s, rate5_5l,
+ rate5_5s, rate11l, rate11s, rateXr,
+ rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3,
+ rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7,
+ rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3,
+ rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7,
+ rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm,
+ Ar5416RateSize
+};
+
+struct base_eep_header {
+ u16 length;
+ u16 checksum;
+ u16 version;
+ u8 opCapFlags;
+ u8 eepMisc;
+ u16 regDmn[2];
+ u8 macAddr[6];
+ u8 rxMask;
+ u8 txMask;
+ u16 rfSilent;
+ u16 blueToothOptions;
+ u16 deviceCap;
+ u32 binBuildNumber;
+ u8 deviceType;
+ u8 pwdclkind;
+ u8 futureBase[32];
+} __packed;
+
+struct spur_chan {
+ u16 spurChan;
+ u8 spurRangeLow;
+ u8 spurRangeHigh;
+} __packed;
+
+struct modal_eep_header {
+ u32 antCtrlChain[AR5416_MAX_CHAINS];
+ u32 antCtrlCommon;
+ u8 antennaGainCh[AR5416_MAX_CHAINS];
+ u8 switchSettling;
+ u8 txRxAttenCh[AR5416_MAX_CHAINS];
+ u8 rxTxMarginCh[AR5416_MAX_CHAINS];
+ u8 adcDesiredSize;
+ u8 pgaDesiredSize;
+ u8 xlnaGainCh[AR5416_MAX_CHAINS];
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ u8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
+ u8 xpdGain;
+ u8 xpd;
+ u8 iqCalICh[AR5416_MAX_CHAINS];
+ u8 iqCalQCh[AR5416_MAX_CHAINS];
+ u8 pdGainOverlap;
+ u8 ob;
+ u8 db;
+ u8 xpaBiasLvl;
+ u8 pwrDecreaseFor2Chain;
+ u8 pwrDecreaseFor3Chain;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 ht40PowerIncForPdadc;
+ u8 bswAtten[AR5416_MAX_CHAINS];
+ u8 bswMargin[AR5416_MAX_CHAINS];
+ u8 swSettleHt40;
+ u8 xatten2Db[AR5416_MAX_CHAINS];
+ u8 xatten2Margin[AR5416_MAX_CHAINS];
+ u8 ob_ch1;
+ u8 db_ch1;
+ u8 useAnt1:1,
+ force_xpaon:1,
+ local_bias:1,
+ femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
+ u8 futureModalar9280;
+ u16 xpaBiasLvlFreq[3];
+ u8 futureModal[6];
+
+ struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
+} __packed;
+
+struct cal_data_per_freq {
+ u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+ u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+} __packed;
+
+struct cal_target_power_leg {
+ u8 bChannel;
+ u8 tPow2x[4];
+} __packed;
+
+struct cal_target_power_ht {
+ u8 bChannel;
+ u8 tPow2x[8];
+} __packed;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+struct cal_ctl_edges {
+ u8 bChannel;
+ u8 flag:2, tPower:6;
+} __packed;
+#else
+struct cal_ctl_edges {
+ u8 bChannel;
+ u8 tPower:6, flag:2;
+} __packed;
+#endif
+
+struct cal_ctl_data {
+ struct cal_ctl_edges
+ ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
+} __packed;
+
+struct ar5416_eeprom {
+ struct base_eep_header baseEepHeader;
+ u8 custData[64];
+ struct modal_eep_header modalHeader[2];
+ u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS];
+ u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS];
+ struct cal_data_per_freq
+ calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS];
+ struct cal_data_per_freq
+ calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
+ struct cal_target_power_leg
+ calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS];
+ struct cal_target_power_leg
+ calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_target_power_leg
+ calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex[AR5416_NUM_CTLS];
+ struct cal_ctl_data ctlData[AR5416_NUM_CTLS];
+ u8 padding;
+} __packed;
+
+struct ar5416IniArray {
+ u32 *ia_array;
+ u32 ia_rows;
+ u32 ia_columns;
+};
+
+#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
+ (iniarray)->ia_array = (u32 *)(array); \
+ (iniarray)->ia_rows = (rows); \
+ (iniarray)->ia_columns = (columns); \
+ } while (0)
+
+#define INI_RA(iniarray, row, column) \
+ (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])
+
+#define INIT_CAL(_perCal) do { \
+ (_perCal)->calState = CAL_WAITING; \
+ (_perCal)->calNext = NULL; \
+ } while (0)
+
+#define INSERT_CAL(_ahp, _perCal) \
+ do { \
+ if ((_ahp)->ah_cal_list_last == NULL) { \
+ (_ahp)->ah_cal_list = \
+ (_ahp)->ah_cal_list_last = (_perCal); \
+ ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
+ } else { \
+ ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
+ (_ahp)->ah_cal_list_last = (_perCal); \
+ (_perCal)->calNext = (_ahp)->ah_cal_list; \
+ } \
+ } while (0)
+
+enum hal_cal_types {
+ ADC_DC_INIT_CAL = 0x1,
+ ADC_GAIN_CAL = 0x2,
+ ADC_DC_CAL = 0x4,
+ IQ_MISMATCH_CAL = 0x8
+};
+
+enum hal_cal_state {
+ CAL_INACTIVE,
+ CAL_WAITING,
+ CAL_RUNNING,
+ CAL_DONE
+};
+
+#define MIN_CAL_SAMPLES 1
+#define MAX_CAL_SAMPLES 64
+#define INIT_LOG_COUNT 5
+#define PER_MIN_LOG_COUNT 2
+#define PER_MAX_LOG_COUNT 10
+
+struct hal_percal_data {
+ enum hal_cal_types calType;
+ u32 calNumSamples;
+ u32 calCountMax;
+ void (*calCollect) (struct ath_hal *);
+ void (*calPostProc) (struct ath_hal *, u8);
+};
+
+struct hal_cal_list {
+ const struct hal_percal_data *calData;
+ enum hal_cal_state calState;
+ struct hal_cal_list *calNext;
+};
+
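+/*
+ * Chip-specific HAL state; the generic struct ath_hal is embedded first
+ * so the AH5416() macro can cast between the two.
+ */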
+struct ath_hal_5416 {
+ struct ath_hal ah;
+ struct ar5416_eeprom ah_eeprom;
+ u8 ah_macaddr[ETH_ALEN];
+ u8 ah_bssid[ETH_ALEN];
+ u8 ah_bssidmask[ETH_ALEN];
+ u16 ah_assocId;
+ int16_t ah_curchanRadIndex;
+ u32 ah_maskReg;
+ struct ar5416Stats ah_stats;
+ u32 ah_txDescMask;
+ u32 ah_txOkInterruptMask;
+ u32 ah_txErrInterruptMask;
+ u32 ah_txDescInterruptMask;
+ u32 ah_txEolInterruptMask;
+ u32 ah_txUrnInterruptMask;
+ struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
+ enum ath9k_power_mode ah_powerMode;
+ bool ah_chipFullSleep;
+ u32 ah_atimWindow;
+ enum ath9k_ant_setting ah_diversityControl;
+ u16 ah_antennaSwitchSwap;
+ enum hal_cal_types ah_suppCals;
+ struct hal_cal_list ah_iqCalData;
+ struct hal_cal_list ah_adcGainCalData;
+ struct hal_cal_list ah_adcDcCalInitData;
+ struct hal_cal_list ah_adcDcCalData;
+ struct hal_cal_list *ah_cal_list;
+ struct hal_cal_list *ah_cal_list_last;
+ struct hal_cal_list *ah_cal_list_curr;
+#define ah_totalPowerMeasI ah_Meas0.unsign
+#define ah_totalPowerMeasQ ah_Meas1.unsign
+#define ah_totalIqCorrMeas ah_Meas2.sign
+#define ah_totalAdcIOddPhase ah_Meas0.unsign
+#define ah_totalAdcIEvenPhase ah_Meas1.unsign
+#define ah_totalAdcQOddPhase ah_Meas2.unsign
+#define ah_totalAdcQEvenPhase ah_Meas3.unsign
+#define ah_totalAdcDcOffsetIOddPhase ah_Meas0.sign
+#define ah_totalAdcDcOffsetIEvenPhase ah_Meas1.sign
+#define ah_totalAdcDcOffsetQOddPhase ah_Meas2.sign
+#define ah_totalAdcDcOffsetQEvenPhase ah_Meas3.sign
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } ah_Meas0;
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } ah_Meas1;
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } ah_Meas2;
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } ah_Meas3;
+ u16 ah_CalSamples;
+ u32 ah_tx6PowerInHalfDbm;
+ u32 ah_staId1Defaults;
+ u32 ah_miscMode;
+ bool ah_tpcEnabled;
+ u32 ah_beaconInterval;
+ enum {
+ AUTO_32KHZ,
+ USE_32KHZ,
+ DONT_USE_32KHZ,
+ } ah_enable32kHzClock;
+ u32 *ah_analogBank0Data;
+ u32 *ah_analogBank1Data;
+ u32 *ah_analogBank2Data;
+ u32 *ah_analogBank3Data;
+ u32 *ah_analogBank6Data;
+ u32 *ah_analogBank6TPCData;
+ u32 *ah_analogBank7Data;
+ u32 *ah_addac5416_21;
+ u32 *ah_bank6Temp;
+ u32 ah_ofdmTxPower;
+ int16_t ah_txPowerIndexOffset;
+ u32 ah_slottime;
+ u32 ah_acktimeout;
+ u32 ah_ctstimeout;
+ u32 ah_globaltxtimeout;
+ u8 ah_gBeaconRate;
+ u32 ah_gpioSelect;
+ u32 ah_polarity;
+ u32 ah_gpioBit;
+ bool ah_eepEnabled;
+ u32 ah_procPhyErr;
+ bool ah_hasHwPhyCounters;
+ u32 ah_aniPeriod;
+ struct ar5416AniState *ah_curani;
+ struct ar5416AniState ah_ani[255];
+ int ah_totalSizeDesired[5];
+ int ah_coarseHigh[5];
+ int ah_coarseLow[5];
+ int ah_firpwr[5];
+ u16 ah_ratesArray[16];
+ u32 ah_intrTxqs;
+ bool ah_intrMitigation;
+ u32 ah_cycleCount;
+ u32 ah_ctlBusy;
+ u32 ah_extBusy;
+ enum ath9k_ht_extprotspacing ah_extprotspacing;
+ u8 ah_txchainmask;
+ u8 ah_rxchainmask;
+ int ah_hwp;
+ void __iomem *ah_cal_mem;
+ enum ath9k_ani_cmd ah_ani_function;
+ struct ar5416IniArray ah_iniModes;
+ struct ar5416IniArray ah_iniCommon;
+ struct ar5416IniArray ah_iniBank0;
+ struct ar5416IniArray ah_iniBB_RfGain;
+ struct ar5416IniArray ah_iniBank1;
+ struct ar5416IniArray ah_iniBank2;
+ struct ar5416IniArray ah_iniBank3;
+ struct ar5416IniArray ah_iniBank6;
+ struct ar5416IniArray ah_iniBank6TPC;
+ struct ar5416IniArray ah_iniBank7;
+ struct ar5416IniArray ah_iniAddac;
+ struct ar5416IniArray ah_iniPcieSerdes;
+ struct ar5416IniArray ah_iniModesAdditional;
+};
+#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah))
+
+#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+
+#define IS_5416_EMU(ah) \
+ ((ah->ah_devid == AR5416_DEVID_EMU) || \
+ (ah->ah_devid == AR5416_DEVID_EMU_PCIE))
+
+#define ar5416RfDetach(ah) do { \
+ if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \
+ AH5416(ah)->ah_rfHal.rfDetach(ah); \
+ } while (0)
+
+#define ath9k_hw_use_flash(_ah) \
+ (!(_ah->ah_flags & AH_USE_EEPROM))
+
+
+#define DO_DELAY(x) do { \
+ if ((++(x) % 64) == 0) \
+ udelay(1); \
+ } while (0)
+
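+/*
+ * Write one column of an initialisation array: column 0 of each row is
+ * the register address, 'column' selects the value, and DO_DELAY adds a
+ * 1 us pause every 64 writes.
+ */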
+#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
+ int r; \
+ for (r = 0; r < ((iniarray)->ia_rows); r++) { \
+ REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
+ INI_RA((iniarray), r, (column))); \
+ DO_DELAY(regWr); \
+ } \
+ } while (0)
+
+#define BASE_ACTIVATE_DELAY 100
+#define RTC_PLL_SETTLE_DELAY 1000
+#define COEF_SCALE_S 24
+#define HT40_CHANNEL_CENTER_SHIFT 10
+
+#define ar5416CheckOpMode(_opmode) \
+ ((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) || \
+ (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR))
+
+#define AR5416_EEPROM_MAGIC_OFFSET 0x0
+
+#define AR5416_EEPROM_S 2
+#define AR5416_EEPROM_OFFSET 0x2000
+#define AR5416_EEPROM_START_ADDR \
+ (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
+#define AR5416_EEPROM_MAX 0xae0
+#define ar5416_get_eep_ver(_ahp) \
+ (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF)
+#define ar5416_get_eep_rev(_ahp) \
+ (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF)
+#define ar5416_get_ntxchains(_txchainmask) \
+ (((_txchainmask >> 2) & 1) + \
+ ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
+
+#define IS_EEP_MINOR_V3(_ahp) \
+ (ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_3)
+
+#define FIXED_CCA_THRESHOLD 15
+
+#ifdef __BIG_ENDIAN
+#define AR5416_EEPROM_MAGIC 0x5aa5
+#else
+#define AR5416_EEPROM_MAGIC 0xa55a
+#endif
+
+#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
+
+#define ATH9K_ANTENNA0_CHAINMASK 0x1
+#define ATH9K_ANTENNA1_CHAINMASK 0x2
+
+#define ATH9K_NUM_DMA_DEBUG_REGS 8
+#define ATH9K_NUM_QUEUES 10
+
+#define HAL_NOISE_IMMUNE_MAX 4
+#define HAL_SPUR_IMMUNE_MAX 7
+#define HAL_FIRST_STEP_MAX 2
+
+#define ATH9K_ANI_OFDM_TRIG_HIGH 500
+#define ATH9K_ANI_OFDM_TRIG_LOW 200
+#define ATH9K_ANI_CCK_TRIG_HIGH 200
+#define ATH9K_ANI_CCK_TRIG_LOW 100
+#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
+#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
+#define ATH9K_ANI_CCK_WEAK_SIG_THR false
+#define ATH9K_ANI_SPUR_IMMUNE_LVL 7
+#define ATH9K_ANI_FIRSTEP_LVL 0
+#define ATH9K_ANI_RSSI_THR_HIGH 40
+#define ATH9K_ANI_RSSI_THR_LOW 7
+#define ATH9K_ANI_PERIOD 100
+
+#define AR_GPIOD_MASK 0x00001FFF
+#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
+
+#define MAX_ANALOG_START 319
+
+#define HAL_EP_RND(x, mul) \
+ ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
+#define BEACON_RSSI(ahp) \
+ HAL_EP_RND(ahp->ah_stats.ast_nodestats.ns_avgbrssi, \
+ ATH9K_RSSI_EP_MULTIPLIER)
+
+#define ah_mibStats ah_stats.ast_mibstats
+
+#define AH_TIMEOUT 100000
+#define AH_TIME_QUANTUM 10
+
+#define IS(_c, _f) (((_c)->channelFlags & _f) || 0)
+
+#define AR_KEYTABLE_SIZE 128
+#define POWER_UP_TIME 200000
+
+#define EXT_ADDITIVE (0x8000)
+#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
+#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
+#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
+
+#define SUB_NUM_CTL_MODES_AT_5G_40 2
+#define SUB_NUM_CTL_MODES_AT_2G_40 3
+#define SPUR_RSSI_THRESH 40
+
+#define TU_TO_USEC(_tu) ((_tu) << 10)
+
+#define CAB_TIMEOUT_VAL 10
+#define BEACON_TIMEOUT_VAL 10
+#define MIN_BEACON_TIMEOUT_VAL 1
+#define SLEEP_SLOP 3
+
+#define CCK_SIFS_TIME 10
+#define CCK_PREAMBLE_BITS 144
+#define CCK_PLCP_BITS 48
+
+#define OFDM_SIFS_TIME 16
+#define OFDM_PREAMBLE_TIME 20
+#define OFDM_PLCP_BITS 22
+#define OFDM_SYMBOL_TIME 4
+
+#define OFDM_SIFS_TIME_HALF 32
+#define OFDM_PREAMBLE_TIME_HALF 40
+#define OFDM_PLCP_BITS_HALF 22
+#define OFDM_SYMBOL_TIME_HALF 8
+
+#define OFDM_SIFS_TIME_QUARTER 64
+#define OFDM_PREAMBLE_TIME_QUARTER 80
+#define OFDM_PLCP_BITS_QUARTER 22
+#define OFDM_SYMBOL_TIME_QUARTER 16
+
+u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
+ enum eeprom_param param);
+
+#endif
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
new file mode 100644
index 00000000000..3dd3815940a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -0,0 +1,3146 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+static const u32 ar5416Modes_9100[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6de8b4e0, 0x6de8b4e0, 0x6de8b0de, 0x6de8b0de, 0x6de8b0de },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
+ { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
+ { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
+ { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common_9100[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000000 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a002e },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5d50e188 },
+ { 0x00009958, 0x00081fff },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x001fff00 },
+ { 0x000099ac, 0x00000000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x000000aa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x00000bb5 },
+ { 0x0000a22c, 0x00000011 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000000 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x051701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa1f },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x08000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9100[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9100[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9100[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2_9100[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3_9100[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC_9100[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x201400df, 0x201400df },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007081, 0x00007081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9100[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9100[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x0000000c },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000030 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000060 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000058 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098c4, 0x00000000 },
+};
+
+static const u32 ar5416Modes[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
+ { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
+ { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
+ { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
+#ifdef TB243
+ { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
+#else
+ { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+#endif
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00020010, 0x00000003 },
+ { 0x00020038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00004000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb514 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889ae },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa33 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1[][2] = {
+ { 0x000098b0, 0x02108421},
+ { 0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2[][2] = {
+ { 0x000098b0, 0x0e73ff17},
+ { 0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014000f, 0x0014000f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x000180d6, 0x000180d6 },
+ { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
+ { 0x0000989c, 0x000000b1, 0x000000b1 },
+ { 0x0000989c, 0x00002000, 0x00002000 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+
+static const u32 ar5416Bank6TPC[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000010 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000015 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+
+static const u32 ar5416Modes_9160[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
+ { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common_9160[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000020 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5f3ca3de },
+ { 0x00009958, 0x2108ecff },
+ { 0x00009940, 0x00750604 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa33 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9160[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9160[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9160[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2_9160[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3_9160[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9160[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9160[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+
+static u32 ar5416Addac_9160[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000008 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+
+static u32 ar5416Addac_91601_1[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+
+
+static const u32 ar9280Modes_9280[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 },
+ { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
+ { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
+ { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
+ { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 },
+ { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
+ { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
+ { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
+ { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
+ { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a },
+ { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 },
+ { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 },
+ { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 },
+ { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 },
+ { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c },
+ { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 },
+ { 0x00009a18, 0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 },
+ { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 },
+ { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac },
+ { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 },
+ { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 },
+ { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 },
+ { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 },
+ { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 },
+ { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 },
+ { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 },
+ { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 },
+ { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac },
+ { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 },
+ { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 },
+ { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 },
+ { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 },
+ { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 },
+ { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad },
+ { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
+ { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
+ { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
+ { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
+ { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
+ { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
+ { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
+ { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
+ { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
+ { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
+ { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
+ { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
+ { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
+ { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
+ { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
+ { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
+ { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
+ { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
+ { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
+ { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
+ { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
+ { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
+ { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
+ { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
+ { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
+ { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
+ { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
+ { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
+ { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
+ { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
+ { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
+ { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
+ { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
+ { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
+ { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
+ { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
+ { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
+ { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
+ { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
+ { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
+ { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
+ { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
+ { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
+ { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
+ { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
+ { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
+ { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
+ { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
+ { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
+ { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
+ { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
+ { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
+ { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
+ { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
+ { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
+ { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
+ { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
+ { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
+ { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
+ { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
+ { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
+ { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
+ { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
+ { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
+ { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
+ { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
+ { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
+ { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
+ { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
+ { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
+ { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
+ { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
+ { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
+ { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
+ { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
+ { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
+ { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
+ { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
+ { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
+ { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
+ { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
+ { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
+ { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
+ { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
+ { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
+ { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
+ { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
+ { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
+ { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
+ { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
+ { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
+ { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
+ { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
+ { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
+ { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
+ { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
+ { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
+ { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
+ { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
+ { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c },
+ { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 },
+ { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 },
+ { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 },
+};
+
+static const u32 ar9280Common_9280[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00004024, 0x0000001f },
+ { 0x00007010, 0x00000033 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x00008070, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00008344, 0x00000000 },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xaf268e30 },
+ { 0x00009810, 0xfd14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x0040233c },
+ { 0x0000a84c, 0x0040233c },
+ { 0x00009854, 0x00000044 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x04900000 },
+ { 0x0000a920, 0x04900000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280c00a },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0xe250a51e },
+ { 0x00009958, 0x3388ffff },
+ { 0x00009940, 0x00781204 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb514 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f00c4 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099b4, 0x00000820 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000000 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x0000a210, 0x4080a333 },
+ { 0x0000a214, 0x40206c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x01834061 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x000003b5 },
+ { 0x0000a22c, 0x23277200 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x001da000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cdbd380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000000 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a358, 0x7999aa0f },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+ { 0x0000a3e4, 0x00000000 },
+ { 0x0000a3e8, 0x18c43433 },
+ { 0x0000a3ec, 0x00f38081 },
+ { 0x00007800, 0x00040000 },
+ { 0x00007804, 0xdb005012 },
+ { 0x00007808, 0x04924914 },
+ { 0x0000780c, 0x21084210 },
+ { 0x00007810, 0x6d801300 },
+ { 0x00007814, 0x0019beff },
+ { 0x00007818, 0x07e40000 },
+ { 0x0000781c, 0x00492000 },
+ { 0x00007820, 0x92492480 },
+ { 0x00007824, 0x00040000 },
+ { 0x00007828, 0xdb005012 },
+ { 0x0000782c, 0x04924914 },
+ { 0x00007830, 0x21084210 },
+ { 0x00007834, 0x6d801300 },
+ { 0x00007838, 0x0019beff },
+ { 0x0000783c, 0x07e40000 },
+ { 0x00007840, 0x00492000 },
+ { 0x00007844, 0x92492480 },
+ { 0x00007848, 0x00120000 },
+ { 0x00007850, 0x54214514 },
+ { 0x00007858, 0x92592692 },
+ { 0x00007860, 0x52802000 },
+ { 0x00007864, 0x0a8e370e },
+ { 0x00007868, 0xc0102850 },
+ { 0x0000786c, 0x812d4000 },
+ { 0x00007874, 0x001b6db0 },
+ { 0x00007878, 0x00376b63 },
+ { 0x0000787c, 0x06db6db6 },
+ { 0x00007880, 0x006d8000 },
+ { 0x00007884, 0xffeffffe },
+ { 0x00007888, 0xffeffffe },
+ { 0x00007890, 0x00060aeb },
+ { 0x00007894, 0x5a108000 },
+ { 0x00007898, 0x2a850160 },
+};
+
+
+
+
+static const u32 ar9280Modes_9280_2[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
+ { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009840, 0x206a012e, 0x206a012e, 0x206a022e, 0x206a022e, 0x206a022e },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
+ { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
+ { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
+ { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
+ { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
+ { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
+ { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
+ { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
+ { 0x0000c9b8, 0x0000000f, 0x0000000f, 0x0000001c, 0x0000001c, 0x0000001c },
+ { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 },
+ { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 },
+ { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 },
+ { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 },
+ { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c },
+ { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 },
+ { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 },
+ { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 },
+ { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c },
+ { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 },
+ { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 },
+ { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 },
+ { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c },
+ { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 },
+ { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 },
+ { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 },
+ { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c },
+ { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 },
+ { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 },
+ { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 },
+ { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 },
+ { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 },
+ { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c },
+ { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 },
+ { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
+ { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
+ { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
+ { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
+ { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
+ { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
+ { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
+ { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
+ { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
+ { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
+ { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
+ { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
+ { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
+ { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
+ { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
+ { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
+ { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
+ { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
+ { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
+ { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
+ { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
+ { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
+ { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
+ { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
+ { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
+ { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
+ { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
+ { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
+ { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
+ { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
+ { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
+ { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
+ { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
+ { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
+ { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
+ { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
+ { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
+ { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
+ { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
+ { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
+ { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
+ { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
+ { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
+ { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
+ { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
+ { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
+ { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
+ { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
+ { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
+ { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
+ { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
+ { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
+ { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
+ { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
+ { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
+ { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
+ { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
+ { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
+ { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
+ { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
+ { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
+ { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
+ { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
+ { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
+ { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
+ { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
+ { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
+ { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
+ { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
+ { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
+ { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
+ { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
+ { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
+ { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
+ { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
+ { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
+ { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
+ { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
+ { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
+ { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
+ { 0x0000a21c, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a250, 0x001ff000, 0x001ff000, 0x001da000, 0x001da000, 0x001da000 },
+ { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
+ { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
+ { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
+ { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
+ { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
+ { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
+ { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
+ { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
+ { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
+ { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
+ { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
+ { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
+ { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
+ { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
+ { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
+ { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
+ { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
+ { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
+ { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
+ { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
+ { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
+ { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
+ { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
+};
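+
+/*
+ * Layout note: each row of the per-mode tables above appears to pair a
+ * register address (column 0) with one value per operating mode; the
+ * REG_WRITE_ARRAY()/INI_RA() helpers used from phy.c select a column via
+ * the modes/freq index and write it to that address.  The two-column
+ * "Common" table below holds plain { address, value } pairs that are
+ * applied in every mode.
+ */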
+
+static const u32 ar9280Common_9280_2[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00004024, 0x0000001f },
+ { 0x00004060, 0x00000000 },
+ { 0x00004064, 0x00000000 },
+ { 0x00007010, 0x00000033 },
+ { 0x00007034, 0x00000002 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x00008070, 0x00000000 },
+ { 0x000080c0, 0x2a80001a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c0, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x0000829c, 0x00000000 },
+ { 0x00008300, 0x00000040 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00008344, 0x00581043 },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xafa68e30 },
+ { 0x00009810, 0xfd14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x0000984c, 0x0040233c },
+ { 0x0000a84c, 0x0040233c },
+ { 0x00009854, 0x00000044 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x00009910, 0x01002310 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x04900000 },
+ { 0x0000a920, 0x04900000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280c00a },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5f3ca3de },
+ { 0x00009958, 0x2108ecff },
+ { 0x00009940, 0x14750604 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099b4, 0x00000820 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000000 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099f0, 0x00000000 },
+ { 0x000099fc, 0x00001042 },
+ { 0x0000a210, 0x4080a333 },
+ { 0x0000a214, 0x40206c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x01834061 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x000003b5 },
+ { 0x0000a22c, 0x233f71c0 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c88000 },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cdbd380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000000 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+ { 0x0000a3e4, 0x00000000 },
+ { 0x0000a3e8, 0x18c43433 },
+ { 0x0000a3ec, 0x00f70081 },
+ { 0x00007800, 0x00040000 },
+ { 0x00007804, 0xdb005012 },
+ { 0x00007808, 0x04924914 },
+ { 0x0000780c, 0x21084210 },
+ { 0x00007810, 0x6d801300 },
+ { 0x00007814, 0x0019beff },
+ { 0x00007818, 0x07e41000 },
+ { 0x0000781c, 0x00392000 },
+ { 0x00007820, 0x92592480 },
+ { 0x00007824, 0x00040000 },
+ { 0x00007828, 0xdb005012 },
+ { 0x0000782c, 0x04924914 },
+ { 0x00007830, 0x21084210 },
+ { 0x00007834, 0x6d801300 },
+ { 0x00007838, 0x0019beff },
+ { 0x0000783c, 0x07e40000 },
+ { 0x00007840, 0x00392000 },
+ { 0x00007844, 0x92592480 },
+ { 0x00007848, 0x00100000 },
+ { 0x0000784c, 0x773f0567 },
+ { 0x00007850, 0x54214514 },
+ { 0x00007854, 0x12035828 },
+ { 0x00007858, 0x9259269a },
+ { 0x00007860, 0x52802000 },
+ { 0x00007864, 0x0a8e370e },
+ { 0x00007868, 0xc0102850 },
+ { 0x0000786c, 0x812d4000 },
+ { 0x00007870, 0x807ec400 },
+ { 0x00007874, 0x001b6db0 },
+ { 0x00007878, 0x00376b63 },
+ { 0x0000787c, 0x06db6db6 },
+ { 0x00007880, 0x006d8000 },
+ { 0x00007884, 0xffeffffe },
+ { 0x00007888, 0xffeffffe },
+ { 0x0000788c, 0x00010000 },
+ { 0x00007890, 0x02060aeb },
+ { 0x00007898, 0x2a850160 },
+};
+
+static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
+ { 0x00001030, 0x00000268, 0x000004d0 },
+ { 0x00001070, 0x0000018c, 0x00000318 },
+ { 0x000010b0, 0x00000fd0, 0x00001fa0 },
+ { 0x00008014, 0x044c044c, 0x08980898 },
+ { 0x0000801c, 0x148ec02b, 0x148ec057 },
+ { 0x00008318, 0x000044c0, 0x00008980 },
+ { 0x00009820, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000f0f, 0x00000f0f },
+ { 0x00009828, 0x0b020001, 0x0b020001 },
+ { 0x00009834, 0x00000f0f, 0x00000f0f },
+ { 0x00009844, 0x03721821, 0x03721821 },
+ { 0x00009914, 0x00000898, 0x00000898 },
+ { 0x00009918, 0x0000000b, 0x00000016 },
+ { 0x00009944, 0xdfbc1210, 0xdfbc1210 },
+};
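+
+/*
+ * The three-column table above presumably pairs each register address
+ * with its normal- and fast-clock values, as the _fast_clock suffix in
+ * its name suggests.
+ */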
+
+
+
+static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
+ {0x00004040, 0x9248fd00 },
+ {0x00004040, 0x24924924 },
+ {0x00004040, 0xa8000019 },
+ {0x00004040, 0x13160820 },
+ {0x00004040, 0xe5980560 },
+ {0x00004040, 0x401dcffc },
+ {0x00004040, 0x1aaabe40 },
+ {0x00004040, 0xbe105554 },
+ {0x00004040, 0x00043007 },
+ {0x00004044, 0x00000000 },
+};
+
+
+
+static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
+ {0x00004040, 0x9248fd00 },
+ {0x00004040, 0x24924924 },
+ {0x00004040, 0xa8000019 },
+ {0x00004040, 0x13160820 },
+ {0x00004040, 0xe5980560 },
+ {0x00004040, 0x401dcffd },
+ {0x00004040, 0x1aaabe40 },
+ {0x00004040, 0xbe105554 },
+ {0x00004040, 0x00043007 },
+ {0x00004044, 0x00000000 },
+};
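+
+/*
+ * Note that the two PCIe PHY serdes programs above differ only in the
+ * sixth word written to 0x00004040 (0x401dcffc vs. 0x401dcffd); the low
+ * bit presumably selects whether CLKREQ stays asserted in L1, as the
+ * table names suggest.
+ */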
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
new file mode 100644
index 00000000000..2888778040e
--- /dev/null
+++ b/drivers/net/wireless/ath9k/main.c
@@ -0,0 +1,1470 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* mac80211 and PCI callbacks */
+
+#include <linux/nl80211.h>
+#include "core.h"
+
+#define ATH_PCI_VERSION "0.1"
+
+#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
+#define IEEE80211_ACTION_CAT_HT 7
+#define IEEE80211_ACTION_HT_TXCHWIDTH 0
+
+static char *dev_info = "ath9k";
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_device_id ath_pci_id_table[] __devinitdata = {
+ { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+ { 0 }
+};
+
+static int ath_get_channel(struct ath_softc *sc,
+ struct ieee80211_channel *chan)
+{
+ int i;
+
+ for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
+ if (sc->sc_ah->ah_channels[i].channel == chan->center_freq)
+ return i;
+ }
+
+ return -1;
+}
+
+static u32 ath_get_extchanmode(struct ath_softc *sc,
+ struct ieee80211_channel *chan)
+{
+ u32 chanmode = 0;
+ u8 ext_chan_offset = sc->sc_ht_info.ext_chan_offset;
+ enum ath9k_ht_macmode tx_chan_width = sc->sc_ht_info.tx_chan_width;
+
+ switch (chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
+ (tx_chan_width == ATH9K_HT_MACMODE_20))
+ chanmode = CHANNEL_G_HT20;
+ if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
+ (tx_chan_width == ATH9K_HT_MACMODE_2040))
+ chanmode = CHANNEL_G_HT40PLUS;
+ if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
+ (tx_chan_width == ATH9K_HT_MACMODE_2040))
+ chanmode = CHANNEL_G_HT40MINUS;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
+ (tx_chan_width == ATH9K_HT_MACMODE_20))
+ chanmode = CHANNEL_A_HT20;
+ if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
+ (tx_chan_width == ATH9K_HT_MACMODE_2040))
+ chanmode = CHANNEL_A_HT40PLUS;
+ if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
+ (tx_chan_width == ATH9K_HT_MACMODE_2040))
+ chanmode = CHANNEL_A_HT40MINUS;
+ break;
+ default:
+ break;
+ }
+
+ return chanmode;
+}
+
+
+static int ath_setkey_tkip(struct ath_softc *sc,
+ struct ieee80211_key_conf *key,
+ struct ath9k_keyval *hk,
+ const u8 *addr)
+{
+ u8 *key_rxmic = NULL;
+ u8 *key_txmic = NULL;
+
+ key_txmic = key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
+ key_rxmic = key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
+
+ if (addr == NULL) {
+ /* Group key installation */
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ return ath_keyset(sc, key->keyidx, hk, addr);
+ }
+ if (!sc->sc_splitmic) {
+ /*
+ * data key goes at first index,
+ * the hal handles the MIC keys at index+64.
+ */
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
+ return ath_keyset(sc, key->keyidx, hk, addr);
+ }
+ /*
+ * TX key goes at first index, RX key at +32.
+ * The hal handles the MIC keys at index+64.
+ */
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ if (!ath_keyset(sc, key->keyidx, hk, NULL)) {
+ /* Txmic entry failed. No need to proceed further */
+ DPRINTF(sc, ATH_DBG_KEYCACHE,
+ "%s Setting TX MIC Key Failed\n", __func__);
+ return 0;
+ }
+
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ /* XXX delete tx key on failure? */
+ return ath_keyset(sc, key->keyidx+32, hk, addr);
+}
+
+static int ath_key_config(struct ath_softc *sc,
+ const u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_vif *vif;
+ struct ath9k_keyval hk;
+ const u8 *mac = NULL;
+ int ret = 0;
+ enum ieee80211_if_types opmode;
+
+ memset(&hk, 0, sizeof(hk));
+
+ switch (key->alg) {
+ case ALG_WEP:
+ hk.kv_type = ATH9K_CIPHER_WEP;
+ break;
+ case ALG_TKIP:
+ hk.kv_type = ATH9K_CIPHER_TKIP;
+ break;
+ case ALG_CCMP:
+ hk.kv_type = ATH9K_CIPHER_AES_CCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hk.kv_len = key->keylen;
+ memcpy(hk.kv_val, key->key, key->keylen);
+
+ if (!sc->sc_vaps[0])
+ return -EIO;
+
+ vif = sc->sc_vaps[0]->av_if_data;
+ opmode = vif->type;
+
+ /*
+ * Strategy:
+	 * For _M_STA mc tx, we will not set up a key at all, since we
+	 * never tx mc.
+	 * For _M_STA mc rx, we will use the keyID.
+	 * For _M_IBSS mc tx, we will use the keyID, and no macaddr.
+	 * For _M_IBSS mc rx, we will alloc a slot and plumb the mac of the
+	 * peer node. BUT we will plumb a cleartext key so that we can do
+	 * per-STA default key table lookup in software.
+ */
+ if (is_broadcast_ether_addr(addr)) {
+ switch (opmode) {
+ case IEEE80211_IF_TYPE_STA:
+ /* default key: could be group WPA key
+ * or could be static WEP key */
+ mac = NULL;
+ break;
+ case IEEE80211_IF_TYPE_IBSS:
+ break;
+ case IEEE80211_IF_TYPE_AP:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ } else {
+ mac = addr;
+ }
+
+ if (key->alg == ALG_TKIP)
+ ret = ath_setkey_tkip(sc, key, &hk, mac);
+ else
+ ret = ath_keyset(sc, key->keyidx, &hk, mac);
+
+ if (!ret)
+ return -EIO;
+
+ sc->sc_keytype = hk.kv_type;
+ return 0;
+}
+
+static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
+{
+#define ATH_MAX_NUM_KEYS 4
+ int freeslot;
+
+ freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0;
+ ath_key_reset(sc, key->keyidx, freeslot);
+#undef ATH_MAX_NUM_KEYS
+}
+
+static void setup_ht_cap(struct ieee80211_ht_info *ht_info)
+{
+/* Until mac80211 includes these fields */
+
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+#define IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
+#define IEEE80211_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
+
+ ht_info->ht_supported = 1;
+ ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH
+ |(u16)IEEE80211_HT_CAP_MIMO_PS
+ |(u16)IEEE80211_HT_CAP_SGI_40
+ |(u16)IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536;
+ ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8;
+ /* setup supported mcs set */
+ memset(ht_info->supp_mcs_set, 0, 16);
+ ht_info->supp_mcs_set[0] = 0xff;
+ ht_info->supp_mcs_set[1] = 0xff;
+ ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
+}
+
+static int ath_rate2idx(struct ath_softc *sc, int rate)
+{
+ int i = 0, cur_band, n_rates;
+ struct ieee80211_hw *hw = sc->hw;
+
+ cur_band = hw->conf.channel->band;
+ n_rates = sc->sbands[cur_band].n_bitrates;
+
+ for (i = 0; i < n_rates; i++) {
+ if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
+ break;
+ }
+
+ /*
+	 * NB: mac80211 validates the rx rate index against the supported
+	 * legacy rates only (it should be checked against the HT rates as
+	 * well).  For an rx rate that matches none of the supported basic
+	 * and extended rates, return the highest legacy rate index to keep
+	 * mac80211 happy.  This hack will be cleaned up once the rx rate
+	 * index validation in mac80211 is fixed.
+ */
+ if (i == n_rates)
+ return n_rates - 1;
+ return i;
+}
+
+static void ath9k_rx_prepare(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_recv_status *status,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_channel *curchan = hw->conf.channel;
+
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ rx_status->mactime = status->tsf;
+ rx_status->band = curchan->band;
+ rx_status->freq = curchan->center_freq;
+ rx_status->noise = ATH_DEFAULT_NOISE_FLOOR;
+ rx_status->signal = rx_status->noise + status->rssi;
+ rx_status->rate_idx = ath_rate2idx(sc, (status->rateKbps / 100));
+ rx_status->antenna = status->antenna;
+ rx_status->qual = status->rssi * 100 / 64;
+
+ if (status->flags & ATH_RX_MIC_ERROR)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ if (status->flags & ATH_RX_FCS_ERROR)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ rx_status->flag |= RX_FLAG_TSFT;
+}
+
+static u8 parse_mpdudensity(u8 mpdudensity)
+{
+ /*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ 1 microsecond */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
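+
+/*
+ * For example, a peer advertising ampdu_density 5 (4 us minimum MPDU start
+ * spacing) yields parse_mpdudensity(5) == 4, which ath9k_ht_conf() below
+ * stores in ht_info->mpdudensity.
+ */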
+
+static int ath9k_start(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ int error = 0, pos;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with "
+ "initial channel: %d MHz\n", __func__, curchan->center_freq);
+
+ /* setup initial channel */
+
+ pos = ath_get_channel(sc, curchan);
+ if (pos == -1) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
+ return -EINVAL;
+ }
+
+ sc->sc_ah->ah_channels[pos].chanmode =
+ (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
+
+ /* open ath_dev */
+ error = ath_open(sc, &sc->sc_ah->ah_channels[pos]);
+ if (error) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to complete ath_open\n", __func__);
+ return error;
+ }
+
+ ieee80211_wake_queues(hw);
+ return 0;
+}
+
+static int ath9k_tx(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct ath_softc *sc = hw->priv;
+ int hdrlen, padsize;
+
+ /* Add the padding after the header if this is not already done */
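+	/* For example, a 26-byte QoS data header gives padsize 2: two bytes
+	 * are pushed and the header is moved up, leaving a two-byte gap
+	 * between the header and the payload for the hardware. */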
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ if (hdrlen & 3) {
+ padsize = hdrlen % 4;
+ if (skb_headroom(skb) < padsize)
+ return -1;
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, hdrlen);
+ }
+
+ DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n",
+ __func__,
+ skb);
+
+ if (ath_tx_start(sc, skb) != 0) {
+ DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
+ dev_kfree_skb_any(skb);
+ /* FIXME: Check for proper return value from ATH_DEV */
+ return 0;
+ }
+
+ return 0;
+}
+
+static void ath9k_stop(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ int error;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__);
+
+ error = ath_suspend(sc);
+ if (error)
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: Device is no longer present\n", __func__);
+
+ ieee80211_stop_queues(hw);
+}
+
+static int ath9k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ int error, ic_opmode = 0;
+
+	/* Support only one VAP for now */
+
+ if (sc->sc_nvaps)
+ return -ENOBUFS;
+
+ switch (conf->type) {
+ case IEEE80211_IF_TYPE_STA:
+ ic_opmode = ATH9K_M_STA;
+ break;
+ case IEEE80211_IF_TYPE_IBSS:
+ ic_opmode = ATH9K_M_IBSS;
+ break;
+ default:
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Only STA and IBSS are supported currently\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n",
+ __func__,
+ ic_opmode);
+
+ error = ath_vap_attach(sc, 0, conf->vif, ic_opmode);
+ if (error) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to attach vap, error: %d\n",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void ath9k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_vap *avp;
+ int error;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__);
+
+ avp = sc->sc_vaps[0];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
+ __func__);
+ return;
+ }
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ ath_slow_ant_div_stop(&sc->sc_antdiv);
+#endif
+
+ /* Update ratectrl */
+ ath_rate_newstate(sc, avp);
+
+ /* Reclaim beacon resources */
+ if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) {
+ ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+ ath_beacon_return(sc, avp);
+ }
+
+ /* Set interrupt mask */
+ sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
+ sc->sc_beacons = 0;
+
+ error = ath_vap_detach(sc, 0);
+ if (error)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to detach vap, error: %d\n",
+ __func__, error);
+}
+
+static int ath9k_config(struct ieee80211_hw *hw,
+ struct ieee80211_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ int pos;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
+ __func__,
+ curchan->center_freq);
+
+ pos = ath_get_channel(sc, curchan);
+ if (pos == -1) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
+ return -EINVAL;
+ }
+
+ sc->sc_ah->ah_channels[pos].chanmode =
+ (curchan->band == IEEE80211_BAND_2GHZ) ?
+ CHANNEL_G : CHANNEL_A;
+
+ if (sc->sc_curaid && hw->conf.ht_conf.ht_supported)
+ sc->sc_ah->ah_channels[pos].chanmode =
+ ath_get_extchanmode(sc, curchan);
+
+ sc->sc_config.txpowlimit = 2 * conf->power_level;
+
+ /* set h/w channel */
+ if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n",
+ __func__);
+
+ return 0;
+}
+
+static int ath9k_config_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_if_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_vap *avp;
+ u32 rfilt = 0;
+ int error, i;
+ DECLARE_MAC_BUF(mac);
+
+ avp = sc->sc_vaps[0];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((conf->changed & IEEE80211_IFCC_BSSID) &&
+ !is_zero_ether_addr(conf->bssid)) {
+ switch (vif->type) {
+ case IEEE80211_IF_TYPE_STA:
+ case IEEE80211_IF_TYPE_IBSS:
+ /* Update ratectrl about the new state */
+ ath_rate_newstate(sc, avp);
+
+ /* Set rx filter */
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
+
+ /* Set BSSID */
+ memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
+ sc->sc_curaid = 0;
+ ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
+ sc->sc_curaid);
+
+ /* Set aggregation protection mode parameters */
+ sc->sc_config.ath_aggr_prot = 0;
+
+ /*
+			 * Reset our TSF so that its value is lower than the
+			 * beacon we are trying to catch; only then will the
+			 * hw update its TSF register with the new beacon.
+			 * Reset the TSF before setting the BSSID to avoid
+			 * allowing in any frames that would update our TSF
+			 * only to have us clear it immediately thereafter.
+ */
+ ath9k_hw_reset_tsf(sc->sc_ah);
+
+ /* Disable BMISS interrupt when we're not associated */
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->sc_imask &
+ ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
+ sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: RX filter 0x%x bssid %s aid 0x%x\n",
+ __func__, rfilt,
+ print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
+
+ /* need to reconfigure the beacon */
+ sc->sc_beacons = 0;
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ if ((conf->changed & IEEE80211_IFCC_BEACON) &&
+ (vif->type == IEEE80211_IF_TYPE_IBSS)) {
+ /*
+ * Allocate and setup the beacon frame.
+ *
+ * Stop any previous beacon DMA. This may be
+ * necessary, for example, when an ibss merge
+ * causes reconfiguration; we may be called
+ * with beacon transmission active.
+ */
+ ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+
+ error = ath_beacon_alloc(sc, 0);
+ if (error != 0)
+ return error;
+
+ ath_beacon_sync(sc, 0);
+ }
+
+ /* Check for WLAN_CAPABILITY_PRIVACY ? */
+	if (avp->av_opmode != IEEE80211_IF_TYPE_STA) {
+ for (i = 0; i < IEEE80211_WEP_NKID; i++)
+ if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
+ ath9k_hw_keysetmac(sc->sc_ah,
+ (u16)i,
+ sc->sc_curbssid);
+ }
+
+ /* Only legacy IBSS for now */
+ if (vif->type == IEEE80211_IF_TYPE_IBSS)
+ ath_update_chainmask(sc, 0);
+
+ return 0;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_FCSFAIL)
+
+/* Accept unicast, bcast and mcast frames */
+
+static void ath9k_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ int mc_count,
+ struct dev_mc_list *mclist)
+{
+ struct ath_softc *sc = hw->priv;
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+ if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+ ath_scan_start(sc);
+ else
+ ath_scan_end(sc);
+ }
+}
+
+static void ath9k_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ const u8 *addr)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_node *an;
+ unsigned long flags;
+ DECLARE_MAC_BUF(mac);
+
+ spin_lock_irqsave(&sc->node_lock, flags);
+ an = ath_node_find(sc, (u8 *) addr);
+ spin_unlock_irqrestore(&sc->node_lock, flags);
+
+ switch (cmd) {
+ case STA_NOTIFY_ADD:
+ spin_lock_irqsave(&sc->node_lock, flags);
+ if (!an) {
+ ath_node_attach(sc, (u8 *)addr, 0);
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
+ __func__,
+ print_mac(mac, addr));
+ } else {
+ ath_node_get(sc, (u8 *)addr);
+ }
+ spin_unlock_irqrestore(&sc->node_lock, flags);
+ break;
+ case STA_NOTIFY_REMOVE:
+ if (!an)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Removal of a non-existent node\n",
+ __func__);
+ else {
+ ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
+ __func__,
+ print_mac(mac, addr));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath9k_conf_tx(struct ieee80211_hw *hw,
+ u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath9k_tx_queue_info qi;
+ int ret = 0, qnum;
+
+ if (queue >= WME_NUM_AC)
+ return 0;
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cwmin = params->cw_min;
+ qi.tqi_cwmax = params->cw_max;
+ qi.tqi_burstTime = params->txop;
+ qnum = ath_get_hal_qnum(queue, sc);
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: Configure tx [queue/halq] [%d/%d], "
+ "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ __func__,
+ queue,
+ qnum,
+ params->aifs,
+ params->cw_min,
+ params->cw_max,
+ params->txop);
+
+ ret = ath_txq_update(sc, qnum, &qi);
+ if (ret)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: TXQ Update failed\n", __func__);
+
+ return ret;
+}
+
+static int ath9k_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ const u8 *local_addr,
+ const u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_softc *sc = hw->priv;
+ int ret = 0;
+
+ DPRINTF(sc, ATH_DBG_KEYCACHE, " %s: Set HW Key\n", __func__);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath_key_config(sc, addr, key);
+ if (!ret) {
+ set_bit(key->keyidx, sc->sc_keymap);
+ key->hw_key_idx = key->keyidx;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ }
+ break;
+ case DISABLE_KEY:
+ ath_key_delete(sc, key);
+ clear_bit(key->keyidx, sc->sc_keymap);
+ sc->sc_keytype = ATH9K_CIPHER_CLR;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void ath9k_ht_conf(struct ath_softc *sc,
+ struct ieee80211_bss_conf *bss_conf)
+{
+#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
+ struct ath_ht_info *ht_info = &sc->sc_ht_info;
+
+ if (bss_conf->assoc_ht) {
+ ht_info->ext_chan_offset =
+ bss_conf->ht_bss_conf->bss_cap &
+ IEEE80211_HT_IE_CHA_SEC_OFFSET;
+
+ if (!(bss_conf->ht_conf->cap &
+ IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
+ (bss_conf->ht_bss_conf->bss_cap &
+ IEEE80211_HT_IE_CHA_WIDTH))
+ ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
+ else
+ ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
+
+ ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
+ ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
+ bss_conf->ht_conf->ampdu_factor);
+ ht_info->mpdudensity =
+ parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
+
+ }
+
+#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
+}
+
+static void ath9k_bss_assoc_info(struct ath_softc *sc,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath_vap *avp;
+ int pos;
+ DECLARE_MAC_BUF(mac);
+
+ if (bss_conf->assoc) {
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
+ __func__,
+ bss_conf->aid);
+
+ avp = sc->sc_vaps[0];
+ if (avp == NULL) {
+ DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
+ __func__);
+ return;
+ }
+
+ /* New association, store aid */
+ if (avp->av_opmode == ATH9K_M_STA) {
+ sc->sc_curaid = bss_conf->aid;
+ ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
+ sc->sc_curaid);
+ }
+
+ /* Configure the beacon */
+ ath_beacon_config(sc, 0);
+ sc->sc_beacons = 1;
+
+ /* Reset rssi stats */
+ sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
+
+ /* Update chainmask */
+ ath_update_chainmask(sc, bss_conf->assoc_ht);
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: bssid %s aid 0x%x\n",
+ __func__,
+ print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
+ __func__,
+ curchan->center_freq);
+
+ pos = ath_get_channel(sc, curchan);
+ if (pos == -1) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Invalid channel\n", __func__);
+ return;
+ }
+
+ if (hw->conf.ht_conf.ht_supported)
+ sc->sc_ah->ah_channels[pos].chanmode =
+ ath_get_extchanmode(sc, curchan);
+ else
+ sc->sc_ah->ah_channels[pos].chanmode =
+ (curchan->band == IEEE80211_BAND_2GHZ) ?
+ CHANNEL_G : CHANNEL_A;
+
+ /* set h/w channel */
+ if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to set channel\n",
+ __func__);
+
+ ath_rate_newstate(sc, avp);
+ /* Update ratectrl about the new state */
+ ath_rc_node_update(hw, avp->rc_node);
+ } else {
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: Bss Info DISSOC\n", __func__);
+ sc->sc_curaid = 0;
+ }
+}
+
+static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ath_softc *sc = hw->priv;
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n",
+ __func__,
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ sc->sc_flags |= ATH_PREAMBLE_SHORT;
+ else
+ sc->sc_flags &= ~ATH_PREAMBLE_SHORT;
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n",
+ __func__,
+ bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot &&
+ hw->conf.channel->band != IEEE80211_BAND_5GHZ)
+ sc->sc_flags |= ATH_PROTECT_ENABLE;
+ else
+ sc->sc_flags &= ~ATH_PROTECT_ENABLE;
+ }
+
+ if (changed & BSS_CHANGED_HT) {
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n",
+ __func__,
+ bss_conf->assoc_ht);
+ ath9k_ht_conf(sc, bss_conf);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n",
+ __func__,
+ bss_conf->assoc);
+ ath9k_bss_assoc_info(sc, bss_conf);
+ }
+}
+
+static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
+{
+ u64 tsf;
+ struct ath_softc *sc = hw->priv;
+ struct ath_hal *ah = sc->sc_ah;
+
+ tsf = ath9k_hw_gettsf64(ah);
+
+ return tsf;
+}
+
+static void ath9k_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hal *ah = sc->sc_ah;
+
+ ath9k_hw_reset_tsf(ah);
+}
+
+static int ath9k_ampdu_action(struct ieee80211_hw *hw,
+ enum ieee80211_ampdu_mlme_action action,
+ const u8 *addr,
+ u16 tid,
+ u16 *ssn)
+{
+ struct ath_softc *sc = hw->priv;
+ int ret = 0;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath_rx_aggr_start(sc, addr, tid, ssn);
+ if (ret < 0)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to start RX aggregation\n",
+ __func__);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath_rx_aggr_stop(sc, addr, tid);
+ if (ret < 0)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to stop RX aggregation\n",
+ __func__);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ret = ath_tx_aggr_start(sc, addr, tid, ssn);
+ if (ret < 0)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to start TX aggregation\n",
+ __func__);
+ else
+ ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ret = ath_tx_aggr_stop(sc, addr, tid);
+ if (ret < 0)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to stop TX aggregation\n",
+ __func__);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
+ break;
+ default:
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unknown AMPDU action\n", __func__);
+ }
+
+ return ret;
+}
+
+static struct ieee80211_ops ath9k_ops = {
+ .tx = ath9k_tx,
+ .start = ath9k_start,
+ .stop = ath9k_stop,
+ .add_interface = ath9k_add_interface,
+ .remove_interface = ath9k_remove_interface,
+ .config = ath9k_config,
+ .config_interface = ath9k_config_interface,
+ .configure_filter = ath9k_configure_filter,
+ .get_stats = NULL,
+ .sta_notify = ath9k_sta_notify,
+ .conf_tx = ath9k_conf_tx,
+ .get_tx_stats = NULL,
+ .bss_info_changed = ath9k_bss_info_changed,
+ .set_tim = NULL,
+ .set_key = ath9k_set_key,
+ .hw_scan = NULL,
+ .get_tkip_seq = NULL,
+ .set_rts_threshold = NULL,
+ .set_frag_threshold = NULL,
+ .set_retry_limit = NULL,
+ .get_tsf = ath9k_get_tsf,
+ .reset_tsf = ath9k_reset_tsf,
+ .tx_last_beacon = NULL,
+ .ampdu_action = ath9k_ampdu_action
+};
+
+void ath_get_beaconconfig(struct ath_softc *sc,
+ int if_id,
+ struct ath_beacon_config *conf)
+{
+ struct ieee80211_hw *hw = sc->hw;
+
+ /* fill in beacon config data */
+
+ conf->beacon_interval = hw->conf.beacon_int;
+ conf->listen_interval = 100;
+ conf->dtim_count = 1;
+ conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
+}
+
+int ath_update_beacon(struct ath_softc *sc,
+ int if_id,
+ struct ath_beacon_offset *bo,
+ struct sk_buff *skb,
+ int mcast)
+{
+ return 0;
+}
+
+void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_xmit_status *tx_status, struct ath_node *an)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ DPRINTF(sc, ATH_DBG_XMIT,
+ "%s: TX complete: skb: %p\n", __func__, skb);
+
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
+ tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
+ /* free driver's private data area of tx_info */
+ if (tx_info->driver_data[0] != NULL)
+ kfree(tx_info->driver_data[0]);
+ tx_info->driver_data[0] = NULL;
+ }
+
+ if (tx_status->flags & ATH_TX_BAR) {
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ tx_status->flags &= ~ATH_TX_BAR;
+ }
+ if (tx_status->flags)
+ tx_info->status.excessive_retries = 1;
+
+ tx_info->status.retry_count = tx_status->retries;
+
+ ieee80211_tx_status(hw, skb);
+ if (an)
+ ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
+}
+
+int ath__rx_indicate(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_recv_status *status,
+ u16 keyix)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_node *an = NULL;
+ struct ieee80211_rx_status rx_status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ int padsize;
+ enum ATH_RX_TYPE st;
+
+ /* see if any padding is done by the hw and remove it */
+ if (hdrlen & 3) {
+ padsize = hdrlen % 4;
+ memmove(skb->data + padsize, skb->data, hdrlen);
+ skb_pull(skb, padsize);
+ }
+
+ /* remove FCS before passing up to protocol stack */
+ skb_trim(skb, (skb->len - FCS_LEN));
+
+ /* Prepare rx status */
+ ath9k_rx_prepare(sc, skb, status, &rx_status);
+
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) &&
+ !(status->flags & ATH_RX_DECRYPT_ERROR)) {
+ rx_status.flag |= RX_FLAG_DECRYPTED;
+ } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
+ && !(status->flags & ATH_RX_DECRYPT_ERROR)
+ && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, sc->sc_keymap))
+ rx_status.flag |= RX_FLAG_DECRYPTED;
+ }
+
+ spin_lock_bh(&sc->node_lock);
+ an = ath_node_find(sc, hdr->addr2);
+ spin_unlock_bh(&sc->node_lock);
+
+ if (an) {
+ ath_rx_input(sc, an,
+ hw->conf.ht_conf.ht_supported,
+ skb, status, &st);
+ }
+ if (!an || (st != ATH_RX_CONSUMED))
+ __ieee80211_rx(hw, skb, &rx_status);
+
+ return 0;
+}
+
+int ath_rx_subframe(struct ath_node *an,
+ struct sk_buff *skb,
+ struct ath_recv_status *status)
+{
+ struct ath_softc *sc = an->an_sc;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_rx_status rx_status;
+
+ /* Prepare rx status */
+ ath9k_rx_prepare(sc, skb, status, &rx_status);
+ if (!(status->flags & ATH_RX_DECRYPT_ERROR))
+ rx_status.flag |= RX_FLAG_DECRYPTED;
+
+ __ieee80211_rx(hw, skb, &rx_status);
+
+ return 0;
+}
+
+enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc)
+{
+ return sc->sc_ht_info.tx_chan_width;
+}
+
+static int ath_detach(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
+
+ /* Unregister hw */
+
+ ieee80211_unregister_hw(hw);
+
+ /* unregister Rate control */
+ ath_rate_control_unregister();
+
+ /* tx/rx cleanup */
+
+ ath_rx_cleanup(sc);
+ ath_tx_cleanup(sc);
+
+ /* Deinit */
+
+ ath_deinit(sc);
+
+ return 0;
+}
+
+static int ath_attach(u16 devid,
+ struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ int error = 0;
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);
+
+ error = ath_init(devid, sc);
+ if (error != 0)
+ return error;
+
+ /* Init nodes */
+
+ INIT_LIST_HEAD(&sc->node_list);
+ spin_lock_init(&sc->node_lock);
+
+ /* get mac address from hardware and set in mac80211 */
+
+ SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);
+
+ /* setup channels and rates */
+
+ sc->sbands[IEEE80211_BAND_2GHZ].channels =
+ sc->channels[IEEE80211_BAND_2GHZ];
+ sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
+ sc->rates[IEEE80211_BAND_2GHZ];
+ sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+
+ if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
+		/* Setup HT capabilities for 2.4 GHz */
+ setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_5GHZ].channels =
+ sc->channels[IEEE80211_BAND_5GHZ];
+ sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ sc->rates[IEEE80211_BAND_5GHZ];
+ sc->sbands[IEEE80211_BAND_5GHZ].band =
+ IEEE80211_BAND_5GHZ;
+
+ if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
+			/* Setup HT capabilities for 5 GHz */
+ setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);
+
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &sc->sbands[IEEE80211_BAND_5GHZ];
+ }
+
+ /* FIXME: Have to figure out proper hw init values later */
+
+ hw->queues = 4;
+ hw->ampdu_queues = 1;
+
+ /* Register rate control */
+ hw->rate_control_algorithm = "ath9k_rate_control";
+ error = ath_rate_control_register();
+ if (error != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: Unable to register rate control "
+ "algorithm:%d\n", __func__, error);
+ ath_rate_control_unregister();
+ goto bad;
+ }
+
+ error = ieee80211_register_hw(hw);
+ if (error != 0) {
+ ath_rate_control_unregister();
+ goto bad;
+ }
+
+ /* initialize tx/rx engine */
+
+ error = ath_tx_init(sc, ATH_TXBUF);
+ if (error != 0)
+ goto bad1;
+
+ error = ath_rx_init(sc, ATH_RXBUF);
+ if (error != 0)
+ goto bad1;
+
+ return 0;
+bad1:
+ ath_detach(sc);
+bad:
+ return error;
+}
+
+static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ void __iomem *mem;
+ struct ath_softc *sc;
+ struct ieee80211_hw *hw;
+ const char *athname;
+ u8 csz;
+ u32 val;
+ int ret = 0;
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+
+ /* XXX 32-bit addressing only */
+ if (pci_set_dma_mask(pdev, 0xffffffff)) {
+ printk(KERN_ERR "ath_pci: 32-bit DMA not available\n");
+ ret = -ENODEV;
+ goto bad;
+ }
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
+ if (csz == 0) {
+ /*
+ * Linux 2.4.18 (at least) writes the cache line size
+		 * register as a 16-bit wide register, which is wrong.
+		 * We must have this set up properly for rx buffer
+		 * DMA to work, so force a reasonable value here if it
+ * comes up zero.
+ */
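+		/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so a
+		 * 64-byte cache line yields csz = 16 here. */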
+ csz = L1_CACHE_BYTES / sizeof(u32);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
+ }
+ /*
+ * The default setting of latency timer yields poor results,
+ * set it to the value used by other systems. It may be worth
+ * tweaking this setting more.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
+
+ pci_set_master(pdev);
+
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ ret = pci_request_region(pdev, 0, "ath9k");
+ if (ret) {
+ dev_err(&pdev->dev, "PCI memory region reserve error\n");
+ ret = -ENODEV;
+ goto bad;
+ }
+
+ mem = pci_iomap(pdev, 0, 0);
+ if (!mem) {
+		printk(KERN_ERR "PCI memory map error\n");
+ ret = -EIO;
+ goto bad1;
+ }
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
+ if (hw == NULL) {
+ printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n");
+ goto bad2;
+ }
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_NOISE_DBM;
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
+ sc = hw->priv;
+ sc->hw = hw;
+ sc->pdev = pdev;
+ sc->mem = mem;
+
+ if (ath_attach(id->device, sc) != 0) {
+ ret = -ENODEV;
+ goto bad3;
+ }
+
+ /* setup interrupt service routine */
+
+ if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) {
+ printk(KERN_ERR "%s: request_irq failed\n",
+ wiphy_name(hw->wiphy));
+ ret = -EIO;
+ goto bad4;
+ }
+
+ athname = ath9k_hw_probe(id->vendor, id->device);
+
+ printk(KERN_INFO "%s: %s: mem=0x%lx, irq=%d\n",
+ wiphy_name(hw->wiphy),
+ athname ? athname : "Atheros ???",
+ (unsigned long)mem, pdev->irq);
+
+ return 0;
+bad4:
+ ath_detach(sc);
+bad3:
+ ieee80211_free_hw(hw);
+bad2:
+ pci_iounmap(pdev, mem);
+bad1:
+ pci_release_region(pdev, 0);
+bad:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void ath_pci_remove(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath_softc *sc = hw->priv;
+
+ if (pdev->irq)
+ free_irq(pdev->irq, sc);
+ ath_detach(sc);
+ pci_iounmap(pdev, sc->mem);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ ieee80211_free_hw(hw);
+}
+
+#ifdef CONFIG_PM
+
+static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int ath_pci_resume(struct pci_dev *pdev)
+{
+ u32 val;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ pci_restore_state(pdev);
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
+
+static struct pci_driver ath_pci_driver = {
+ .name = "ath9k",
+ .id_table = ath_pci_id_table,
+ .probe = ath_pci_probe,
+ .remove = ath_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = ath_pci_suspend,
+ .resume = ath_pci_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init init_ath_pci(void)
+{
+ printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION);
+
+ if (pci_register_driver(&ath_pci_driver) < 0) {
+ printk(KERN_ERR
+ "ath_pci: No devices found, driver not installed.\n");
+ pci_unregister_driver(&ath_pci_driver);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+module_init(init_ath_pci);
+
+static void __exit exit_ath_pci(void)
+{
+ pci_unregister_driver(&ath_pci_driver);
+ printk(KERN_INFO "%s: driver unloaded\n", dev_info);
+}
+module_exit(exit_ath_pci);
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
new file mode 100644
index 00000000000..eb9121fdfd3
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hw.h"
+#include "reg.h"
+#include "phy.h"
+
+void
+ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, u32 freqIndex,
+ int regWrites)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ REG_WRITE_ARRAY(&ahp->ah_iniBB_RfGain, freqIndex, regWrites);
+}
+
+bool
+ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan)
+{
+ u32 channelSel = 0;
+ u32 bModeSynth = 0;
+ u32 aModeRefSel = 0;
+ u32 reg32 = 0;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) {
+ u32 txctl;
+
+ if (((freq - 2192) % 5) == 0) {
+ channelSel = ((freq - 672) * 2 - 3040) / 10;
+ bModeSynth = 0;
+ } else if (((freq - 2224) % 5) == 0) {
+ channelSel = ((freq - 704) * 2 - 3040) / 10;
+ bModeSynth = 1;
+ } else {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: invalid channel %u MHz\n", __func__,
+ freq);
+ return false;
+ }
+
+ channelSel = (channelSel << 2) & 0xff;
+ channelSel = ath9k_hw_reverse_bits(channelSel, 8);
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+
+ } else if ((freq % 20) == 0 && freq >= 5120) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 10) == 0) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+ else
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 5) == 0) {
+ channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else {
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ "%s: invalid channel %u MHz\n", __func__, freq);
+ return false;
+ }
+
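+	/*
+	 * Worked example (2.4 GHz channel 1, 2412 MHz): (2412 - 2192) % 5 == 0,
+	 * so channelSel = ((2412 - 672) * 2 - 3040) / 10 = 44, which becomes
+	 * (44 << 2) & 0xff = 0xb0 and bit-reverses to 0x0d; with bModeSynth
+	 * and aModeRefSel both 0, the value written below is
+	 * (0x0d << 8) | (1 << 5) | 0x1 = 0xd21.
+	 */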
+ reg32 =
+ (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+ (1 << 5) | 0x1;
+
+ REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+ ah->ah_curchan = chan;
+
+ AH5416(ah)->ah_curchanRadIndex = -1;
+
+ return true;
+}
+
+bool
+ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode, aModeRefSel = 0;
+ u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ struct chan_centers centers;
+ u32 refDivA = 24;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) {
+ u32 txctl;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = (freq * 0x10000) / 15;
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ if ((freq % 20) == 0) {
+ aModeRefSel = 3;
+ } else if ((freq % 10) == 0) {
+ aModeRefSel = 2;
+ } else {
+ aModeRefSel = 0;
+
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = (freq * 0x8000) / 15;
+
+ REG_RMW_FIELD(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA, refDivA);
+ }
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
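+	/*
+	 * Worked example (5 GHz, 5180 MHz): 5180 % 20 == 0, so aModeRefSel = 3
+	 * and ndiv = (5180 * (24 >> 3)) / 60 = 259; the divider satisfies
+	 * freq = ndiv * 60 / (refDivA >> aModeRefSel) = 259 * 20 = 5180 MHz,
+	 * and channelSel carries ndiv in bits 17 and up with a zero
+	 * fractional part.
+	 */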
+ reg32 = reg32 |
+ (bMode << 29) |
+ (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
+
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ ah->ah_curchan = chan;
+
+ AH5416(ah)->ah_curchanRadIndex = -1;
+
+ return true;
+}
+
+static void
+ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
+ u32 numBits, u32 firstBit,
+ u32 column)
+{
+ u32 tmp32, mask, arrayEntry, lastBit;
+ int32_t bitPosition, bitsLeft;
+
+ tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
+ arrayEntry = (firstBit - 1) / 8;
+ bitPosition = (firstBit - 1) % 8;
+ bitsLeft = numBits;
+ while (bitsLeft > 0) {
+ lastBit = (bitPosition + bitsLeft > 8) ?
+ 8 : bitPosition + bitsLeft;
+ mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
+ (column * 8);
+ rfBuf[arrayEntry] &= ~mask;
+ rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
+ (column * 8)) & mask;
+ bitsLeft -= 8 - bitPosition;
+ tmp32 = tmp32 >> (8 - bitPosition);
+ bitPosition = 0;
+ arrayEntry++;
+ }
+}
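+
+/*
+ * The RF bank data appears to be treated as a bit stream: bits are numbered
+ * from 1, eight stream bits live in each array entry, and 'column' selects a
+ * byte lane within the 32-bit word.  For example, writing 3 bits at firstBit
+ * 197 in column 0 (as ath9k_hw_set_rf_regs() does for ob2GHz) lands in entry
+ * 24 starting at bit position 4.
+ */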
+
+bool
+ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ u32 eepMinorRev;
+ u32 ob5GHz = 0, db5GHz = 0;
+ u32 ob2GHz = 0, db2GHz = 0;
+ int regWrites = 0;
+
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ return true;
+
+ eepMinorRev = ath9k_hw_get_eeprom(ahp, EEP_MINOR_REV);
+
+ RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1);
+
+ RF_BANK_SETUP(ahp->ah_analogBank1Data, &ahp->ah_iniBank1, 1);
+
+ RF_BANK_SETUP(ahp->ah_analogBank2Data, &ahp->ah_iniBank2, 1);
+
+ RF_BANK_SETUP(ahp->ah_analogBank3Data, &ahp->ah_iniBank3,
+ modesIndex);
+ {
+ int i;
+ for (i = 0; i < ahp->ah_iniBank6TPC.ia_rows; i++) {
+ ahp->ah_analogBank6Data[i] =
+ INI_RA(&ahp->ah_iniBank6TPC, i, modesIndex);
+ }
+ }
+
+ if (eepMinorRev >= 2) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ob2GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_2);
+ db2GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_2);
+ ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
+ ob2GHz, 3, 197, 0);
+ ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
+ db2GHz, 3, 194, 0);
+ } else {
+ ob5GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_5);
+ db5GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_5);
+ ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
+ ob5GHz, 3, 203, 0);
+ ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
+ db5GHz, 3, 200, 0);
+ }
+ }
+
+ RF_BANK_SETUP(ahp->ah_analogBank7Data, &ahp->ah_iniBank7, 1);
+
+ REG_WRITE_RF_ARRAY(&ahp->ah_iniBank0, ahp->ah_analogBank0Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ahp->ah_iniBank1, ahp->ah_analogBank1Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ahp->ah_iniBank2, ahp->ah_analogBank2Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ahp->ah_iniBank3, ahp->ah_analogBank3Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6TPC, ahp->ah_analogBank6Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ahp->ah_iniBank7, ahp->ah_analogBank7Data,
+ regWrites);
+
+ return true;
+}
+
+void
+ath9k_hw_rfdetach(struct ath_hal *ah)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (ahp->ah_analogBank0Data != NULL) {
+ kfree(ahp->ah_analogBank0Data);
+ ahp->ah_analogBank0Data = NULL;
+ }
+ if (ahp->ah_analogBank1Data != NULL) {
+ kfree(ahp->ah_analogBank1Data);
+ ahp->ah_analogBank1Data = NULL;
+ }
+ if (ahp->ah_analogBank2Data != NULL) {
+ kfree(ahp->ah_analogBank2Data);
+ ahp->ah_analogBank2Data = NULL;
+ }
+ if (ahp->ah_analogBank3Data != NULL) {
+ kfree(ahp->ah_analogBank3Data);
+ ahp->ah_analogBank3Data = NULL;
+ }
+ if (ahp->ah_analogBank6Data != NULL) {
+ kfree(ahp->ah_analogBank6Data);
+ ahp->ah_analogBank6Data = NULL;
+ }
+ if (ahp->ah_analogBank6TPCData != NULL) {
+ kfree(ahp->ah_analogBank6TPCData);
+ ahp->ah_analogBank6TPCData = NULL;
+ }
+ if (ahp->ah_analogBank7Data != NULL) {
+ kfree(ahp->ah_analogBank7Data);
+ ahp->ah_analogBank7Data = NULL;
+ }
+ if (ahp->ah_addac5416_21 != NULL) {
+ kfree(ahp->ah_addac5416_21);
+ ahp->ah_addac5416_21 = NULL;
+ }
+ if (ahp->ah_bank6Temp != NULL) {
+ kfree(ahp->ah_bank6Temp);
+ ahp->ah_bank6Temp = NULL;
+ }
+}
+
+bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
+{
+ struct ath_hal_5416 *ahp = AH5416(ah);
+
+ if (!AR_SREV_9280_10_OR_LATER(ah)) {
+
+ ahp->ah_analogBank0Data =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank0.ia_rows), GFP_KERNEL);
+ ahp->ah_analogBank1Data =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank1.ia_rows), GFP_KERNEL);
+ ahp->ah_analogBank2Data =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank2.ia_rows), GFP_KERNEL);
+ ahp->ah_analogBank3Data =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank3.ia_rows), GFP_KERNEL);
+ ahp->ah_analogBank6Data =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
+ ahp->ah_analogBank6TPCData =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank6TPC.ia_rows), GFP_KERNEL);
+ ahp->ah_analogBank7Data =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank7.ia_rows), GFP_KERNEL);
+
+ if (ahp->ah_analogBank0Data == NULL
+ || ahp->ah_analogBank1Data == NULL
+ || ahp->ah_analogBank2Data == NULL
+ || ahp->ah_analogBank3Data == NULL
+ || ahp->ah_analogBank6Data == NULL
+ || ahp->ah_analogBank6TPCData == NULL
+ || ahp->ah_analogBank7Data == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "%s: cannot allocate RF banks\n",
+ __func__);
+ *status = -ENOMEM;
+ return false;
+ }
+
+ ahp->ah_addac5416_21 =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniAddac.ia_rows *
+ ahp->ah_iniAddac.ia_columns), GFP_KERNEL);
+ if (ahp->ah_addac5416_21 == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "%s: cannot allocate ah_addac5416_21\n",
+ __func__);
+ *status = -ENOMEM;
+ return false;
+ }
+
+ ahp->ah_bank6Temp =
+ kzalloc((sizeof(u32) *
+ ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
+ if (ahp->ah_bank6Temp == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "%s: cannot allocate ah_bank6Temp\n",
+ __func__);
+ *status = -ENOMEM;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void
+ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan)
+{
+ int i, regWrites = 0;
+ struct ath_hal_5416 *ahp = AH5416(ah);
+ u32 bank6SelMask;
+ u32 *bank6Temp = ahp->ah_bank6Temp;
+
+ switch (ahp->ah_diversityControl) {
+ case ATH9K_ANT_FIXED_A:
+ bank6SelMask = (ahp->ah_antennaSwitchSwap & ANTSWAP_AB) ?
+ REDUCE_CHAIN_0 : REDUCE_CHAIN_1;
+ break;
+ case ATH9K_ANT_FIXED_B:
+ bank6SelMask = (ahp->ah_antennaSwitchSwap & ANTSWAP_AB) ?
+ REDUCE_CHAIN_1 : REDUCE_CHAIN_0;
+ break;
+ case ATH9K_ANT_VARIABLE:
+ return;
+ default:
+ return;
+ }
+
+ for (i = 0; i < ahp->ah_iniBank6.ia_rows; i++)
+ bank6Temp[i] = ahp->ah_analogBank6Data[i];
+
+ REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
+
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
+ ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
+
+ REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6, bank6Temp, regWrites);
+
+ REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
+#ifdef ALTER_SWITCH
+ REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
+ (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
+ | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
+#endif
+}
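ath9k_phy_modify_rx_buffer() above bit-reverses the value and scatters it into the analog bank buffer at eight payload bits per 32-bit word, with 'column' selecting the byte lane. A self-contained sketch of that index arithmetic follows; reverse_bits() is a local stand-in for ath9k_hw_reverse_bits() and the 32-entry bank size is an arbitrary example.

    /* Sketch only: same index math as ath9k_phy_modify_rx_buffer(), with a
     * local reverse_bits() standing in for ath9k_hw_reverse_bits() and an
     * arbitrary 32-entry bank buffer. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t reverse_bits(uint32_t val, uint32_t nbits)
    {
            uint32_t i, out = 0;

            for (i = 0; i < nbits; i++)
                    out |= ((val >> i) & 1) << (nbits - 1 - i);
            return out;
    }

    static void modify_buffer(uint32_t *buf, uint32_t reg32, uint32_t numBits,
                              uint32_t firstBit, uint32_t column)
    {
            uint32_t tmp32, mask, arrayEntry, lastBit;
            int32_t bitPosition, bitsLeft;

            tmp32 = reverse_bits(reg32, numBits);
            arrayEntry = (firstBit - 1) / 8;        /* 8 payload bits per word */
            bitPosition = (firstBit - 1) % 8;
            bitsLeft = numBits;
            while (bitsLeft > 0) {
                    lastBit = (bitPosition + bitsLeft > 8) ?
                              8 : bitPosition + bitsLeft;
                    mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
                           (column * 8);
                    buf[arrayEntry] &= ~mask;
                    buf[arrayEntry] |= ((tmp32 << bitPosition) <<
                                        (column * 8)) & mask;
                    printf("word %u gets mask 0x%08x\n",
                           (unsigned)arrayEntry, (unsigned)mask);
                    bitsLeft -= 8 - bitPosition;
                    tmp32 >>= 8 - bitPosition;
                    bitPosition = 0;
                    arrayEntry++;
            }
    }

    int main(void)
    {
            uint32_t bank[32] = { 0 };

            /* e.g. a 3-bit value written at bit 197 lands in word 24,
             * bits 4..6 of byte lane 0 */
            modify_buffer(bank, 0x5, 3, 197, 0);
            return 0;
    }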
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
new file mode 100644
index 00000000000..0cd399a5344
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef PHY_H
+#define PHY_H
+
+bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
+ struct ath9k_channel
+ *chan);
+bool ath9k_hw_set_channel(struct ath_hal *ah,
+ struct ath9k_channel *chan);
+void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex,
+ u32 freqIndex, int regWrites);
+bool ath9k_hw_set_rf_regs(struct ath_hal *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex);
+void ath9k_hw_decrease_chain_power(struct ath_hal *ah,
+ struct ath9k_channel *chan);
+bool ath9k_hw_init_rf(struct ath_hal *ah,
+ int *status);
+
+#define AR_PHY_BASE 0x9800
+#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
+
+#define AR_PHY_TEST 0x9800
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+
+#define AR_PHY_TURBO 0x9804
+#define AR_PHY_FC_TURBO_MODE 0x00000001
+#define AR_PHY_FC_TURBO_SHORT 0x00000002
+#define AR_PHY_FC_DYN2040_EN 0x00000004
+#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
+#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
+#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
+#define AR_PHY_FC_HT_EN 0x00000040
+#define AR_PHY_FC_SHORT_GI_40 0x00000080
+#define AR_PHY_FC_WALSH 0x00000100
+#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
+
+#define AR_PHY_TIMING2 0x9810
+#define AR_PHY_TIMING3 0x9814
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+
+#define AR_PHY_CHIP_ID 0x9818
+#define AR_PHY_CHIP_ID_REV_0 0x80
+#define AR_PHY_CHIP_ID_REV_1 0x81
+#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
+
+#define AR_PHY_ACTIVE 0x981C
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+
+#define AR_PHY_RF_CTL2 0x9824
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+
+#define AR_PHY_RF_CTL3 0x9828
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+
+#define AR_PHY_ADC_CTL 0x982C
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
+#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
+#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
+#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
+
+#define AR_PHY_ADC_SERIAL_CTL 0x9830
+#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
+#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
+
+#define AR_PHY_RF_CTL4 0x9834
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
+
+#define AR_PHY_SETTLING 0x9844
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+
+#define AR_PHY_RXGAIN 0x9848
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+
+#define AR_PHY_DESIRED_SZ 0x9850
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+
+#define AR_PHY_FIND_SIG 0x9858
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+
+#define AR_PHY_AGC_CTL1 0x985C
+#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
+#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
+
+#define AR_PHY_AGC_CONTROL 0x9860
+#define AR_PHY_AGC_CONTROL_CAL 0x00000001
+#define AR_PHY_AGC_CONTROL_NF 0x00000002
+#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
+#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
+#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
+
+#define AR_PHY_CCA 0x9864
+#define AR_PHY_MINCCA_PWR 0x0FF80000
+#define AR_PHY_MINCCA_PWR_S 19
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+
+#define AR_PHY_SFCORR_LOW 0x986C
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+
+#define AR_PHY_SFCORR 0x9868
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+
+#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
+#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
+#define AR_PHY_SYNTH_CONTROL 0x9874
+#define AR_PHY_SLEEP_SCAL 0x9878
+
+#define AR_PHY_PLL_CTL 0x987c
+#define AR_PHY_PLL_CTL_40 0xaa
+#define AR_PHY_PLL_CTL_40_5413 0x04
+#define AR_PHY_PLL_CTL_44 0xab
+#define AR_PHY_PLL_CTL_44_2133 0xeb
+#define AR_PHY_PLL_CTL_40_2133 0xea
+
+#define AR_PHY_RX_DELAY 0x9914
+#define AR_PHY_SEARCH_START_DELAY 0x9918
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+
+#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
+#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
+
+#define AR_PHY_TIMING5 0x9924
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+
+#define AR_PHY_FRAME_CTL 0x9944
+#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
+
+#define AR_PHY_TXPWRADJ 0x994C
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
+
+#define AR_PHY_RADAR_EXT 0x9940
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR_PHY_RADAR_0 0x9954
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+
+#define AR_PHY_RADAR_1 0x9958
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+
+#define AR_PHY_SWITCH_CHAIN_0 0x9960
+#define AR_PHY_SWITCH_COM 0x9964
+
+#define AR_PHY_SIGMA_DELTA 0x996C
+#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
+#define AR_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
+#define AR_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+#define AR_PHY_RESTART 0x9970
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+
+#define AR_PHY_RFBUS_REQ 0x997C
+#define AR_PHY_RFBUS_REQ_EN 0x00000001
+
+#define AR_PHY_TIMING7 0x9980
+#define AR_PHY_TIMING8 0x9984
+#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
+
+#define AR_PHY_BIN_MASK2_1 0x9988
+#define AR_PHY_BIN_MASK2_2 0x998c
+#define AR_PHY_BIN_MASK2_3 0x9990
+#define AR_PHY_BIN_MASK2_4 0x9994
+
+#define AR_PHY_BIN_MASK_1 0x9900
+#define AR_PHY_BIN_MASK_2 0x9904
+#define AR_PHY_BIN_MASK_3 0x9908
+
+#define AR_PHY_MASK_CTL 0x990c
+
+#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
+#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR_PHY_TIMING9 0x9998
+#define AR_PHY_TIMING10 0x999c
+#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
+
+#define AR_PHY_TIMING11 0x99a0
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
+
+#define AR_PHY_RX_CHAINMASK 0x99a4
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
+
+#define AR_PHY_EXT_CCA0 0x99b8
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+
+#define AR_PHY_EXT_CCA 0x99bc
+#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
+#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_SFCORR_EXT 0x99c0
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+
+#define AR_PHY_HALFGI 0x99D0
+#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_HALFGI_DSC_MAN_S 4
+#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
+#define AR_PHY_HALFGI_DSC_EXP_S 0
+
+#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+
+#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+
+#define AR_PHY_M_SLEEP 0x99f0
+#define AR_PHY_REFCLKDLY 0x99f4
+#define AR_PHY_REFCLKPD 0x99f8
+
+#define AR_PHY_CALMODE 0x99f0
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+
+#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
+
+#define AR_PHY_CURRENT_RSSI 0x9c1c
+#define AR9280_PHY_CURRENT_RSSI 0x9c3c
+
+#define AR_PHY_RFBUS_GRANT 0x9C20
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001
+
+#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+
+#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
+
+#define AR_PHY_MODE 0xA200
+#define AR_PHY_MODE_AR2133 0x08
+#define AR_PHY_MODE_AR5111 0x00
+#define AR_PHY_MODE_AR5112 0x08
+#define AR_PHY_MODE_DYNAMIC 0x04
+#define AR_PHY_MODE_RF2GHZ 0x02
+#define AR_PHY_MODE_RF5GHZ 0x00
+#define AR_PHY_MODE_CCK 0x01
+#define AR_PHY_MODE_OFDM 0x00
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
+
+#define AR_PHY_CCK_TX_CTRL 0xA204
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+
+#define AR_PHY_CCK_DETECT 0xA208
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+/* [12:6] settling time for antenna switch */
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+
+#define AR_PHY_GAIN_2GHZ 0xA20C
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
+
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
+
+#define AR_PHY_CCK_RXCTRL4 0xA21C
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
+
+#define AR_PHY_DAG_CTRLCCK 0xA228
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
+#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
+
+#define AR_PHY_POWER_TX_RATE3 0xA234
+#define AR_PHY_POWER_TX_RATE4 0xA238
+
+#define AR_PHY_SCRM_SEQ_XR 0xA23C
+#define AR_PHY_HEADER_DETECT_XR 0xA240
+#define AR_PHY_CHIRP_DETECTED_XR 0xA244
+#define AR_PHY_BLUETOOTH 0xA254
+
+#define AR_PHY_TPCRG1 0xA258
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+
+#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
+#define AR_PHY_MASK2_M_31_45 0xa3a4
+#define AR_PHY_MASK2_M_16_30 0xa3a8
+#define AR_PHY_MASK2_M_00_15 0xa3ac
+#define AR_PHY_MASK2_P_15_01 0xa3b8
+#define AR_PHY_MASK2_P_30_16 0xa3bc
+#define AR_PHY_MASK2_P_45_31 0xa3c0
+#define AR_PHY_MASK2_P_61_45 0xa3c4
+#define AR_PHY_SPUR_REG 0x994c
+
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
+#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+
+#define AR_PHY_PILOT_MASK_01_30 0xa3b0
+#define AR_PHY_PILOT_MASK_31_60 0xa3b4
+
+#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
+#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
+
+#define AR_PHY_ANALOG_SWAP 0xa268
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+
+#define AR_PHY_TPCRG5 0xA26C
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+
+#define AR_PHY_POWER_TX_RATE5 0xA38C
+#define AR_PHY_POWER_TX_RATE6 0xA390
+
+#define AR_PHY_CAL_CHAINMASK 0xA39C
+
+#define AR_PHY_POWER_TX_SUB 0xA3C8
+#define AR_PHY_POWER_TX_RATE7 0xA3CC
+#define AR_PHY_POWER_TX_RATE8 0xA3D0
+#define AR_PHY_POWER_TX_RATE9 0xA3D4
+
+#define AR_PHY_XPA_CFG 0xA3D8
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+
+#define AR_PHY_CH1_CCA 0xa864
+#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH1_MINCCA_PWR_S 19
+#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_CH1_MINCCA_PWR_S 20
+
+#define AR_PHY_CH2_CCA 0xb864
+#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH2_MINCCA_PWR_S 19
+
+#define AR_PHY_CH1_EXT_CCA 0xa9bc
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_CH2_EXT_CCA 0xb9bc
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+
+#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
+ int r; \
+ for (r = 0; r < ((iniarray)->ia_rows); r++) { \
+ REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \
+ DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, \
+ "RF 0x%x V 0x%x\n", \
+ INI_RA((iniarray), r, 0), (regData)[r]); \
+ DO_DELAY(regWr); \
+ } \
+ } while (0)
+
+#define ATH9K_KEY_XOR 0xaa
+
+#define ATH9K_IS_MIC_ENABLED(ah) \
+ (AH5416(ah)->ah_staId1Defaults & AR_STA_ID1_CRPT_MIC_ENABLE)
+
+#define ANTSWAP_AB 0x0001
+#define REDUCE_CHAIN_0 0x00000050
+#define REDUCE_CHAIN_1 0x00000051
+
+#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
+ int i; \
+ for (i = 0; i < (_iniarray)->ia_rows; i++) \
+ (_bank)[i] = INI_RA((_iniarray), i, _col); \
+ } while (0)
+
+#endif
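RF_BANK_SETUP and REG_WRITE_RF_ARRAY above implement a two-step pattern: copy one mode column of an initialization table into a per-bank scratch buffer, then stream the buffer out register by register, taking each register address from column 0 of the same table. The sketch below models that pattern with simplified stand-in types and invented addresses; it is not the driver's real ini table or REG_WRITE.

    /* Sketch only: models the RF_BANK_SETUP / REG_WRITE_RF_ARRAY pattern with
     * simplified stand-in types; addresses and values here are made up. */
    #include <stdio.h>
    #include <stdint.h>

    #define ROWS 3
    #define COLS 3

    struct ini_array {                      /* stand-in for the driver's ini table */
            int rows, cols;
            uint32_t data[ROWS][COLS];      /* column 0 = register address */
    };

    #define INI_RA(a, r, c) ((a)->data[(r)][(c)])

    static void reg_write(uint32_t addr, uint32_t val)  /* stand-in for REG_WRITE */
    {
            printf("write 0x%04x <- 0x%08x\n", (unsigned)addr, (unsigned)val);
    }

    static void bank_setup(uint32_t *bank, const struct ini_array *ini, int col)
    {
            int i;

            for (i = 0; i < ini->rows; i++)
                    bank[i] = INI_RA(ini, i, col);  /* pick one mode column */
    }

    static void write_rf_array(const struct ini_array *ini, const uint32_t *bank)
    {
            int r;

            for (r = 0; r < ini->rows; r++)
                    reg_write(INI_RA(ini, r, 0), bank[r]);
    }

    int main(void)
    {
            /* col 0: address, col 1: mode A values, col 2: mode B values */
            struct ini_array bank3 = { ROWS, COLS, {
                    { 0x98f0, 0x01, 0x11 },
                    { 0x98f4, 0x02, 0x12 },
                    { 0x98f8, 0x03, 0x13 },
            } };
            uint32_t scratch[ROWS];

            bank_setup(scratch, &bank3, 2);         /* modesIndex = 2 */
            write_rf_array(&bank3, scratch);
            return 0;
    }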
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
new file mode 100644
index 00000000000..73c460ad355
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright (c) 2004 Video54 Technologies, Inc.
+ * Copyright (c) 2004-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Atheros rate control algorithm
+ */
+
+#include "core.h"
+#include "../net/mac80211/rate.h"
+
+static u32 tx_triglevel_max;
+
+static struct ath_rate_table ar5416_11na_ratetable = {
+ 42,
+ {
+ { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
+ 5400, 0x0b, 0x00, 12,
+ 0, 2, 1, 0, 0, 0, 0, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
+ 7800, 0x0f, 0x00, 18,
+ 0, 3, 1, 1, 1, 1, 1, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
+ 10000, 0x0a, 0x00, 24,
+ 2, 4, 2, 2, 2, 2, 2, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
+ 13900, 0x0e, 0x00, 36,
+ 2, 6, 2, 3, 3, 3, 3, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
+ 17300, 0x09, 0x00, 48,
+ 4, 10, 3, 4, 4, 4, 4, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
+ 23000, 0x0d, 0x00, 72,
+ 4, 14, 3, 5, 5, 5, 5, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
+ 27400, 0x08, 0x00, 96,
+ 4, 20, 3, 6, 6, 6, 6, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
+ 29300, 0x0c, 0x00, 108,
+ 4, 23, 3, 7, 7, 7, 7, 0 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
+ 6400, 0x80, 0x00, 0,
+ 0, 2, 3, 8, 24, 8, 24, 3216 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
+ 12700, 0x81, 0x00, 1,
+ 2, 4, 3, 9, 25, 9, 25, 6434 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
+ 18800, 0x82, 0x00, 2,
+ 2, 6, 3, 10, 26, 10, 26, 9650 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
+ 25000, 0x83, 0x00, 3,
+ 4, 10, 3, 11, 27, 11, 27, 12868 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
+ 36700, 0x84, 0x00, 4,
+ 4, 14, 3, 12, 28, 12, 28, 19304 },
+ { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
+ 48100, 0x85, 0x00, 5,
+ 4, 20, 3, 13, 29, 13, 29, 25740 },
+ { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
+ 53500, 0x86, 0x00, 6,
+ 4, 23, 3, 14, 30, 14, 30, 28956 },
+ { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
+ 59000, 0x87, 0x00, 7,
+ 4, 25, 3, 15, 31, 15, 32, 32180 },
+ { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
+ 12700, 0x88, 0x00, 8,
+ 0, 2, 3, 16, 33, 16, 33, 6430 },
+ { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
+ 24800, 0x89, 0x00, 9,
+ 2, 4, 3, 17, 34, 17, 34, 12860 },
+ { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
+ 36600, 0x8a, 0x00, 10,
+ 2, 6, 3, 18, 35, 18, 35, 19300 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
+ 48100, 0x8b, 0x00, 11,
+ 4, 10, 3, 19, 36, 19, 36, 25736 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
+ 69500, 0x8c, 0x00, 12,
+ 4, 14, 3, 20, 37, 20, 37, 38600 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
+ 89500, 0x8d, 0x00, 13,
+ 4, 20, 3, 21, 38, 21, 38, 51472 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
+ 98900, 0x8e, 0x00, 14,
+ 4, 23, 3, 22, 39, 22, 39, 57890 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
+ 108300, 0x8f, 0x00, 15,
+ 4, 25, 3, 23, 40, 23, 41, 64320 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
+ 13200, 0x80, 0x00, 0,
+ 0, 2, 3, 8, 24, 24, 24, 6684 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
+ 25900, 0x81, 0x00, 1,
+ 2, 4, 3, 9, 25, 25, 25, 13368 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
+ 38600, 0x82, 0x00, 2,
+ 2, 6, 3, 10, 26, 26, 26, 20052 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
+ 49800, 0x83, 0x00, 3,
+ 4, 10, 3, 11, 27, 27, 27, 26738 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
+ 72200, 0x84, 0x00, 4,
+ 4, 14, 3, 12, 28, 28, 28, 40104 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
+ 92900, 0x85, 0x00, 5,
+ 4, 20, 3, 13, 29, 29, 29, 53476 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
+ 102700, 0x86, 0x00, 6,
+ 4, 23, 3, 14, 30, 30, 30, 60156 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
+ 112000, 0x87, 0x00, 7,
+ 4, 25, 3, 15, 31, 32, 32, 66840 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
+ 122000, 0x87, 0x00, 7,
+ 4, 25, 3, 15, 31, 32, 32, 74200 },
+ { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
+ 25800, 0x88, 0x00, 8,
+ 0, 2, 3, 16, 33, 33, 33, 13360 },
+ { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
+ 49800, 0x89, 0x00, 9,
+ 2, 4, 3, 17, 34, 34, 34, 26720 },
+ { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
+ 71900, 0x8a, 0x00, 10,
+ 2, 6, 3, 18, 35, 35, 35, 40080 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
+ 92500, 0x8b, 0x00, 11,
+ 4, 10, 3, 19, 36, 36, 36, 53440 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
+ 130300, 0x8c, 0x00, 12,
+ 4, 14, 3, 20, 37, 37, 37, 80160 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
+ 162800, 0x8d, 0x00, 13,
+ 4, 20, 3, 21, 38, 38, 38, 106880 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
+ 178200, 0x8e, 0x00, 14,
+ 4, 23, 3, 22, 39, 39, 39, 120240 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
+ 192100, 0x8f, 0x00, 15,
+ 4, 25, 3, 23, 40, 41, 41, 133600 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
+ 207000, 0x8f, 0x00, 15,
+ 4, 25, 3, 23, 40, 41, 41, 148400 },
+ },
+ 50, /* probe interval */
+ 50, /* rssi reduce interval */
+ WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
+};
+
+/* TRUE_ALL - valid for 20/40/Legacy,
+ * TRUE - Legacy only,
+ * TRUE_20 - HT 20 only,
+ * TRUE_40 - HT 40 only */
+
+/* 4ms frame limit not used for NG mode. The values filled
+ * for HT are the 64K max aggregate limit */
+
+static struct ath_rate_table ar5416_11ng_ratetable = {
+ 46,
+ {
+ { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 1000, /* 1 Mb */
+ 900, 0x1b, 0x00, 2,
+ 0, 0, 1, 0, 0, 0, 0, 0 },
+ { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 2000, /* 2 Mb */
+ 1900, 0x1a, 0x04, 4,
+ 1, 1, 1, 1, 1, 1, 1, 0 },
+ { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
+ 4900, 0x19, 0x04, 11,
+ 2, 2, 2, 2, 2, 2, 2, 0 },
+ { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 11000, /* 11 Mb */
+ 8100, 0x18, 0x04, 22,
+ 3, 3, 2, 3, 3, 3, 3, 0 },
+ { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
+ 5400, 0x0b, 0x00, 12,
+ 4, 2, 1, 4, 4, 4, 4, 0 },
+ { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
+ 7800, 0x0f, 0x00, 18,
+ 4, 3, 1, 5, 5, 5, 5, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
+ 10100, 0x0a, 0x00, 24,
+ 6, 4, 1, 6, 6, 6, 6, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
+ 14100, 0x0e, 0x00, 36,
+ 6, 6, 2, 7, 7, 7, 7, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
+ 17700, 0x09, 0x00, 48,
+ 8, 10, 3, 8, 8, 8, 8, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
+ 23700, 0x0d, 0x00, 72,
+ 8, 14, 3, 9, 9, 9, 9, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
+ 27400, 0x08, 0x00, 96,
+ 8, 20, 3, 10, 10, 10, 10, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
+ 30900, 0x0c, 0x00, 108,
+ 8, 23, 3, 11, 11, 11, 11, 0 },
+ { FALSE, FALSE, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
+ 6400, 0x80, 0x00, 0,
+ 4, 2, 3, 12, 28, 12, 28, 3216 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
+ 12700, 0x81, 0x00, 1,
+ 6, 4, 3, 13, 29, 13, 29, 6434 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
+ 18800, 0x82, 0x00, 2,
+ 6, 6, 3, 14, 30, 14, 30, 9650 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
+ 25000, 0x83, 0x00, 3,
+ 8, 10, 3, 15, 31, 15, 31, 12868 },
+ { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
+ 36700, 0x84, 0x00, 4,
+ 8, 14, 3, 16, 32, 16, 32, 19304 },
+ { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
+ 48100, 0x85, 0x00, 5,
+ 8, 20, 3, 17, 33, 17, 33, 25740 },
+ { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
+ 53500, 0x86, 0x00, 6,
+ 8, 23, 3, 18, 34, 18, 34, 28956 },
+ { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
+ 59000, 0x87, 0x00, 7,
+ 8, 25, 3, 19, 35, 19, 36, 32180 },
+ { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
+ 12700, 0x88, 0x00, 8,
+ 4, 2, 3, 20, 37, 20, 37, 6430 },
+ { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
+ 24800, 0x89, 0x00, 9,
+ 6, 4, 3, 21, 38, 21, 38, 12860 },
+ { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
+ 36600, 0x8a, 0x00, 10,
+ 6, 6, 3, 22, 39, 22, 39, 19300 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
+ 48100, 0x8b, 0x00, 11,
+ 8, 10, 3, 23, 40, 23, 40, 25736 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
+ 69500, 0x8c, 0x00, 12,
+ 8, 14, 3, 24, 41, 24, 41, 38600 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
+ 89500, 0x8d, 0x00, 13,
+ 8, 20, 3, 25, 42, 25, 42, 51472 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
+ 98900, 0x8e, 0x00, 14,
+ 8, 23, 3, 26, 43, 26, 44, 57890 },
+ { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
+ 108300, 0x8f, 0x00, 15,
+ 8, 25, 3, 27, 44, 27, 45, 64320 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
+ 13200, 0x80, 0x00, 0,
+ 8, 2, 3, 12, 28, 28, 28, 6684 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
+ 25900, 0x81, 0x00, 1,
+ 8, 4, 3, 13, 29, 29, 29, 13368 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
+ 38600, 0x82, 0x00, 2,
+ 8, 6, 3, 14, 30, 30, 30, 20052 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
+ 49800, 0x83, 0x00, 3,
+ 8, 10, 3, 15, 31, 31, 31, 26738 },
+ { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
+ 72200, 0x84, 0x00, 4,
+ 8, 14, 3, 16, 32, 32, 32, 40104 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
+ 92900, 0x85, 0x00, 5,
+ 8, 20, 3, 17, 33, 33, 33, 53476 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
+ 102700, 0x86, 0x00, 6,
+ 8, 23, 3, 18, 34, 34, 34, 60156 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
+ 112000, 0x87, 0x00, 7,
+ 8, 23, 3, 19, 35, 36, 36, 66840 },
+ { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
+ 122000, 0x87, 0x00, 7,
+ 8, 25, 3, 19, 35, 36, 36, 74200 },
+ { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
+ 25800, 0x88, 0x00, 8,
+ 8, 2, 3, 20, 37, 37, 37, 13360 },
+ { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
+ 49800, 0x89, 0x00, 9,
+ 8, 4, 3, 21, 38, 38, 38, 26720 },
+ { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
+ 71900, 0x8a, 0x00, 10,
+ 8, 6, 3, 22, 39, 39, 39, 40080 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
+ 92500, 0x8b, 0x00, 11,
+ 8, 10, 3, 23, 40, 40, 40, 53440 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
+ 130300, 0x8c, 0x00, 12,
+ 8, 14, 3, 24, 41, 41, 41, 80160 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
+ 162800, 0x8d, 0x00, 13,
+ 8, 20, 3, 25, 42, 42, 42, 106880 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
+ 178200, 0x8e, 0x00, 14,
+ 8, 23, 3, 26, 43, 43, 43, 120240 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
+ 192100, 0x8f, 0x00, 15,
+ 8, 23, 3, 27, 44, 45, 45, 133600 },
+ { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
+ 207000, 0x8f, 0x00, 15,
+ 8, 25, 3, 27, 44, 45, 45, 148400 },
+ },
+ 50, /* probe interval */
+ 50, /* rssi reduce interval */
+ WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
+};
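The last column of the HT entries above is the per-rate max_4ms_framelen budget referred to by the comment before this table; numerically it tracks roughly four milliseconds of airtime at the nominal rate, about one percent under ratekbps * 4 / 8 bytes. The snippet below is only a quick sanity check of that relationship against two sample entries, not driver code.

    /* Sketch only: compares the 4 ms byte budget (ratekbps * 4 / 8) with the
     * max_4ms_framelen column for two sample entries from the table above. */
    #include <stdio.h>

    int main(void)
    {
            static const struct { unsigned int kbps, table_bytes; } s[] = {
                    { 6500, 3216 },         /* HT20 MCS0 entry */
                    { 130000, 64320 },      /* HT20 MCS15 entry */
            };
            unsigned int i;

            for (i = 0; i < sizeof(s) / sizeof(s[0]); i++) {
                    unsigned int budget = s[i].kbps * 4 / 8;

                    printf("%u kbps: 4 ms = %u bytes, table = %u\n",
                           s[i].kbps, budget, s[i].table_bytes);
            }
            return 0;
    }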
+
+static struct ath_rate_table ar5416_11a_ratetable = {
+ 8,
+ {
+ { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
+ 5400, 0x0b, 0x00, (0x80|12),
+ 0, 2, 1, 0, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
+ 7800, 0x0f, 0x00, 18,
+ 0, 3, 1, 1, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
+ 10000, 0x0a, 0x00, (0x80|24),
+ 2, 4, 2, 2, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
+ 13900, 0x0e, 0x00, 36,
+ 2, 6, 2, 3, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
+ 17300, 0x09, 0x00, (0x80|48),
+ 4, 10, 3, 4, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
+ 23000, 0x0d, 0x00, 72,
+ 4, 14, 3, 5, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
+ 27400, 0x08, 0x00, 96,
+ 4, 19, 3, 6, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
+ 29300, 0x0c, 0x00, 108,
+ 4, 23, 3, 7, 0 },
+ },
+ 50, /* probe interval */
+ 50, /* rssi reduce interval */
+ 0, /* Phy rates allowed initially */
+};
+
+static struct ath_rate_table ar5416_11a_ratetable_Half = {
+ 8,
+ {
+ { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 6 Mb */
+ 2700, 0x0b, 0x00, (0x80|6),
+ 0, 2, 1, 0, 0},
+ { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 9 Mb */
+ 3900, 0x0f, 0x00, 9,
+ 0, 3, 1, 1, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 12 Mb */
+ 5000, 0x0a, 0x00, (0x80|12),
+ 2, 4, 2, 2, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 18 Mb */
+ 6950, 0x0e, 0x00, 18,
+ 2, 6, 2, 3, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 24 Mb */
+ 8650, 0x09, 0x00, (0x80|24),
+ 4, 10, 3, 4, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 36 Mb */
+ 11500, 0x0d, 0x00, 36,
+ 4, 14, 3, 5, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 48 Mb */
+ 13700, 0x08, 0x00, 48,
+ 4, 19, 3, 6, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 27000, /* 54 Mb */
+ 14650, 0x0c, 0x00, 54,
+ 4, 23, 3, 7, 0 },
+ },
+ 50, /* probe interval */
+ 50, /* rssi reduce interval */
+ 0, /* Phy rates allowed initially */
+};
+
+static struct ath_rate_table ar5416_11a_ratetable_Quarter = {
+ 8,
+ {
+ { TRUE, TRUE, WLAN_PHY_OFDM, 1500, /* 6 Mb */
+ 1350, 0x0b, 0x00, (0x80|3),
+ 0, 2, 1, 0, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 2250, /* 9 Mb */
+ 1950, 0x0f, 0x00, 4,
+ 0, 3, 1, 1, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 12 Mb */
+ 2500, 0x0a, 0x00, (0x80|6),
+ 2, 4, 2, 2, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 18 Mb */
+ 3475, 0x0e, 0x00, 9,
+ 2, 6, 2, 3, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 24 Mb */
+ 4325, 0x09, 0x00, (0x80|12),
+ 4, 10, 3, 4, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 36 Mb */
+ 5750, 0x0d, 0x00, 18,
+ 4, 14, 3, 5, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 48 Mb */
+ 6850, 0x08, 0x00, 24,
+ 4, 19, 3, 6, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 13500, /* 54 Mb */
+ 7325, 0x0c, 0x00, 27,
+ 4, 23, 3, 7, 0 },
+ },
+ 50, /* probe interval */
+ 50, /* rssi reduce interval */
+ 0, /* Phy rates allowed initially */
+};
+
+static struct ath_rate_table ar5416_11g_ratetable = {
+ 12,
+ {
+ { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
+ 900, 0x1b, 0x00, 2,
+ 0, 0, 1, 0, 0 },
+ { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
+ 1900, 0x1a, 0x04, 4,
+ 1, 1, 1, 1, 0 },
+ { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
+ 4900, 0x19, 0x04, 11,
+ 2, 2, 2, 2, 0 },
+ { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
+ 8100, 0x18, 0x04, 22,
+ 3, 3, 2, 3, 0 },
+ { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
+ 5400, 0x0b, 0x00, 12,
+ 4, 2, 1, 4, 0 },
+ { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
+ 7800, 0x0f, 0x00, 18,
+ 4, 3, 1, 5, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
+ 10000, 0x0a, 0x00, 24,
+ 6, 4, 1, 6, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
+ 13900, 0x0e, 0x00, 36,
+ 6, 6, 2, 7, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
+ 17300, 0x09, 0x00, 48,
+ 8, 10, 3, 8, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
+ 23000, 0x0d, 0x00, 72,
+ 8, 14, 3, 9, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
+ 27400, 0x08, 0x00, 96,
+ 8, 19, 3, 10, 0 },
+ { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
+ 29300, 0x0c, 0x00, 108,
+ 8, 23, 3, 11, 0 },
+ },
+ 50, /* probe interval */
+ 50, /* rssi reduce interval */
+ 0, /* Phy rates allowed initially */
+};
+
+static struct ath_rate_table ar5416_11b_ratetable = {
+ 4,
+ {
+ { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
+ 900, 0x1b, 0x00, (0x80|2),
+ 0, 0, 1, 0, 0 },
+ { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
+ 1800, 0x1a, 0x04, (0x80|4),
+ 1, 1, 1, 1, 0 },
+ { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
+ 4300, 0x19, 0x04, (0x80|11),
+ 1, 2, 2, 2, 0 },
+ { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
+ 7100, 0x18, 0x04, (0x80|22),
+ 1, 4, 100, 3, 0 },
+ },
+ 100, /* probe interval */
+ 100, /* rssi reduce interval */
+ 0, /* Phy rates allowed initially */
+};
+
+static void ar5416_attach_ratetables(struct ath_rate_softc *sc)
+{
+ /*
+ * Attach rate tables.
+ */
+ sc->hw_rate_table[ATH9K_MODE_11B] = &ar5416_11b_ratetable;
+ sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
+ sc->hw_rate_table[ATH9K_MODE_11G] = &ar5416_11g_ratetable;
+
+ sc->hw_rate_table[ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable;
+ sc->hw_rate_table[ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable;
+ sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] =
+ &ar5416_11na_ratetable;
+ sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] =
+ &ar5416_11na_ratetable;
+ sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] =
+ &ar5416_11ng_ratetable;
+ sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] =
+ &ar5416_11ng_ratetable;
+}
+
+static void ar5416_setquarter_ratetable(struct ath_rate_softc *sc)
+{
+ sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Quarter;
+ return;
+}
+
+static void ar5416_sethalf_ratetable(struct ath_rate_softc *sc)
+{
+ sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Half;
+ return;
+}
+
+static void ar5416_setfull_ratetable(struct ath_rate_softc *sc)
+{
+ sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
+ return;
+}
+
+/*
+ * Return the median of three numbers
+ */
+static inline int8_t median(int8_t a, int8_t b, int8_t c)
+{
+ if (a >= b) {
+ if (b >= c)
+ return b;
+ else if (a > c)
+ return c;
+ else
+ return a;
+ } else {
+ if (a >= c)
+ return a;
+ else if (b >= c)
+ return c;
+ else
+ return b;
+ }
+}
+
+static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
+ struct ath_tx_ratectrl *rate_ctrl)
+{
+ u8 i, j, idx, idx_next;
+
+ for (i = rate_ctrl->max_valid_rate - 1; i > 0; i--) {
+ for (j = 0; j <= i-1; j++) {
+ idx = rate_ctrl->valid_rate_index[j];
+ idx_next = rate_ctrl->valid_rate_index[j+1];
+
+ if (rate_table->info[idx].ratekbps >
+ rate_table->info[idx_next].ratekbps) {
+ rate_ctrl->valid_rate_index[j] = idx_next;
+ rate_ctrl->valid_rate_index[j+1] = idx;
+ }
+ }
+ }
+}
+
+/* Access functions for valid_txrate_mask */
+
+static void ath_rc_init_valid_txmask(struct ath_tx_ratectrl *rate_ctrl)
+{
+ u8 i;
+
+ for (i = 0; i < rate_ctrl->rate_table_size; i++)
+ rate_ctrl->valid_rate_index[i] = FALSE;
+}
+
+static inline void ath_rc_set_valid_txmask(struct ath_tx_ratectrl *rate_ctrl,
+ u8 index, int valid_tx_rate)
+{
+ ASSERT(index <= rate_ctrl->rate_table_size);
+ rate_ctrl->valid_rate_index[index] = valid_tx_rate ? TRUE : FALSE;
+}
+
+static inline int ath_rc_isvalid_txmask(struct ath_tx_ratectrl *rate_ctrl,
+ u8 index)
+{
+ ASSERT(index <= rate_ctrl->rate_table_size);
+ return rate_ctrl->valid_rate_index[index];
+}
+
+/* Iterators for valid_txrate_mask */
+static inline int
+ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
+ struct ath_tx_ratectrl *rate_ctrl,
+ u8 cur_valid_txrate,
+ u8 *next_idx)
+{
+ u8 i;
+
+ for (i = 0; i < rate_ctrl->max_valid_rate - 1; i++) {
+ if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
+ *next_idx = rate_ctrl->valid_rate_index[i+1];
+ return TRUE;
+ }
+ }
+
+ /* No more valid rates */
+ *next_idx = 0;
+ return FALSE;
+}
+
+/* Check whether a phy rate is valid for the given capability flags */
+
+static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
+{
+ if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
+ return FALSE;
+ if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
+ return FALSE;
+ if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
+ return FALSE;
+ if (!ignore_cw && WLAN_RC_PHY_HT(phy))
+ if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
+ return FALSE;
+ if (!WLAN_RC_PHY_40(phy) && (capflag & WLAN_RC_40_FLAG))
+ return FALSE;
+ return TRUE;
+}
+
+static inline int
+ath_rc_get_nextlowervalid_txrate(const struct ath_rate_table *rate_table,
+ struct ath_tx_ratectrl *rate_ctrl,
+ u8 cur_valid_txrate, u8 *next_idx)
+{
+ int8_t i;
+
+ for (i = 1; i < rate_ctrl->max_valid_rate ; i++) {
+ if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
+ *next_idx = rate_ctrl->valid_rate_index[i-1];
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/*
+ * Initialize the Valid Rate Index from valid entries in Rate Table
+ */
+static u8
+ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv,
+ const struct ath_rate_table *rate_table,
+ u32 capflag)
+{
+ struct ath_tx_ratectrl *rate_ctrl;
+ u8 i, hi = 0;
+ u32 valid;
+
+ rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
+ for (i = 0; i < rate_table->rate_cnt; i++) {
+ valid = (ath_rc_priv->single_stream ?
+ rate_table->info[i].valid_single_stream :
+ rate_table->info[i].valid);
+ if (valid == TRUE) {
+ u32 phy = rate_table->info[i].phy;
+ u8 valid_rate_count = 0;
+
+ if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
+ continue;
+
+ valid_rate_count = rate_ctrl->valid_phy_ratecnt[phy];
+
+ rate_ctrl->valid_phy_rateidx[phy][valid_rate_count] = i;
+ rate_ctrl->valid_phy_ratecnt[phy] += 1;
+ ath_rc_set_valid_txmask(rate_ctrl, i, TRUE);
+ hi = A_MAX(hi, i);
+ }
+ }
+ return hi;
+}
+
+/*
+ * Initialize the Valid Rate Index from Rate Set
+ */
+static u8
+ath_rc_sib_setvalid_rates(struct ath_rate_node *ath_rc_priv,
+ const struct ath_rate_table *rate_table,
+ struct ath_rateset *rateset,
+ u32 capflag)
+{
+ /* XXX: Clean me up and make indentation friendly */
+ u8 i, j, hi = 0;
+ struct ath_tx_ratectrl *rate_ctrl =
+ (struct ath_tx_ratectrl *)(ath_rc_priv);
+
+ /* Use intersection of working rates and valid rates */
+ for (i = 0; i < rateset->rs_nrates; i++) {
+ for (j = 0; j < rate_table->rate_cnt; j++) {
+ u32 phy = rate_table->info[j].phy;
+ u32 valid = (ath_rc_priv->single_stream ?
+ rate_table->info[j].valid_single_stream :
+ rate_table->info[j].valid);
+
+ /* We allow a rate only if it's valid and the
+ * capflag matches one of the validity
+ * (TRUE/TRUE_20/TRUE_40) flags */
+
+ /* XXX: catch the negative of this branch
+ * first and then continue */
+ if (((rateset->rs_rates[i] & 0x7F) ==
+ (rate_table->info[j].dot11rate & 0x7F)) &&
+ ((valid & WLAN_RC_CAP_MODE(capflag)) ==
+ WLAN_RC_CAP_MODE(capflag)) &&
+ !WLAN_RC_PHY_HT(phy)) {
+
+ u8 valid_rate_count = 0;
+
+ if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
+ continue;
+
+ valid_rate_count =
+ rate_ctrl->valid_phy_ratecnt[phy];
+
+ rate_ctrl->valid_phy_rateidx[phy]
+ [valid_rate_count] = j;
+ rate_ctrl->valid_phy_ratecnt[phy] += 1;
+ ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
+ hi = A_MAX(hi, j);
+ }
+ }
+ }
+ return hi;
+}
+
+static u8
+ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv,
+ const struct ath_rate_table *rate_table,
+ u8 *mcs_set, u32 capflag)
+{
+ u8 i, j, hi = 0;
+ struct ath_tx_ratectrl *rate_ctrl =
+ (struct ath_tx_ratectrl *)(ath_rc_priv);
+
+ /* Use intersection of working rates and valid rates */
+ for (i = 0; i < ((struct ath_rateset *)mcs_set)->rs_nrates; i++) {
+ for (j = 0; j < rate_table->rate_cnt; j++) {
+ u32 phy = rate_table->info[j].phy;
+ u32 valid = (ath_rc_priv->single_stream ?
+ rate_table->info[j].valid_single_stream :
+ rate_table->info[j].valid);
+
+ if (((((struct ath_rateset *)
+ mcs_set)->rs_rates[i] & 0x7F) !=
+ (rate_table->info[j].dot11rate & 0x7F)) ||
+ !WLAN_RC_PHY_HT(phy) ||
+ !WLAN_RC_PHY_HT_VALID(valid, capflag))
+ continue;
+
+ if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
+ continue;
+
+ rate_ctrl->valid_phy_rateidx[phy]
+ [rate_ctrl->valid_phy_ratecnt[phy]] = j;
+ rate_ctrl->valid_phy_ratecnt[phy] += 1;
+ ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
+ hi = A_MAX(hi, j);
+ }
+ }
+ return hi;
+}
+
+/*
+ * Attach to a device instance. Setup the public definition
+ * of how much per-node space we need and setup the private
+ * phy tables that have rate control parameters.
+ */
+struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah)
+{
+ struct ath_rate_softc *asc;
+
+ /* we are only in user context so we can sleep for memory */
+ asc = kzalloc(sizeof(struct ath_rate_softc), GFP_KERNEL);
+ if (asc == NULL)
+ return NULL;
+
+ ar5416_attach_ratetables(asc);
+
+ /* Save Maximum TX Trigger Level (used for 11n) */
+ tx_triglevel_max = ah->ah_caps.tx_triglevel_max;
+ /* return alias for ath_rate_softc * */
+ return asc;
+}
+
+static struct ath_rate_node *ath_rate_node_alloc(struct ath_vap *avp,
+ struct ath_rate_softc *rsc,
+ gfp_t gfp)
+{
+ struct ath_rate_node *anode;
+
+ anode = kzalloc(sizeof(struct ath_rate_node), gfp);
+ if (anode == NULL)
+ return NULL;
+
+ anode->avp = avp;
+ anode->asc = rsc;
+ avp->rc_node = anode;
+
+ return anode;
+}
+
+static void ath_rate_node_free(struct ath_rate_node *anode)
+{
+ if (anode != NULL)
+ kfree(anode);
+}
+
+void ath_rate_detach(struct ath_rate_softc *asc)
+{
+ if (asc != NULL)
+ kfree(asc);
+}
+
+u8 ath_rate_findrateix(struct ath_softc *sc,
+ u8 dot11rate)
+{
+ const struct ath_rate_table *ratetable;
+ struct ath_rate_softc *rsc = sc->sc_rc;
+ int i;
+
+ ratetable = rsc->hw_rate_table[sc->sc_curmode];
+
+ if (WARN_ON(!ratetable))
+ return 0;
+
+ for (i = 0; i < ratetable->rate_cnt; i++) {
+ if ((ratetable->info[i].dot11rate & 0x7f) == (dot11rate & 0x7f))
+ return i;
+ }
+
+ return 0;
+}
+
+/*
+ * Update rate-control state on a device state change. When
+ * operating as a station this includes associate/reassociate
+ * with an AP. Otherwise this gets called, for example, when
+ * we transition to the run state when operating as an AP.
+ */
+void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
+{
+ struct ath_rate_softc *asc = sc->sc_rc;
+
+ /* For half and quarter rate channels use different
+ * rate tables
+ */
+ if (sc->sc_curchan.channelFlags & CHANNEL_HALF)
+ ar5416_sethalf_ratetable(asc);
+ else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER)
+ ar5416_setquarter_ratetable(asc);
+ else /* full rate */
+ ar5416_setfull_ratetable(asc);
+
+ if (avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) {
+ asc->fixedrix =
+ sc->sc_rixmap[avp->av_config.av_fixed_rateset & 0xff];
+ /* NB: check the fixed rate exists */
+ if (asc->fixedrix == 0xff)
+ asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
+ } else {
+ asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
+ }
+}
+
+static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ const struct ath_rate_table *rate_table,
+ int probe_allowed, int *is_probing,
+ int is_retry)
+{
+ u32 dt, best_thruput, this_thruput, now_msec;
+ u8 rate, next_rate, best_rate, maxindex, minindex;
+ int8_t rssi_last, rssi_reduce = 0, index = 0;
+ struct ath_tx_ratectrl *rate_ctrl = NULL;
+
+ rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv ?
+ (ath_rc_priv) : NULL);
+
+ *is_probing = FALSE;
+
+ rssi_last = median(rate_ctrl->rssi_last,
+ rate_ctrl->rssi_last_prev,
+ rate_ctrl->rssi_last_prev2);
+
+ /*
+ * Age (reduce) last ack rssi based on how old it is.
+ * The bizarre numbers are so the delta is 160msec,
+ * meaning we divide by 16.
+ * 0msec <= dt <= 25msec: don't derate
+ * 25msec <= dt <= 185msec: derate linearly from 0 to 10dB
+ * 185msec <= dt: derate by 10dB
+ */
+
+ now_msec = jiffies_to_msecs(jiffies);
+ dt = now_msec - rate_ctrl->rssi_time;
+
+ if (dt >= 185)
+ rssi_reduce = 10;
+ else if (dt >= 25)
+ rssi_reduce = (u8)((dt - 25) >> 4);
+
+ /* Now reduce rssi_last by rssi_reduce */
+ if (rssi_last < rssi_reduce)
+ rssi_last = 0;
+ else
+ rssi_last -= rssi_reduce;
+
+ /*
+ * Now look up the rate in the rssi table and return it.
+ * If no rates match then we return 0 (lowest rate)
+ */
+
+ best_thruput = 0;
+ maxindex = rate_ctrl->max_valid_rate-1;
+
+ minindex = 0;
+ best_rate = minindex;
+
+ /*
+ * Try the higher rates first; this cuts the search short
+ * when the channel characteristics are very good.
+ */
+ for (index = maxindex; index >= minindex ; index--) {
+ u8 per_thres;
+
+ rate = rate_ctrl->valid_rate_index[index];
+ if (rate > rate_ctrl->rate_max_phy)
+ continue;
+
+ /*
+ * For TCP the average collision rate is around 11%,
+ * so we ignore PERs less than this. This is to
+ * prevent the rate we are currently using (whose
+ * PER might be in the 10-15 range because of TCP
+ * collisions) looking worse than the next lower
+ * rate whose PER has decayed close to 0. If we
+ * moved to the next lower rate, its PER would grow to
+ * 10-15 and we would be worse off than staying
+ * at the current rate.
+ */
+ per_thres = rate_ctrl->state[rate].per;
+ if (per_thres < 12)
+ per_thres = 12;
+
+ this_thruput = rate_table->info[rate].user_ratekbps *
+ (100 - per_thres);
+
+ if (best_thruput <= this_thruput) {
+ best_thruput = this_thruput;
+ best_rate = rate;
+ }
+ }
+
+ rate = best_rate;
+
+ /* if we are retrying for more than half the number
+ * of max retries, use the min rate for the next retry
+ */
+ if (is_retry)
+ rate = rate_ctrl->valid_rate_index[minindex];
+
+ rate_ctrl->rssi_last_lookup = rssi_last;
+
+ /*
+ * Must check the actual rate (ratekbps) to account for
+ * non-monotonicity of 11g's rate table
+ */
+
+ if (rate >= rate_ctrl->rate_max_phy && probe_allowed) {
+ rate = rate_ctrl->rate_max_phy;
+
+ /* Probe the next allowed phy state */
+ /* FIXME:XXXX Check to make sure ratMax is checked properly */
+ if (ath_rc_get_nextvalid_txrate(rate_table,
+ rate_ctrl, rate, &next_rate) &&
+ (now_msec - rate_ctrl->probe_time >
+ rate_table->probe_interval) &&
+ (rate_ctrl->hw_maxretry_pktcnt >= 1)) {
+ rate = next_rate;
+ rate_ctrl->probe_rate = rate;
+ rate_ctrl->probe_time = now_msec;
+ rate_ctrl->hw_maxretry_pktcnt = 0;
+ *is_probing = TRUE;
+ }
+ }
+
+ /*
+ * Make sure rate is not higher than the allowed maximum.
+ * We should also enforce the min, but I suspect the min is
+ * normally 1 rather than 0 because of the rate 9 vs 6 issue
+ * in the old code.
+ */
+ if (rate > (rate_ctrl->rate_table_size - 1))
+ rate = rate_ctrl->rate_table_size - 1;
+
+ ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
+ (rate_table->info[rate].valid_single_stream &&
+ ath_rc_priv->single_stream));
+
+ return rate;
+}
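ath_rc_ratefind_ht() above combines two small pieces of arithmetic: the last-ACK RSSI is aged by up to 10 dB over the 25..185 ms window (the shift by 4 divides that 160 ms span by 16), and every candidate rate is scored as ratekbps * (100 - PER) with the PER floored at 12% to mask ordinary TCP collision losses. The sketch below reproduces just that scoring with invented PER samples and a fixed rate list; it is not the driver's rate-control state machine.

    /* Sketch only: the RSSI aging and PER-weighted throughput scoring used by
     * ath_rc_ratefind_ht(), with made-up PER values and rates. */
    #include <stdio.h>

    struct cand { unsigned int kbps; unsigned int per; }; /* PER in percent */

    static int age_rssi(int rssi_last, unsigned int dt_msec)
    {
            unsigned int reduce = 0;

            if (dt_msec >= 185)
                    reduce = 10;                    /* fully aged: -10 dB */
            else if (dt_msec >= 25)
                    reduce = (dt_msec - 25) >> 4;   /* linear over ~160 ms */
            return rssi_last > (int)reduce ? rssi_last - (int)reduce : 0;
    }

    int main(void)
    {
            static const struct cand rates[] = {
                    { 26000, 9 }, { 39000, 14 }, { 52000, 35 },
            };
            unsigned int i, best = 0, best_thruput = 0;

            for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                    /* floor the PER at 12% before weighting the rate */
                    unsigned int per = rates[i].per < 12 ? 12 : rates[i].per;
                    unsigned int thruput = rates[i].kbps * (100 - per);

                    if (thruput >= best_thruput) {
                            best_thruput = thruput;
                            best = i;
                    }
            }
            printf("aged rssi: %d\n", age_rssi(40, 100));
            printf("best rate: %u kbps\n", rates[best].kbps);
            return 0;
    }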
+
+static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table ,
+ struct ath_rc_series *series,
+ u8 tries,
+ u8 rix,
+ int rtsctsenable)
+{
+ series->tries = tries;
+ series->flags = (rtsctsenable ? ATH_RC_RTSCTS_FLAG : 0) |
+ (WLAN_RC_PHY_DS(rate_table->info[rix].phy) ?
+ ATH_RC_DS_FLAG : 0) |
+ (WLAN_RC_PHY_40(rate_table->info[rix].phy) ?
+ ATH_RC_CW40_FLAG : 0) |
+ (WLAN_RC_PHY_SGI(rate_table->info[rix].phy) ?
+ ATH_RC_SGI_FLAG : 0);
+
+ series->rix = rate_table->info[rix].base_index;
+ series->max_4ms_framelen = rate_table->info[rix].max_4ms_framelen;
+}
+
+static u8 ath_rc_rate_getidx(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ const struct ath_rate_table *rate_table,
+ u8 rix, u16 stepdown,
+ u16 min_rate)
+{
+ u32 j;
+ u8 nextindex;
+ struct ath_tx_ratectrl *rate_ctrl =
+ (struct ath_tx_ratectrl *)(ath_rc_priv);
+
+ if (min_rate) {
+ for (j = RATE_TABLE_SIZE; j > 0; j--) {
+ if (ath_rc_get_nextlowervalid_txrate(rate_table,
+ rate_ctrl, rix, &nextindex))
+ rix = nextindex;
+ else
+ break;
+ }
+ } else {
+ for (j = stepdown; j > 0; j--) {
+ if (ath_rc_get_nextlowervalid_txrate(rate_table,
+ rate_ctrl, rix, &nextindex))
+ rix = nextindex;
+ else
+ break;
+ }
+ }
+ return rix;
+}
+
+static void ath_rc_ratefind(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ int num_tries, int num_rates, unsigned int rcflag,
+ struct ath_rc_series series[], int *is_probe,
+ int is_retry)
+{
+ u8 try_per_rate = 0, i = 0, rix, nrix;
+ struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
+ struct ath_rate_table *rate_table;
+
+ rate_table =
+ (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
+ rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
+ (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
+ is_probe, is_retry);
+ nrix = rix;
+
+ if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) {
+ /* Set one try for probe rates; don't
+ * enable RTS for probes */
+ ath_rc_rate_set_series(rate_table,
+ &series[i++], 1, nrix, FALSE);
+
+ try_per_rate = (num_tries/num_rates);
+ /* Get the next tried/allowed rate. No RTS for the next series
+ * after the probe rate
+ */
+ nrix = ath_rc_rate_getidx(sc,
+ ath_rc_priv, rate_table, nrix, 1, FALSE);
+ ath_rc_rate_set_series(rate_table,
+ &series[i++], try_per_rate, nrix, 0);
+ } else {
+ try_per_rate = (num_tries/num_rates);
+ /* Set the chosen rate. No RTS for the first series entry. */
+ ath_rc_rate_set_series(rate_table,
+ &series[i++], try_per_rate, nrix, FALSE);
+ }
+
+ /* Fill in the other rates for multirate retry */
+ for ( ; i < num_rates; i++) {
+ u8 try_num;
+ u8 min_rate;
+
+ try_num = ((i + 1) == num_rates) ?
+ num_tries - (try_per_rate * i) : try_per_rate ;
+ min_rate = (((i + 1) == num_rates) &&
+ (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
+
+ nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
+ rate_table, nrix, 1, min_rate);
+ /* All other rates in the series have RTS enabled */
+ ath_rc_rate_set_series(rate_table,
+ &series[i], try_num, nrix, TRUE);
+ }
+
+ /*
+ * NB:Change rate series to enable aggregation when operating
+ * at lower MCS rates. When first rate in series is MCS2
+ * in HT40 @ 2.4GHz, series should look like:
+ *
+ * {MCS2, MCS1, MCS0, MCS0}.
+ *
+ * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
+ * look like:
+ *
+ * {MCS3, MCS2, MCS1, MCS1}
+ *
+ * So, set fourth rate in series to be same as third one for
+ * above conditions.
+ */
+ if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) ||
+ (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
+ (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
+ u8 dot11rate = rate_table->info[rix].dot11rate;
+ u8 phy = rate_table->info[rix].phy;
+ if (i == 4 &&
+ ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
+ (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
+ series[3].rix = series[2].rix;
+ series[3].flags = series[2].flags;
+ series[3].max_4ms_framelen = series[2].max_4ms_framelen;
+ }
+ }
+}
+
+/*
+ * Return the Tx rate series.
+ */
+void ath_rate_findrate(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ int num_tries,
+ int num_rates,
+ unsigned int rcflag,
+ struct ath_rc_series series[],
+ int *is_probe,
+ int is_retry)
+{
+ struct ath_vap *avp = ath_rc_priv->avp;
+
+ DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
+ if (!num_rates || !num_tries)
+ return;
+
+ if (avp->av_config.av_fixed_rateset == IEEE80211_FIXED_RATE_NONE) {
+ ath_rc_ratefind(sc, ath_rc_priv, num_tries, num_rates,
+ rcflag, series, is_probe, is_retry);
+ } else {
+ /* Fixed rate */
+ int idx;
+ u8 flags;
+ u32 rix;
+ struct ath_rate_softc *asc = ath_rc_priv->asc;
+ struct ath_rate_table *rate_table;
+
+ rate_table = (struct ath_rate_table *)
+ asc->hw_rate_table[sc->sc_curmode];
+
+ for (idx = 0; idx < 4; idx++) {
+ unsigned int mcs;
+ u8 series_rix = 0;
+
+ series[idx].tries =
+ IEEE80211_RATE_IDX_ENTRY(
+ avp->av_config.av_fixed_retryset, idx);
+
+ mcs = IEEE80211_RATE_IDX_ENTRY(
+ avp->av_config.av_fixed_rateset, idx);
+
+ if (idx == 3 && (mcs & 0xf0) == 0x70)
+ mcs = (mcs & ~0xf0)|0x80;
+
+ if (!(mcs & 0x80))
+ flags = 0;
+ else
+ flags = ((ath_rc_priv->ht_cap &
+ WLAN_RC_DS_FLAG) ?
+ ATH_RC_DS_FLAG : 0) |
+ ((ath_rc_priv->ht_cap &
+ WLAN_RC_40_FLAG) ?
+ ATH_RC_CW40_FLAG : 0) |
+ ((ath_rc_priv->ht_cap &
+ WLAN_RC_SGI_FLAG) ?
+ ((ath_rc_priv->ht_cap &
+ WLAN_RC_40_FLAG) ?
+ ATH_RC_SGI_FLAG : 0) : 0);
+
+ series[idx].rix = sc->sc_rixmap[mcs];
+ series_rix = series[idx].rix;
+
+ /* XXX: Give me some cleanup love */
+ if ((flags & ATH_RC_CW40_FLAG) &&
+ (flags & ATH_RC_SGI_FLAG))
+ rix = rate_table->info[series_rix].ht_index;
+ else if (flags & ATH_RC_SGI_FLAG)
+ rix = rate_table->info[series_rix].sgi_index;
+ else if (flags & ATH_RC_CW40_FLAG)
+ rix = rate_table->info[series_rix].cw40index;
+ else
+ rix = rate_table->info[series_rix].base_index;
+ series[idx].max_4ms_framelen =
+ rate_table->info[rix].max_4ms_framelen;
+ series[idx].flags = flags;
+ }
+ }
+}
+
+static void ath_rc_update_ht(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ struct ath_tx_info_priv *info_priv,
+ int tx_rate, int xretries, int retries)
+{
+ struct ath_tx_ratectrl *rate_ctrl;
+ u32 now_msec = jiffies_to_msecs(jiffies);
+ int state_change = FALSE, rate, count;
+ u8 last_per;
+ struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
+ struct ath_rate_table *rate_table =
+ (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
+
+ static u32 nretry_to_per_lookup[10] = {
+ 100 * 0 / 1,
+ 100 * 1 / 4,
+ 100 * 1 / 2,
+ 100 * 3 / 4,
+ 100 * 4 / 5,
+ 100 * 5 / 6,
+ 100 * 6 / 7,
+ 100 * 7 / 8,
+ 100 * 8 / 9,
+ 100 * 9 / 10
+ };
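+	/*
+	 * Note: entry n above is the PER assumed for n retries before a
+	 * success. From three retries up this is 100 * n / (n + 1); the
+	 * one- and two-retry entries (25% and 50%) are kept lower than
+	 * that formula, presumably so that a retry or two does not
+	 * penalize a rate too harshly.
+	 */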
+
+ if (!ath_rc_priv)
+ return;
+
+ rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
+
+ ASSERT(tx_rate >= 0);
+ if (tx_rate < 0)
+ return;
+
+ /* To compensate for some imbalance between ctrl and ext. channel */
+
+ if (WLAN_RC_PHY_40(rate_table->info[tx_rate].phy))
+ info_priv->tx.ts_rssi =
+ info_priv->tx.ts_rssi < 3 ? 0 :
+ info_priv->tx.ts_rssi - 3;
+
+ last_per = rate_ctrl->state[tx_rate].per;
+
+ if (xretries) {
+ /* Update the PER. */
+ if (xretries == 1) {
+ rate_ctrl->state[tx_rate].per += 30;
+ if (rate_ctrl->state[tx_rate].per > 100)
+ rate_ctrl->state[tx_rate].per = 100;
+ } else {
+ /* xretries == 2 */
+ count = sizeof(nretry_to_per_lookup) /
+ sizeof(nretry_to_per_lookup[0]);
+ if (retries >= count)
+ retries = count - 1;
+ /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
+ rate_ctrl->state[tx_rate].per =
+ (u8)(rate_ctrl->state[tx_rate].per -
+ (rate_ctrl->state[tx_rate].per >> 3) +
+ ((100) >> 3));
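+			/*
+			 * Illustrative arithmetic (numbers not from the
+			 * source): with an old PER of 40 this integer
+			 * update gives 40 - (40 >> 3) + (100 >> 3)
+			 * = 40 - 5 + 12 = 47, close to the ideal
+			 * 7/8 * 40 + 1/8 * 100 = 47.5.
+			 */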
+ }
+
+ /* xretries == 1 or 2 */
+
+ if (rate_ctrl->probe_rate == tx_rate)
+ rate_ctrl->probe_rate = 0;
+
+ } else { /* xretries == 0 */
+ /* Update the PER. */
+ /* Make sure it doesn't index out of array's bounds. */
+ count = sizeof(nretry_to_per_lookup) /
+ sizeof(nretry_to_per_lookup[0]);
+ if (retries >= count)
+ retries = count - 1;
+ if (info_priv->n_bad_frames) {
+ /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
+ /*
+ * Assuming that n_frames is not 0. The current PER
+ * from the retries is 100 * retries / (retries+1),
+ * since the first retries attempts failed, and the
+ * next one worked. For the one that worked,
+			 * n_bad_frames subframes out of n_frames failed,
+ * so the PER for that part is
+ * 100 * n_bad_frames / n_frames, and it contributes
+ * 100 * n_bad_frames / (n_frames * (retries+1)) to
+ * the above PER. The expression below is a
+ * simplified version of the sum of these two terms.
+ */
+ if (info_priv->n_frames > 0)
+ rate_ctrl->state[tx_rate].per
+ = (u8)
+ (rate_ctrl->state[tx_rate].per -
+ (rate_ctrl->state[tx_rate].per >> 3) +
+ ((100*(retries*info_priv->n_frames +
+ info_priv->n_bad_frames) /
+ (info_priv->n_frames *
+ (retries+1))) >> 3));
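+			/*
+			 * Worked example (illustrative numbers only): with
+			 * retries = 1, n_frames = 32 and n_bad_frames = 8,
+			 * the current-PER term is
+			 * 100 * (1 * 32 + 8) / (32 * 2) = 62 in integer
+			 * math, blending the failed first attempt with the
+			 * 8-of-32 bad subframes of the attempt that worked.
+			 */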
+ } else {
+ /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
+
+ rate_ctrl->state[tx_rate].per = (u8)
+ (rate_ctrl->state[tx_rate].per -
+ (rate_ctrl->state[tx_rate].per >> 3) +
+ (nretry_to_per_lookup[retries] >> 3));
+ }
+
+ rate_ctrl->rssi_last_prev2 = rate_ctrl->rssi_last_prev;
+ rate_ctrl->rssi_last_prev = rate_ctrl->rssi_last;
+ rate_ctrl->rssi_last = info_priv->tx.ts_rssi;
+ rate_ctrl->rssi_time = now_msec;
+
+ /*
+		 * If this was a probe and it went out with no retries
+		 * (and mostly good subframes), raise the max rate;
+		 * otherwise ignore the probe.
+ */
+
+ if (rate_ctrl->probe_rate && rate_ctrl->probe_rate == tx_rate) {
+ if (retries > 0 || 2 * info_priv->n_bad_frames >
+ info_priv->n_frames) {
+ /*
+ * Since we probed with just a single attempt,
+ * any retries means the probe failed. Also,
+ * if the attempt worked, but more than half
+ * the subframes were bad then also consider
+ * the probe a failure.
+ */
+ rate_ctrl->probe_rate = 0;
+ } else {
+ u8 probe_rate = 0;
+
+ rate_ctrl->rate_max_phy = rate_ctrl->probe_rate;
+ probe_rate = rate_ctrl->probe_rate;
+
+ if (rate_ctrl->state[probe_rate].per > 30)
+ rate_ctrl->state[probe_rate].per = 20;
+
+ rate_ctrl->probe_rate = 0;
+
+ /*
+ * Since this probe succeeded, we allow the next
+ * probe twice as soon. This allows the maxRate
+ * to move up faster if the probes are
+				 * successful.
+ */
+ rate_ctrl->probe_time = now_msec -
+ rate_table->probe_interval / 2;
+ }
+ }
+
+ if (retries > 0) {
+ /*
+ * Don't update anything. We don't know if
+ * this was because of collisions or poor signal.
+ *
+ * Later: if rssi_ack is close to
+ * rate_ctrl->state[txRate].rssi_thres and we see lots
+ * of retries, then we could increase
+ * rate_ctrl->state[txRate].rssi_thres.
+ */
+ rate_ctrl->hw_maxretry_pktcnt = 0;
+ } else {
+ /*
+ * It worked with no retries. First ignore bogus (small)
+ * rssi_ack values.
+ */
+ if (tx_rate == rate_ctrl->rate_max_phy &&
+ rate_ctrl->hw_maxretry_pktcnt < 255) {
+ rate_ctrl->hw_maxretry_pktcnt++;
+ }
+
+ if (info_priv->tx.ts_rssi >=
+ rate_table->info[tx_rate].rssi_ack_validmin) {
+ /* Average the rssi */
+ if (tx_rate != rate_ctrl->rssi_sum_rate) {
+ rate_ctrl->rssi_sum_rate = tx_rate;
+ rate_ctrl->rssi_sum =
+ rate_ctrl->rssi_sum_cnt = 0;
+ }
+
+ rate_ctrl->rssi_sum += info_priv->tx.ts_rssi;
+ rate_ctrl->rssi_sum_cnt++;
+
+ if (rate_ctrl->rssi_sum_cnt > 4) {
+ int32_t rssi_ackAvg =
+ (rate_ctrl->rssi_sum + 2) / 4;
+ int8_t rssi_thres =
+ rate_ctrl->state[tx_rate].
+ rssi_thres;
+ int8_t rssi_ack_vmin =
+ rate_table->info[tx_rate].
+ rssi_ack_validmin;
+
+ rate_ctrl->rssi_sum =
+ rate_ctrl->rssi_sum_cnt = 0;
+
+ /* Now reduce the current
+ * rssi threshold. */
+ if ((rssi_ackAvg < rssi_thres + 2) &&
+ (rssi_thres > rssi_ack_vmin)) {
+ rate_ctrl->state[tx_rate].
+ rssi_thres--;
+ }
+
+ state_change = TRUE;
+ }
+ }
+ }
+ }
+
+ /* For all cases */
+
+ /*
+ * If this rate looks bad (high PER) then stop using it for
+ * a while (except if we are probing).
+ */
+ if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 &&
+ rate_table->info[tx_rate].ratekbps <=
+ rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
+ ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl,
+ (u8) tx_rate, &rate_ctrl->rate_max_phy);
+
+ /* Don't probe for a little while. */
+ rate_ctrl->probe_time = now_msec;
+ }
+
+ if (state_change) {
+ /*
+ * Make sure the rates above this have higher rssi thresholds.
+ * (Note: Monotonicity is kept within the OFDM rates and
+ * within the CCK rates. However, no adjustment is
+ * made to keep the rssi thresholds monotonically
+ * increasing between the CCK and OFDM rates.)
+ */
+ for (rate = tx_rate; rate <
+ rate_ctrl->rate_table_size - 1; rate++) {
+ if (rate_table->info[rate+1].phy !=
+ rate_table->info[tx_rate].phy)
+ break;
+
+ if (rate_ctrl->state[rate].rssi_thres +
+ rate_table->info[rate].rssi_ack_deltamin >
+ rate_ctrl->state[rate+1].rssi_thres) {
+ rate_ctrl->state[rate+1].rssi_thres =
+ rate_ctrl->state[rate].
+ rssi_thres +
+ rate_table->info[rate].
+ rssi_ack_deltamin;
+ }
+ }
+
+ /* Make sure the rates below this have lower rssi thresholds. */
+ for (rate = tx_rate - 1; rate >= 0; rate--) {
+ if (rate_table->info[rate].phy !=
+ rate_table->info[tx_rate].phy)
+ break;
+
+ if (rate_ctrl->state[rate].rssi_thres +
+ rate_table->info[rate].rssi_ack_deltamin >
+ rate_ctrl->state[rate+1].rssi_thres) {
+ if (rate_ctrl->state[rate+1].rssi_thres <
+ rate_table->info[rate].
+ rssi_ack_deltamin)
+ rate_ctrl->state[rate].rssi_thres = 0;
+ else {
+ rate_ctrl->state[rate].rssi_thres =
+ rate_ctrl->state[rate+1].
+ rssi_thres -
+ rate_table->info[rate].
+ rssi_ack_deltamin;
+ }
+
+ if (rate_ctrl->state[rate].rssi_thres <
+ rate_table->info[rate].
+ rssi_ack_validmin) {
+ rate_ctrl->state[rate].rssi_thres =
+ rate_table->info[rate].
+ rssi_ack_validmin;
+ }
+ }
+ }
+ }
+
+ /* Make sure the rates below this have lower PER */
+ /* Monotonicity is kept only for rates below the current rate. */
+ if (rate_ctrl->state[tx_rate].per < last_per) {
+ for (rate = tx_rate - 1; rate >= 0; rate--) {
+ if (rate_table->info[rate].phy !=
+ rate_table->info[tx_rate].phy)
+ break;
+
+ if (rate_ctrl->state[rate].per >
+ rate_ctrl->state[rate+1].per) {
+ rate_ctrl->state[rate].per =
+ rate_ctrl->state[rate+1].per;
+ }
+ }
+ }
+
+ /* Maintain monotonicity for rates above the current rate */
+ for (rate = tx_rate; rate < rate_ctrl->rate_table_size - 1; rate++) {
+ if (rate_ctrl->state[rate+1].per < rate_ctrl->state[rate].per)
+ rate_ctrl->state[rate+1].per =
+ rate_ctrl->state[rate].per;
+ }
+
+	/* Every so often, lower the rssi thresholds back toward
+	 * their per-rate minimums (different for CCK and OFDM). */
+ if (now_msec - rate_ctrl->rssi_down_time >=
+ rate_table->rssi_reduce_interval) {
+
+ for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
+ if (rate_ctrl->state[rate].rssi_thres >
+ rate_table->info[rate].rssi_ack_validmin)
+ rate_ctrl->state[rate].rssi_thres -= 1;
+ }
+ rate_ctrl->rssi_down_time = now_msec;
+ }
+
+	/* Every so often, decay the stored PER estimates
+	 * (each is scaled by 7/8). */
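+	/*
+	 * Note (illustrative numbers, not from the source): scaling by
+	 * 7/8 once per rssi_reduce_interval means a stale PER of 64
+	 * drops to 56, then 49, and so on, so a rate that performed
+	 * badly in the past is gradually given another chance.
+	 */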
+ if (now_msec - rate_ctrl->per_down_time >=
+ rate_table->rssi_reduce_interval) {
+ for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
+ rate_ctrl->state[rate].per =
+ 7 * rate_ctrl->state[rate].per / 8;
+ }
+
+ rate_ctrl->per_down_time = now_msec;
+ }
+}
+
+/*
+ * This routine is called in rate control callback tx_status() to give
+ * the status of previous frames.
+ */
+static void ath_rc_update(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ struct ath_tx_info_priv *info_priv, int final_ts_idx,
+ int xretries, int long_retry)
+{
+ struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
+ struct ath_rate_table *rate_table;
+ struct ath_tx_ratectrl *rate_ctrl;
+ struct ath_rc_series rcs[4];
+ u8 flags;
+ u32 series = 0, rix;
+
+ memcpy(rcs, info_priv->rcs, 4 * sizeof(rcs[0]));
+ rate_table = (struct ath_rate_table *)
+ asc->hw_rate_table[sc->sc_curmode];
+ rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
+ ASSERT(rcs[0].tries != 0);
+
+ /*
+ * If the first rate is not the final index, there
+ * are intermediate rate failures to be processed.
+ */
+ if (final_ts_idx != 0) {
+ /* Process intermediate rates that failed.*/
+ for (series = 0; series < final_ts_idx ; series++) {
+ if (rcs[series].tries != 0) {
+ flags = rcs[series].flags;
+ /* If HT40 and we have switched mode from
+ * 40 to 20 => don't update */
+ if ((flags & ATH_RC_CW40_FLAG) &&
+ (rate_ctrl->rc_phy_mode !=
+ (flags & ATH_RC_CW40_FLAG)))
+ return;
+ if ((flags & ATH_RC_CW40_FLAG) &&
+ (flags & ATH_RC_SGI_FLAG))
+ rix = rate_table->info[
+ rcs[series].rix].ht_index;
+ else if (flags & ATH_RC_SGI_FLAG)
+ rix = rate_table->info[
+ rcs[series].rix].sgi_index;
+ else if (flags & ATH_RC_CW40_FLAG)
+ rix = rate_table->info[
+ rcs[series].rix].cw40index;
+ else
+ rix = rate_table->info[
+ rcs[series].rix].base_index;
+ ath_rc_update_ht(sc, ath_rc_priv,
+ info_priv, rix,
+ xretries ? 1 : 2,
+ rcs[series].tries);
+ }
+ }
+ } else {
+ /*
+ * Handle the special case of MIMO PS burst, where the second
+ * aggregate is sent out with only one rate and one try.
+ * Treating it as an excessive retry penalizes the rate
+ * inordinately.
+ */
+ if (rcs[0].tries == 1 && xretries == 1)
+ xretries = 2;
+ }
+
+ flags = rcs[series].flags;
+ /* If HT40 and we have switched mode from 40 to 20 => don't update */
+ if ((flags & ATH_RC_CW40_FLAG) &&
+ (rate_ctrl->rc_phy_mode != (flags & ATH_RC_CW40_FLAG)))
+ return;
+
+ if ((flags & ATH_RC_CW40_FLAG) && (flags & ATH_RC_SGI_FLAG))
+ rix = rate_table->info[rcs[series].rix].ht_index;
+ else if (flags & ATH_RC_SGI_FLAG)
+ rix = rate_table->info[rcs[series].rix].sgi_index;
+ else if (flags & ATH_RC_CW40_FLAG)
+ rix = rate_table->info[rcs[series].rix].cw40index;
+ else
+ rix = rate_table->info[rcs[series].rix].base_index;
+
+ ath_rc_update_ht(sc, ath_rc_priv, info_priv, rix,
+ xretries, long_retry);
+}
+
+
+/*
+ * Process a tx descriptor for a completed transmit (success or failure).
+ */
+static void ath_rate_tx_complete(struct ath_softc *sc,
+ struct ath_node *an,
+ struct ath_rate_node *rc_priv,
+ struct ath_tx_info_priv *info_priv)
+{
+ int final_ts_idx = info_priv->tx.ts_rateindex;
+ int tx_status = 0, is_underrun = 0;
+ struct ath_vap *avp;
+
+ avp = rc_priv->avp;
+ if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE)
+ || info_priv->tx.ts_status & ATH9K_TXERR_FILT)
+ return;
+
+ if (info_priv->tx.ts_rssi > 0) {
+ ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
+ info_priv->tx.ts_rssi);
+ }
+
+ /*
+	 * If an underrun error is seen, treat it as an excessive retry only
+	 * if the prefetch trigger level has reached the max (0x3f for 5416).
+ * Adjust the long retry as if the frame was tried ATH_11N_TXMAXTRY
+ * times. This affects how ratectrl updates PER for the failed rate.
+ */
+ if (info_priv->tx.ts_flags &
+ (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
+ ((sc->sc_ah->ah_txTrigLevel) >= tx_triglevel_max)) {
+ tx_status = 1;
+ is_underrun = 1;
+ }
+
+ if ((info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) ||
+ (info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
+ tx_status = 1;
+
+ ath_rc_update(sc, rc_priv, info_priv, final_ts_idx, tx_status,
+ (is_underrun) ? ATH_11N_TXMAXTRY :
+ info_priv->tx.ts_longretry);
+}
+
+
+/*
+ * Update the SIB's rate control information
+ *
+ * This should be called when the supported rates change
+ * (e.g. SME operation, wireless mode change)
+ *
+ * It will determine which rates are valid for use.
+ */
+static void ath_rc_sib_update(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ u32 capflag, int keep_state,
+ struct ath_rateset *negotiated_rates,
+ struct ath_rateset *negotiated_htrates)
+{
+ struct ath_rate_table *rate_table = NULL;
+ struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
+ struct ath_rateset *rateset = negotiated_rates;
+ u8 *ht_mcs = (u8 *)negotiated_htrates;
+ struct ath_tx_ratectrl *rate_ctrl = (struct ath_tx_ratectrl *)
+ (ath_rc_priv);
+ u8 i, j, k, hi = 0, hthi = 0;
+
+ rate_table = (struct ath_rate_table *)
+ asc->hw_rate_table[sc->sc_curmode];
+
+ /* Initial rate table size. Will change depending
+ * on the working rate set */
+ rate_ctrl->rate_table_size = MAX_TX_RATE_TBL;
+
+ /* Initialize thresholds according to the global rate table */
+ for (i = 0 ; (i < rate_ctrl->rate_table_size) && (!keep_state); i++) {
+ rate_ctrl->state[i].rssi_thres =
+ rate_table->info[i].rssi_ack_validmin;
+ rate_ctrl->state[i].per = 0;
+ }
+
+ /* Determine the valid rates */
+ ath_rc_init_valid_txmask(rate_ctrl);
+
+ for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
+ for (j = 0; j < MAX_TX_RATE_PHY; j++)
+ rate_ctrl->valid_phy_rateidx[i][j] = 0;
+ rate_ctrl->valid_phy_ratecnt[i] = 0;
+ }
+ rate_ctrl->rc_phy_mode = (capflag & WLAN_RC_40_FLAG);
+
+ /* Set stream capability */
+ ath_rc_priv->single_stream = (capflag & WLAN_RC_DS_FLAG) ? 0 : 1;
+
+ if (!rateset->rs_nrates) {
+ /* No working rate, just initialize valid rates */
+ hi = ath_rc_sib_init_validrates(ath_rc_priv, rate_table,
+ capflag);
+ } else {
+ /* Use intersection of working rates and valid rates */
+ hi = ath_rc_sib_setvalid_rates(ath_rc_priv, rate_table,
+ rateset, capflag);
+ if (capflag & WLAN_RC_HT_FLAG) {
+ hthi = ath_rc_sib_setvalid_htrates(ath_rc_priv,
+ rate_table,
+ ht_mcs,
+ capflag);
+ }
+ hi = A_MAX(hi, hthi);
+ }
+
+ rate_ctrl->rate_table_size = hi + 1;
+ rate_ctrl->rate_max_phy = 0;
+ ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);
+
+ for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
+ for (j = 0; j < rate_ctrl->valid_phy_ratecnt[i]; j++) {
+ rate_ctrl->valid_rate_index[k++] =
+ rate_ctrl->valid_phy_rateidx[i][j];
+ }
+
+ if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, TRUE)
+ || !rate_ctrl->valid_phy_ratecnt[i])
+ continue;
+
+ rate_ctrl->rate_max_phy = rate_ctrl->valid_phy_rateidx[i][j-1];
+ }
+ ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);
+ ASSERT(k <= MAX_TX_RATE_TBL);
+
+ rate_ctrl->max_valid_rate = k;
+ /*
+	 * Some third-party vendors don't send the supported rate series
+	 * in order. Sort it here to make sure it is; otherwise the
+	 * rate-finding algorithm will select the wrong rates.
+ */
+ ath_rc_sort_validrates(rate_table, rate_ctrl);
+ rate_ctrl->rate_max_phy = rate_ctrl->valid_rate_index[k-4];
+}
+
+/*
+ * Update rate-control state on station associate/reassociate.
+ */
+static int ath_rate_newassoc(struct ath_softc *sc,
+ struct ath_rate_node *ath_rc_priv,
+ unsigned int capflag,
+ struct ath_rateset *negotiated_rates,
+ struct ath_rateset *negotiated_htrates)
+{
+
+
+ ath_rc_priv->ht_cap =
+ ((capflag & ATH_RC_DS_FLAG) ? WLAN_RC_DS_FLAG : 0) |
+ ((capflag & ATH_RC_SGI_FLAG) ? WLAN_RC_SGI_FLAG : 0) |
+ ((capflag & ATH_RC_HT_FLAG) ? WLAN_RC_HT_FLAG : 0) |
+ ((capflag & ATH_RC_CW40_FLAG) ? WLAN_RC_40_FLAG : 0);
+
+ ath_rc_sib_update(sc, ath_rc_priv, ath_rc_priv->ht_cap, 0,
+ negotiated_rates, negotiated_htrates);
+
+ return 0;
+}
+
+/*
+ * This routine is called to initialize the rate control parameters
+ * in the SIB. It is called initially during system initialization
+ * or when a station is associated with the AP.
+ */
+static void ath_rc_sib_init(struct ath_rate_node *ath_rc_priv)
+{
+ struct ath_tx_ratectrl *rate_ctrl;
+
+ rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
+ rate_ctrl->rssi_down_time = jiffies_to_msecs(jiffies);
+}
+
+
+static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hw *hw = local_to_hw(local);
+ struct ath_softc *sc = hw->priv;
+ struct ath_rate_node *rc_priv = sta->rate_ctrl_priv;
+ int i, j = 0;
+
+ DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
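+	/*
+	 * mac80211 reports bitrates in units of 100 kbps; the
+	 * (bitrate * 2) / 10 conversion below yields the 500 kbps units
+	 * used in 802.11 supported-rates fields (e.g. 54 Mbps -> 540 -> 108).
+	 */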
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) {
+ rc_priv->neg_rates.rs_rates[j]
+ = (sband->bitrates[i].bitrate * 2) / 10;
+ j++;
+ }
+ }
+ rc_priv->neg_rates.rs_nrates = j;
+}
+
+void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv)
+{
+ struct ath_softc *sc = hw->priv;
+ u32 capflag = 0;
+
+ if (hw->conf.ht_conf.ht_supported) {
+ capflag |= ATH_RC_HT_FLAG | ATH_RC_DS_FLAG;
+ if (sc->sc_ht_info.tx_chan_width == ATH9K_HT_MACMODE_2040)
+ capflag |= ATH_RC_CW40_FLAG;
+ }
+
+ ath_rate_newassoc(sc, rc_priv, capflag,
+ &rc_priv->neg_rates,
+ &rc_priv->neg_ht_rates);
+
+}
+
+/* Rate Control callbacks */
+static void ath_tx_status(void *priv, struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct ath_softc *sc = priv;
+ struct ath_tx_info_priv *tx_info_priv;
+ struct ath_node *an;
+ struct sta_info *sta;
+ struct ieee80211_local *local;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+
+ local = hw_to_local(sc->hw);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
+
+ spin_lock_bh(&sc->node_lock);
+ an = ath_node_find(sc, hdr->addr1);
+ spin_unlock_bh(&sc->node_lock);
+
+ sta = sta_info_get(local, hdr->addr1);
+ if (!an || !sta || !ieee80211_is_data(fc)) {
+ if (tx_info->driver_data[0] != NULL) {
+ kfree(tx_info->driver_data[0]);
+ tx_info->driver_data[0] = NULL;
+ }
+ return;
+ }
+ if (tx_info->driver_data[0] != NULL) {
+ ath_rate_tx_complete(sc, an, sta->rate_ctrl_priv, tx_info_priv);
+ kfree(tx_info->driver_data[0]);
+ tx_info->driver_data[0] = NULL;
+ }
+}
+
+static void ath_tx_aggr_resp(struct ath_softc *sc,
+ struct sta_info *sta,
+ struct ath_node *an,
+ u8 tidno)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_local *local;
+ struct ath_atx_tid *txtid;
+ struct ieee80211_supported_band *sband;
+ u16 buffersize = 0;
+ int state;
+ DECLARE_MAC_BUF(mac);
+
+ if (!sc->sc_txaggr)
+ return;
+
+ txtid = ATH_AN_2_TID(an, tidno);
+ if (!txtid->paused)
+ return;
+
+ local = hw_to_local(sc->hw);
+ sband = hw->wiphy->bands[hw->conf.channel->band];
+ buffersize = IEEE80211_MIN_AMPDU_BUF <<
+ sband->ht_info.ampdu_factor; /* FIXME */
+ state = sta->ampdu_mlme.tid_state_tx[tidno];
+
+ if (state & HT_ADDBA_RECEIVED_MSK) {
+ txtid->addba_exchangecomplete = 1;
+ txtid->addba_exchangeinprogress = 0;
+ txtid->baw_size = buffersize;
+
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Resuming tid, buffersize: %d\n",
+ __func__,
+ buffersize);
+
+ ath_tx_resume_tid(sc, txtid);
+ }
+}
+
+static void ath_get_rate(void *priv, struct net_device *dev,
+ struct ieee80211_supported_band *sband,
+ struct sk_buff *skb,
+ struct rate_selection *sel)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct sta_info *sta;
+ struct ath_softc *sc = (struct ath_softc *)priv;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_tx_info_priv *tx_info_priv;
+ struct ath_rate_node *ath_rc_priv;
+ struct ath_node *an;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ int is_probe, chk, ret;
+ s8 lowest_idx;
+ __le16 fc = hdr->frame_control;
+ u8 *qc, tid;
+ DECLARE_MAC_BUF(mac);
+
+ DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
+
+ /* allocate driver private area of tx_info */
+ tx_info->driver_data[0] = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
+ ASSERT(tx_info->driver_data[0] != NULL);
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
+
+ sta = sta_info_get(local, hdr->addr1);
+ lowest_idx = rate_lowest_index(local, sband, sta);
+ tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
+ /* lowest rate for management and multicast/broadcast frames */
+ if (!ieee80211_is_data(fc) ||
+ is_multicast_ether_addr(hdr->addr1) || !sta) {
+ sel->rate_idx = lowest_idx;
+ return;
+ }
+
+ ath_rc_priv = sta->rate_ctrl_priv;
+
+ /* Find tx rate for unicast frames */
+ ath_rate_findrate(sc, ath_rc_priv,
+ ATH_11N_TXMAXTRY, 4,
+ ATH_RC_PROBE_ALLOWED,
+ tx_info_priv->rcs,
+ &is_probe,
+ false);
+ if (is_probe)
+ sel->probe_idx = ((struct ath_tx_ratectrl *)
+ sta->rate_ctrl_priv)->probe_rate;
+
+ /* Ratecontrol sometimes returns invalid rate index */
+ if (tx_info_priv->rcs[0].rix != 0xff)
+ ath_rc_priv->prev_data_rix = tx_info_priv->rcs[0].rix;
+ else
+ tx_info_priv->rcs[0].rix = ath_rc_priv->prev_data_rix;
+
+ sel->rate_idx = tx_info_priv->rcs[0].rix;
+
+ /* Check if aggregation has to be enabled for this tid */
+
+ if (hw->conf.ht_conf.ht_supported) {
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+
+ spin_lock_bh(&sc->node_lock);
+ an = ath_node_find(sc, hdr->addr1);
+ spin_unlock_bh(&sc->node_lock);
+
+ if (!an) {
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Node not found to "
+ "init/chk TX aggr\n", __func__);
+ return;
+ }
+
+ chk = ath_tx_aggr_check(sc, an, tid);
+ if (chk == AGGR_REQUIRED) {
+ ret = ieee80211_start_tx_ba_session(hw,
+ hdr->addr1, tid);
+ if (ret)
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Unable to start tx "
+ "aggr for: %s\n",
+ __func__,
+ print_mac(mac, hdr->addr1));
+ else
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Started tx aggr for: %s\n",
+ __func__,
+ print_mac(mac, hdr->addr1));
+ } else if (chk == AGGR_EXCHANGE_PROGRESS)
+ ath_tx_aggr_resp(sc, sta, an, tid);
+ }
+ }
+}
+
+static void ath_rate_init(void *priv, void *priv_sta,
+ struct ieee80211_local *local,
+ struct sta_info *sta)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hw *hw = local_to_hw(local);
+ struct ieee80211_conf *conf = &local->hw.conf;
+ struct ath_softc *sc = hw->priv;
+ int i, j = 0;
+
+ DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sta->txrate_idx = rate_lowest_index(local, sband, sta);
+
+ ath_setup_rates(local, sta);
+ if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
+ for (i = 0; i < MCS_SET_SIZE; i++) {
+ if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
+ ((struct ath_rate_node *)
+ priv_sta)->neg_ht_rates.rs_rates[j++] = i;
+ if (j == ATH_RATE_MAX)
+ break;
+ }
+ ((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j;
+ }
+ ath_rc_node_update(hw, priv_sta);
+}
+
+static void ath_rate_clear(void *priv)
+{
+ return;
+}
+
+static void *ath_rate_alloc(struct ieee80211_local *local)
+{
+ struct ieee80211_hw *hw = local_to_hw(local);
+ struct ath_softc *sc = hw->priv;
+
+ DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
+ return local->hw.priv;
+}
+
+static void ath_rate_free(void *priv)
+{
+ return;
+}
+
+static void *ath_rate_alloc_sta(void *priv, gfp_t gfp)
+{
+ struct ath_softc *sc = priv;
+ struct ath_vap *avp = sc->sc_vaps[0];
+ struct ath_rate_node *rate_priv;
+
+ DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
+ rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp);
+ if (!rate_priv) {
+		DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to allocate "
+			"private rate control structure\n", __func__);
+ return NULL;
+ }
+ ath_rc_sib_init(rate_priv);
+ return rate_priv;
+}
+
+static void ath_rate_free_sta(void *priv, void *priv_sta)
+{
+ struct ath_rate_node *rate_priv = priv_sta;
+ struct ath_softc *sc = priv;
+
+ DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
+ ath_rate_node_free(rate_priv);
+}
+
+static struct rate_control_ops ath_rate_ops = {
+ .module = NULL,
+ .name = "ath9k_rate_control",
+ .tx_status = ath_tx_status,
+ .get_rate = ath_get_rate,
+ .rate_init = ath_rate_init,
+ .clear = ath_rate_clear,
+ .alloc = ath_rate_alloc,
+ .free = ath_rate_free,
+ .alloc_sta = ath_rate_alloc_sta,
+ .free_sta = ath_rate_free_sta
+};
+
+int ath_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&ath_rate_ops);
+}
+
+void ath_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&ath_rate_ops);
+}
+
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
new file mode 100644
index 00000000000..71aef9c7523
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2004 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004 Video54 Technologies, Inc.
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef RC_H
+#define RC_H
+
+#include "ath9k.h"
+/*
+ * Interface definitions for transmit rate control modules for the
+ * Atheros driver.
+ *
+ * A rate control module is responsible for choosing the transmit rate
+ * for each data frame. Management+control frames are always sent at
+ * a fixed rate.
+ *
+ * Only one module may be present at a time; the driver references
+ * rate control interfaces by symbol name. If multiple modules are
+ * to be supported we'll need to switch to a registration-based scheme
+ * as is currently done, for example, for authentication modules.
+ *
+ * An instance of the rate control module is attached to each device
+ * at attach time and detached when the device is destroyed. The module
+ * may associate data with each device and each node (station). Both
+ * sets of storage are opaque except for the size of the per-node storage
+ * which must be provided when the module is attached.
+ *
+ * The rate control module is notified for each state transition and
+ * station association/reassociation. Otherwise it is queried for a
+ * rate for each outgoing frame and provided status from each transmitted
+ * frame. Any ancillary processing is the responsibility of the module
+ * (e.g. if periodic processing is required then the module should setup
+ * (e.g. if periodic processing is required then the module should set up
+ * its own timer).
+ * In addition to the transmit rate for each frame the module must also
+ * indicate the number of attempts to make at the specified rate. If this
+ * number is != ATH_TXMAXTRY then an additional callback is made to setup
+ * additional transmit state. The rate control code is assumed to write
+ * this additional data directly to the transmit descriptor.
+ */
+
+struct ath_softc;
+
+#define TRUE 1
+#define FALSE 0
+
+#define ATH_RATE_MAX 30
+#define MCS_SET_SIZE 128
+
+enum ieee80211_fixed_rate_mode {
+ IEEE80211_FIXED_RATE_NONE = 0,
+ IEEE80211_FIXED_RATE_MCS = 1 /* HT rates */
+};
+
+/*
+ * Use the hal os glue code to get ms time
+ */
+#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
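+/*
+ * Example (illustrative value): with val = 0x04030201 and idx = 2 this
+ * extracts byte 2, i.e. 0x03; the fixed rate/retry sets are packed one
+ * entry per byte.
+ */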
+
+#define SHORT_PRE 1
+#define LONG_PRE 0
+
+#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS
+#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS
+#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI
+#define WLAN_PHY_HT_40_SS WLAN_RC_PHY_HT_40_SS
+#define WLAN_PHY_HT_40_SS_HGI WLAN_RC_PHY_HT_40_SS_HGI
+#define WLAN_PHY_HT_40_DS WLAN_RC_PHY_HT_40_DS
+#define WLAN_PHY_HT_40_DS_HGI WLAN_RC_PHY_HT_40_DS_HGI
+
+#define WLAN_PHY_OFDM PHY_OFDM
+#define WLAN_PHY_CCK PHY_CCK
+
+#define TRUE_20 0x2
+#define TRUE_40 0x4
+#define TRUE_2040 (TRUE_20|TRUE_40)
+#define TRUE_ALL (TRUE_2040|TRUE)
+
+enum {
+ WLAN_RC_PHY_HT_20_SS = 4,
+ WLAN_RC_PHY_HT_20_DS,
+ WLAN_RC_PHY_HT_40_SS,
+ WLAN_RC_PHY_HT_40_DS,
+ WLAN_RC_PHY_HT_20_SS_HGI,
+ WLAN_RC_PHY_HT_20_DS_HGI,
+ WLAN_RC_PHY_HT_40_SS_HGI,
+ WLAN_RC_PHY_HT_40_DS_HGI,
+ WLAN_RC_PHY_MAX
+};
+
+#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
+ || (_phy == WLAN_RC_PHY_HT_40_DS) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
+ || (_phy == WLAN_RC_PHY_HT_40_DS) \
+ || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+
+#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
+
+/* Returns the capflag mode */
+#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
+ (capflag & WLAN_RC_40_FLAG) ? TRUE_40 : TRUE_20 : TRUE))
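+/*
+ * In other words: an HT40-capable peer maps to TRUE_40, an HT20-only HT
+ * peer to TRUE_20, and a legacy (non-HT) peer to TRUE.
+ */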
+
+/* Return TRUE if flag supports HT20 && client supports HT20 or
+ * return TRUE if flag supports HT40 && client supports HT40.
+ * This is used because some rates overlap between HT20/HT40.
+ */
+
+#define WLAN_RC_PHY_HT_VALID(flag, capflag) (((flag & TRUE_20) && !(capflag \
+ & WLAN_RC_40_FLAG)) || ((flag & TRUE_40) && \
+ (capflag & WLAN_RC_40_FLAG)))
+
+#define WLAN_RC_DS_FLAG (0x01)
+#define WLAN_RC_40_FLAG (0x02)
+#define WLAN_RC_SGI_FLAG (0x04)
+#define WLAN_RC_HT_FLAG (0x08)
+
+/* Index into the rate table */
+#define INIT_RATE_MAX_20 23
+#define INIT_RATE_MAX_40 40
+
+#define RATE_TABLE_SIZE 64
+
+/* XXX: Convert to kdoc */
+struct ath_rate_table {
+ int rate_cnt;
+ struct {
+ int valid; /* Valid for use in rate control */
+		int valid_single_stream; /* Valid for use in rate control
+ for single stream operation */
+ u8 phy; /* CCK/OFDM/TURBO/XR */
+ u32 ratekbps; /* Rate in Kbits per second */
+ u32 user_ratekbps; /* User rate in KBits per second */
+ u8 ratecode; /* rate that goes into
+ hw descriptors */
+ u8 short_preamble; /* Mask for enabling short preamble
+ in rate code for CCK */
+ u8 dot11rate; /* Value that goes into supported
+ rates info element of MLME */
+ u8 ctrl_rate; /* Index of next lower basic rate,
+ used for duration computation */
+ int8_t rssi_ack_validmin; /* Rate control related */
+ int8_t rssi_ack_deltamin; /* Rate control related */
+ u8 base_index; /* base rate index */
+ u8 cw40index; /* 40cap rate index */
+ u8 sgi_index; /* shortgi rate index */
+		u8 ht_index; /* shortgi + cw40 rate index */
+		u32 max_4ms_framelen; /* Maximum frame length (bytes)
+ for 4ms tx duration */
+ } info[RATE_TABLE_SIZE];
+ u32 probe_interval; /* interval for ratectrl to
+ probe for other rates */
+ u32 rssi_reduce_interval; /* interval for ratectrl
+ to reduce RSSI */
+ u8 initial_ratemax; /* the initial ratemax value used
+ in ath_rc_sib_update() */
+};
+
+#define ATH_RC_PROBE_ALLOWED 0x00000001
+#define ATH_RC_MINRATE_LASTRATE 0x00000002
+#define ATH_RC_SHORT_PREAMBLE 0x00000004
+
+struct ath_rc_series {
+ u8 rix;
+ u8 tries;
+ u8 flags;
+ u32 max_4ms_framelen;
+};
+
+/* rcs_flags definition */
+#define ATH_RC_DS_FLAG 0x01
+#define ATH_RC_CW40_FLAG 0x02 /* CW 40 */
+#define ATH_RC_SGI_FLAG 0x04 /* Short Guard Interval */
+#define ATH_RC_HT_FLAG 0x08 /* HT */
+#define ATH_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */
+
+/*
+ * State structures for new rate adaptation code
+ */
+#define MAX_TX_RATE_TBL 64
+#define MAX_TX_RATE_PHY 48
+
+struct ath_tx_ratectrl_state {
+ int8_t rssi_thres; /* required rssi for this rate (dB) */
+ u8 per; /* recent estimate of packet error rate (%) */
+};
+
+struct ath_tx_ratectrl {
+ struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */
+ int8_t rssi_last; /* last ack rssi */
+ int8_t rssi_last_lookup; /* last ack rssi used for lookup */
+ int8_t rssi_last_prev; /* previous last ack rssi */
+ int8_t rssi_last_prev2; /* 2nd previous last ack rssi */
+ int32_t rssi_sum_cnt; /* count of rssi_sum for averaging */
+ int32_t rssi_sum_rate; /* rate that we are averaging */
+ int32_t rssi_sum; /* running sum of rssi for averaging */
+ u32 valid_txrate_mask; /* mask of valid rates */
+ u8 rate_table_size; /* rate table size */
+ u8 rate_max; /* max rate that has recently worked */
+ u8 probe_rate; /* rate we are probing at */
+ u32 rssi_time; /* msec timestamp for last ack rssi */
+ u32 rssi_down_time; /* msec timestamp for last down step */
+ u32 probe_time; /* msec timestamp for last probe */
+ u8 hw_maxretry_pktcnt; /* num packets since we got
+ HW max retry error */
+	u8 max_valid_rate; /* number of valid rates */
+ u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */
+	u32 per_down_time; /* msec timestamp for last
+ PER down step */
+
+ /* 11n state */
+ u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */
+ u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
+ u8 rc_phy_mode;
+ u8 rate_max_phy; /* Phy index for the max rate */
+	u32 rate_max_lastused; /* msec timestamp of when we
+ last used rateMaxPhy */
+ u32 probe_interval; /* interval for ratectrl to probe
+ for other rates */
+};
+
+struct ath_rateset {
+ u8 rs_nrates;
+ u8 rs_rates[ATH_RATE_MAX];
+};
+
+/* per-device state */
+struct ath_rate_softc {
+ /* phy tables that contain rate control data */
+ const void *hw_rate_table[ATH9K_MODE_MAX];
+ int fixedrix; /* -1 or index of fixed rate */
+};
+
+/* per-node state */
+struct ath_rate_node {
+ struct ath_tx_ratectrl tx_ratectrl; /* rate control state proper */
+ u32 prev_data_rix; /* rate idx of last data frame */
+
+ /* map of rate ix -> negotiated rate set ix */
+ u8 rixmap[MAX_TX_RATE_TBL];
+
+ /* map of ht rate ix -> negotiated rate set ix */
+ u8 ht_rixmap[MAX_TX_RATE_TBL];
+
+ u8 ht_cap; /* ht capabilities */
+ u8 ant_tx; /* current transmit antenna */
+
+ u8 single_stream; /* When TRUE, only single
+ stream Tx possible */
+ struct ath_rateset neg_rates; /* Negotiated rates */
+ struct ath_rateset neg_ht_rates; /* Negotiated HT rates */
+ struct ath_rate_softc *asc; /* back pointer to atheros softc */
+ struct ath_vap *avp; /* back pointer to vap */
+};
+
+/* Driver data of ieee80211_tx_info */
+struct ath_tx_info_priv {
+ struct ath_rc_series rcs[4];
+ struct ath_tx_status tx;
+ int n_frames;
+ int n_bad_frames;
+ u8 min_rate;
+};
+
+/*
+ * Attach/detach a rate control module.
+ */
+struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah);
+void ath_rate_detach(struct ath_rate_softc *asc);
+
+/*
+ * Update/reset rate control state for 802.11 state transitions.
+ * Important mostly as the analog to ath_rate_newassoc when operating
+ * in station mode.
+ */
+void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv);
+void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
+
+/*
+ * Return the tx rate series.
+ */
+void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv,
+ int num_tries, int num_rates,
+ unsigned int rcflag, struct ath_rc_series[],
+ int *is_probe, int isretry);
+/*
+ * Return rate index for given Dot11 Rate.
+ */
+u8 ath_rate_findrateix(struct ath_softc *sc,
+ u8 dot11_rate);
+
+/* Routines to register/unregister rate control algorithm */
+int ath_rate_control_register(void);
+void ath_rate_control_unregister(void);
+
+#endif /* RC_H */
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
new file mode 100644
index 00000000000..2fe806175c0
--- /dev/null
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -0,0 +1,1318 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Implementation of receive path.
+ */
+
+#include "core.h"
+
+/*
+ * Setup and link descriptors.
+ *
+ * 11N: we can no longer afford to self link the last descriptor.
+ * MAC acknowledges BA status as long as it copies frames to host
+ * buffer (or rx fifo). This can incorrectly acknowledge packets
+ * to a sender if last desc is self-linked.
+ *
+ * NOTE: Caller should hold the rxbuf lock.
+ */
+
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_desc *ds;
+ struct sk_buff *skb;
+
+ ATH_RXBUF_RESET(bf);
+
+ ds = bf->bf_desc;
+ ds->ds_link = 0; /* link to null */
+ ds->ds_data = bf->bf_buf_addr;
+
+ /* XXX For RADAR?
+ * virtual addr of the beginning of the buffer. */
+ skb = bf->bf_mpdu;
+ ASSERT(skb != NULL);
+ ds->ds_vdata = skb->data;
+
+ /* setup rx descriptors */
+ ath9k_hw_setuprxdesc(ah,
+ ds,
+ skb_tailroom(skb), /* buffer size */
+ 0);
+
+ if (sc->sc_rxlink == NULL)
+ ath9k_hw_putrxbuf(ah, bf->bf_daddr);
+ else
+ *sc->sc_rxlink = bf->bf_daddr;
+
+ sc->sc_rxlink = &ds->ds_link;
+ ath9k_hw_rxena(ah);
+}
+
+/* Process received BAR frame */
+
+static int ath_bar_rx(struct ath_softc *sc,
+ struct ath_node *an,
+ struct sk_buff *skb)
+{
+ struct ieee80211_bar *bar;
+ struct ath_arx_tid *rxtid;
+ struct sk_buff *tskb;
+ struct ath_recv_status *rx_status;
+ int tidno, index, cindex;
+ u16 seqno;
+
+ /* look at BAR contents */
+
+ bar = (struct ieee80211_bar *)skb->data;
+ tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
+ >> IEEE80211_BAR_CTL_TID_S;
+ seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;
+
+ /* process BAR - indicate all pending RX frames till the BAR seqno */
+
+ rxtid = &an->an_aggr.rx.tid[tidno];
+
+ spin_lock_bh(&rxtid->tidlock);
+
+ /* get relative index */
+
+ index = ATH_BA_INDEX(rxtid->seq_next, seqno);
+
+ /* drop BAR if old sequence (index is too large) */
+
+ if ((index > rxtid->baw_size) &&
+ (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
+ /* discard frame, ieee layer may not treat frame as a dup */
+ goto unlock_and_free;
+
+	/* complete receive processing for all pending frames up to the BAR seqno */
+
+ cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+ while ((rxtid->baw_head != rxtid->baw_tail) &&
+ (rxtid->baw_head != cindex)) {
+ tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
+ rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
+ rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
+
+ if (tskb != NULL)
+ ath_rx_subframe(an, tskb, rx_status);
+
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+ }
+
+ /* ... and indicate rest of the frames in-order */
+
+ while (rxtid->baw_head != rxtid->baw_tail &&
+ rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
+ tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
+ rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
+ rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
+
+ ath_rx_subframe(an, tskb, rx_status);
+
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+ }
+
+unlock_and_free:
+ spin_unlock_bh(&rxtid->tidlock);
+ /* free bar itself */
+ dev_kfree_skb(skb);
+ return IEEE80211_FTYPE_CTL;
+}
+
+/* Function to handle a subframe of aggregation when HT is enabled */
+
+static int ath_ampdu_input(struct ath_softc *sc,
+ struct ath_node *an,
+ struct sk_buff *skb,
+ struct ath_recv_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath_arx_tid *rxtid;
+ struct ath_rxbuf *rxbuf;
+ u8 type, subtype;
+ u16 rxseq;
+ int tid = 0, index, cindex, rxdiff;
+ __le16 fc;
+ u8 *qc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ /* collect stats of frames with non-zero version */
+
+ if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
+ subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
+
+ if (ieee80211_is_back_req(fc))
+ return ath_bar_rx(sc, an, skb);
+
+ /* special aggregate processing only for qos unicast data frames */
+
+ if (!ieee80211_is_data(fc) ||
+ !ieee80211_is_data_qos(fc) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return ath_rx_subframe(an, skb, rx_status);
+
+ /* lookup rx tid state */
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ if (sc->sc_opmode == ATH9K_M_STA) {
+ /* Drop the frame not belonging to me. */
+ if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+ }
+
+ rxtid = &an->an_aggr.rx.tid[tid];
+
+ spin_lock(&rxtid->tidlock);
+
+ rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
+ (ATH_TID_MAX_BUFS - 1);
+
+ /*
+ * If the ADDBA exchange has not been completed by the source,
+ * process via legacy path (i.e. no reordering buffer is needed)
+ */
+ if (!rxtid->addba_exchangecomplete) {
+ spin_unlock(&rxtid->tidlock);
+ return ath_rx_subframe(an, skb, rx_status);
+ }
+
+ /* extract sequence number from recvd frame */
+
+ rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
+
+ if (rxtid->seq_reset) {
+ rxtid->seq_reset = 0;
+ rxtid->seq_next = rxseq;
+ }
+
+ index = ATH_BA_INDEX(rxtid->seq_next, rxseq);
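+	/*
+	 * index is rxseq's offset from the window start (seq_next) in
+	 * sequence space: values below baw_size fall inside the reorder
+	 * window, larger ones slide the window forward, and very large
+	 * ones are treated as old (wrapped) sequence numbers and dropped
+	 * below.
+	 */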
+
+ /* drop frame if old sequence (index is too large) */
+
+ if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
+ /* discard frame, ieee layer may not treat frame as a dup */
+ spin_unlock(&rxtid->tidlock);
+ dev_kfree_skb(skb);
+ return IEEE80211_FTYPE_DATA;
+ }
+
+ /* sequence number is beyond block-ack window */
+
+ if (index >= rxtid->baw_size) {
+
+ /* complete receive processing for all pending frames */
+
+ while (index >= rxtid->baw_size) {
+
+ rxbuf = rxtid->rxbuf + rxtid->baw_head;
+
+ if (rxbuf->rx_wbuf != NULL) {
+ ath_rx_subframe(an, rxbuf->rx_wbuf,
+ &rxbuf->rx_status);
+ rxbuf->rx_wbuf = NULL;
+ }
+
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+
+ index--;
+ }
+ }
+
+ /* add buffer to the recv ba window */
+
+ cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+ rxbuf = rxtid->rxbuf + cindex;
+
+ if (rxbuf->rx_wbuf != NULL) {
+ spin_unlock(&rxtid->tidlock);
+ /* duplicate frame */
+ dev_kfree_skb(skb);
+ return IEEE80211_FTYPE_DATA;
+ }
+
+ rxbuf->rx_wbuf = skb;
+ rxbuf->rx_time = get_timestamp();
+ rxbuf->rx_status = *rx_status;
+
+ /* advance tail if sequence received is newer
+ * than any received so far */
+
+ if (index >= rxdiff) {
+ rxtid->baw_tail = cindex;
+ INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
+ }
+
+ /* indicate all in-order received frames */
+
+ while (rxtid->baw_head != rxtid->baw_tail) {
+ rxbuf = rxtid->rxbuf + rxtid->baw_head;
+ if (!rxbuf->rx_wbuf)
+ break;
+
+ ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
+ rxbuf->rx_wbuf = NULL;
+
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+ }
+
+ /*
+ * start a timer to flush all received frames if there are pending
+ * receive frames
+ */
+ if (rxtid->baw_head != rxtid->baw_tail)
+ mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
+ else
+ del_timer_sync(&rxtid->timer);
+
+ spin_unlock(&rxtid->tidlock);
+ return IEEE80211_FTYPE_DATA;
+}
+
+/* Timer to flush all received sub-frames */
+
+static void ath_rx_timer(unsigned long data)
+{
+ struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
+ struct ath_node *an = rxtid->an;
+ struct ath_rxbuf *rxbuf;
+ int nosched;
+
+ spin_lock_bh(&rxtid->tidlock);
+ while (rxtid->baw_head != rxtid->baw_tail) {
+ rxbuf = rxtid->rxbuf + rxtid->baw_head;
+ if (!rxbuf->rx_wbuf) {
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+ continue;
+ }
+
+ /*
+ * Stop if the next one is a very recent frame.
+ *
+ * Call get_timestamp in every iteration to protect against the
+ * case in which a new frame is received while we are executing
+ * this function. Using a timestamp obtained before entering
+ * the loop could lead to a very large time interval
+ * (a negative value typecast to unsigned), breaking the
+ * function's logic.
+ */
+ if ((get_timestamp() - rxbuf->rx_time) <
+ (ATH_RX_TIMEOUT * HZ / 1000))
+ break;
+
+ ath_rx_subframe(an, rxbuf->rx_wbuf,
+ &rxbuf->rx_status);
+ rxbuf->rx_wbuf = NULL;
+
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+ }
+
+	/*
+	 * Decide whether the flush timer needs to run again: only if
+	 * there are still pending receive frames.
+	 */
+ if (rxtid->baw_head != rxtid->baw_tail)
+ nosched = 0;
+ else
+ nosched = 1; /* no need to re-arm the timer again */
+
+ spin_unlock_bh(&rxtid->tidlock);
+}
+
+/* Free all pending sub-frames in the re-ordering buffer */
+
+static void ath_rx_flush_tid(struct ath_softc *sc,
+ struct ath_arx_tid *rxtid, int drop)
+{
+ struct ath_rxbuf *rxbuf;
+
+ spin_lock_bh(&rxtid->tidlock);
+ while (rxtid->baw_head != rxtid->baw_tail) {
+ rxbuf = rxtid->rxbuf + rxtid->baw_head;
+ if (!rxbuf->rx_wbuf) {
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+ continue;
+ }
+
+ if (drop)
+ dev_kfree_skb(rxbuf->rx_wbuf);
+ else
+ ath_rx_subframe(rxtid->an,
+ rxbuf->rx_wbuf,
+ &rxbuf->rx_status);
+
+ rxbuf->rx_wbuf = NULL;
+
+ INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
+ INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
+ }
+ spin_unlock_bh(&rxtid->tidlock);
+}
+
+static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
+ u32 len)
+{
+ struct sk_buff *skb;
+ u32 off;
+
+ /*
+ * Cache-line-align. This is important (for the
+ * 5210 at least) as not doing so causes bogus data
+ * in rx'd frames.
+ */
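+	/*
+	 * Example with assumed numbers: if sc_cachelsz is 32 and the new
+	 * skb->data sits 20 bytes past a cache-line boundary, the
+	 * skb_reserve(skb, 32 - 20) below pushes the data pointer up to
+	 * the next 32-byte boundary.
+	 */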
+
+ skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
+ if (skb != NULL) {
+ off = ((unsigned long) skb->data) % sc->sc_cachelsz;
+ if (off != 0)
+ skb_reserve(skb, sc->sc_cachelsz - off);
+ } else {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: skbuff alloc of size %u failed\n",
+ __func__, len);
+ return NULL;
+ }
+
+ return skb;
+}
+
+static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
+{
+ struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
+
+ ASSERT(bf != NULL);
+
+ spin_lock_bh(&sc->sc_rxbuflock);
+ if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+ /*
+		 * This buffer is still held for hw access.
+		 * Mark it as free to be re-queued later.
+ */
+ bf->bf_status |= ATH_BUFSTATUS_FREE;
+ } else {
+ /* XXX: we probably never enter here, remove after
+ * verification */
+ list_add_tail(&bf->list, &sc->sc_rxbuf);
+ ath_rx_buf_link(sc, bf);
+ }
+ spin_unlock_bh(&sc->sc_rxbuflock);
+}
+
+/*
+ * The skb indicated to upper stack won't be returned to us.
+ * So we have to allocate a new one and queue it by ourselves.
+ */
+static int ath_rx_indicate(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_recv_status *status,
+ u16 keyix)
+{
+ struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
+ struct sk_buff *nskb;
+ int type;
+
+ /* indicate frame to the stack, which will free the old skb. */
+ type = ath__rx_indicate(sc, skb, status, keyix);
+
+	/* allocate a new skb and queue it for H/W processing */
+ nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
+ if (nskb != NULL) {
+ bf->bf_mpdu = nskb;
+ bf->bf_buf_addr = ath_skb_map_single(sc,
+ nskb,
+ PCI_DMA_FROMDEVICE,
+ /* XXX: Remove get_dma_mem_context() */
+ get_dma_mem_context(bf, bf_dmacontext));
+ ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
+
+ /* queue the new wbuf to H/W */
+ ath_rx_requeue(sc, nskb);
+ }
+
+ return type;
+}
+
+static void ath_opmode_init(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u32 rfilt, mfilt[2];
+
+ /* configure rx filter */
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* configure bssid mask */
+ if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
+
+ /* configure operational mode */
+ ath9k_hw_setopmode(ah);
+
+ /* Handle any link-level address change. */
+ ath9k_hw_setmac(ah, sc->sc_myaddr);
+
+ /* calculate and install multicast filter */
+ mfilt[0] = mfilt[1] = ~0;
+
+ ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
+ DPRINTF(sc, ATH_DBG_CONFIG ,
+ "%s: RX filter 0x%x, MC filter %08x:%08x\n",
+ __func__, rfilt, mfilt[0], mfilt[1]);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int error = 0;
+
+ do {
+ spin_lock_init(&sc->sc_rxflushlock);
+ sc->sc_rxflush = 0;
+ spin_lock_init(&sc->sc_rxbuflock);
+
+ /*
+ * Cisco's VPN software requires that drivers be able to
+ * receive encapsulated frames that are larger than the MTU.
+		 * Since we can't be sure how large a frame we'll get, set up
+		 * to handle the largest one possible.
+ */
+ sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+ min(sc->sc_cachelsz,
+ (u16)64));
+
+ DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
+ __func__, sc->sc_cachelsz, sc->sc_rxbufsize);
+
+ /* Initialize rx descriptors */
+
+ error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
+ "rx", nbufs, 1);
+ if (error != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: failed to allocate rx descriptors: %d\n",
+ __func__, error);
+ break;
+ }
+
+ /* Pre-allocate a wbuf for each rx buffer */
+
+ list_for_each_entry(bf, &sc->sc_rxbuf, list) {
+ skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
+ if (skb == NULL) {
+ error = -ENOMEM;
+ break;
+ }
+
+ bf->bf_mpdu = skb;
+ bf->bf_buf_addr =
+ ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
+ get_dma_mem_context(bf, bf_dmacontext));
+ ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
+ }
+ sc->sc_rxlink = NULL;
+
+ } while (0);
+
+ if (error)
+ ath_rx_cleanup(sc);
+
+ return error;
+}
+
+/* Reclaim all rx queue resources */
+
+void ath_rx_cleanup(struct ath_softc *sc)
+{
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+
+ list_for_each_entry(bf, &sc->sc_rxbuf, list) {
+ skb = bf->bf_mpdu;
+ if (skb)
+ dev_kfree_skb(skb);
+ }
+
+ /* cleanup rx descriptors */
+
+ if (sc->sc_rxdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+}
+
+/*
+ * Calculate the receive filter according to the
+ * operating mode and state:
+ *
+ * o always accept unicast, broadcast, and multicast traffic
+ * o maintain current state of phy error reception (the hal
+ * may enable phy error frames for noise immunity work)
+ * o probe request frames are accepted only when operating in
+ * hostap, adhoc, or monitor modes
+ * o enable promiscuous mode according to the interface state
+ * o accept beacons:
+ * - when operating in adhoc mode so the 802.11 layer creates
+ * node table entries for peers,
+ * - when operating in station mode for collecting rssi data when
+ * the station is otherwise quiet, or
+ * - when operating as a repeater so we see repeater-sta beacons
+ * - when scanning
+ */
+
+u32 ath_calcrxfilter(struct ath_softc *sc)
+{
+#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
+ u32 rfilt;
+
+ rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
+ | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ | ATH9K_RX_FILTER_MCAST;
+
+ /* If not a STA, enable processing of Probe Requests */
+ if (sc->sc_opmode != ATH9K_M_STA)
+ rfilt |= ATH9K_RX_FILTER_PROBEREQ;
+
+	/* Can't set HOSTAP into promiscuous mode */
+ if (sc->sc_opmode == ATH9K_M_MONITOR) {
+ rfilt |= ATH9K_RX_FILTER_PROM;
+ /* ??? To prevent from sending ACK */
+ rfilt &= ~ATH9K_RX_FILTER_UCAST;
+ }
+
+ if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS ||
+ sc->sc_scanning)
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ /* If in HOSTAP mode, want to enable reception of PSPOLL frames
+ & beacon frames */
+ if (sc->sc_opmode == ATH9K_M_HOSTAP)
+ rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
+ return rfilt;
+#undef RX_FILTER_PRESERVE
+}
+
+/* Enable the receive h/w following a reset. */
+
+int ath_startrecv(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf, *tbf;
+
+ spin_lock_bh(&sc->sc_rxbuflock);
+ if (list_empty(&sc->sc_rxbuf))
+ goto start_recv;
+
+ sc->sc_rxlink = NULL;
+ list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
+ if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+ /* restarting h/w, no need for holding descriptors */
+ bf->bf_status &= ~ATH_BUFSTATUS_STALE;
+ /*
+ * Upper layer may not be done with the frame yet so
+ * we can't just re-queue it to hardware. Remove it
+ * from h/w queue. It'll be re-queued when upper layer
+ * returns the frame and ath_rx_requeue_mpdu is called.
+ */
+ if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
+ list_del(&bf->list);
+ continue;
+ }
+ }
+ /* chain descriptors */
+ ath_rx_buf_link(sc, bf);
+ }
+
+ /* We could have deleted elements so the list may be empty now */
+ if (list_empty(&sc->sc_rxbuf))
+ goto start_recv;
+
+ bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
+ ath9k_hw_putrxbuf(ah, bf->bf_daddr);
+ ath9k_hw_rxena(ah); /* enable recv descriptors */
+
+start_recv:
+ spin_unlock_bh(&sc->sc_rxbuflock);
+ ath_opmode_init(sc); /* set filters, etc. */
+ ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */
+ return 0;
+}
+
+/* Disable the receive h/w in preparation for a reset. */
+
+bool ath_stoprecv(struct ath_softc *sc)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ u64 tsf;
+ bool stopped;
+
+ ath9k_hw_stoppcurecv(ah); /* disable PCU */
+ ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */
+ stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */
+ mdelay(3); /* 3ms is long enough for 1 frame */
+ tsf = ath9k_hw_gettsf64(ah);
+ sc->sc_rxlink = NULL; /* just in case */
+ return stopped;
+}
+
+/* Flush receive queue */
+
+void ath_flushrecv(struct ath_softc *sc)
+{
+ /*
+	 * ath_rx_tasklet may be used to handle the rx interrupt and to flush
+	 * the receive queue at the same time. Use a lock to serialize access
+	 * to the rx queue.
+ * ath_rx_tasklet cannot hold the spinlock while indicating packets.
+ * Instead, do not claim the spinlock but check for a flush in
+ * progress (see references to sc_rxflush)
+ */
+ spin_lock_bh(&sc->sc_rxflushlock);
+ sc->sc_rxflush = 1;
+
+ ath_rx_tasklet(sc, 1);
+
+ sc->sc_rxflush = 0;
+ spin_unlock_bh(&sc->sc_rxflushlock);
+}
+
+/* Process an individual frame */
+
+int ath_rx_input(struct ath_softc *sc,
+ struct ath_node *an,
+ int is_ampdu,
+ struct sk_buff *skb,
+ struct ath_recv_status *rx_status,
+ enum ATH_RX_TYPE *status)
+{
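+	/*
+	 * When rx aggregation is enabled, A-MPDU subframes are handed to
+	 * the aggregation code and marked as consumed; everything else is
+	 * left for the caller to process.
+	 */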
+ if (is_ampdu && sc->sc_rxaggr) {
+ *status = ATH_RX_CONSUMED;
+ return ath_ampdu_input(sc, an, skb, rx_status);
+ } else {
+ *status = ATH_RX_NON_CONSUMED;
+ return -1;
+ }
+}
+
+/* Process receive queue, as well as LED, etc. */
+
+int ath_rx_tasklet(struct ath_softc *sc, int flush)
+{
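+/* Map a descriptor's physical (DMA) address back to its virtual
+ * address within the rx descriptor block. */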
+#define PA2DESC(_sc, _pa) \
+ ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
+ ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
+
+ struct ath_buf *bf, *bf_held = NULL;
+ struct ath_desc *ds;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ struct ath_recv_status rx_status;
+ struct ath_hal *ah = sc->sc_ah;
+ int type, rx_processed = 0;
+ u32 phyerr;
+ u8 chainreset = 0;
+ int retval;
+ __le16 fc;
+
+ do {
+ /* If handling rx interrupt and flush is in progress => exit */
+ if (sc->sc_rxflush && (flush == 0))
+ break;
+
+ spin_lock_bh(&sc->sc_rxbuflock);
+ if (list_empty(&sc->sc_rxbuf)) {
+ sc->sc_rxlink = NULL;
+ spin_unlock_bh(&sc->sc_rxbuflock);
+ break;
+ }
+
+ bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
+
+ /*
+		 * There is a race condition in which the BH gets scheduled
+		 * after software writes RxE and before the hardware reloads
+		 * the last descriptor to fetch the newly chained one.
+		 * Software must keep the last DONE descriptor as a holding
+		 * descriptor - software does so by marking it with the
+		 * STALE flag.
+ */
+ if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+ bf_held = bf;
+ if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
+ /*
+ * The holding descriptor is the last
+ * descriptor in queue. It's safe to
+ * remove the last holding descriptor
+ * in BH context.
+ */
+ list_del(&bf_held->list);
+ bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
+ sc->sc_rxlink = NULL;
+
+ if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
+ list_add_tail(&bf_held->list,
+ &sc->sc_rxbuf);
+ ath_rx_buf_link(sc, bf_held);
+ }
+ spin_unlock_bh(&sc->sc_rxbuflock);
+ break;
+ }
+ bf = list_entry(bf->list.next, struct ath_buf, list);
+ }
+
+ ds = bf->bf_desc;
+ ++rx_processed;
+
+ /*
+ * Must provide the virtual address of the current
+ * descriptor, the physical address, and the virtual
+ * address of the next descriptor in the h/w chain.
+ * This allows the HAL to look ahead to see if the
+ * hardware is done with a descriptor by checking the
+ * done bit in the following descriptor and the address
+ * of the current descriptor the DMA engine is working
+ * on. All this is necessary because of our use of
+ * a self-linked list to avoid rx overruns.
+ */
+ retval = ath9k_hw_rxprocdesc(ah,
+ ds,
+ bf->bf_daddr,
+ PA2DESC(sc, ds->ds_link),
+ 0);
+ if (retval == -EINPROGRESS) {
+ struct ath_buf *tbf;
+ struct ath_desc *tds;
+
+ if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
+ spin_unlock_bh(&sc->sc_rxbuflock);
+ break;
+ }
+
+ tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+ /*
+ * On some hardware the descriptor status words could
+ * get corrupted, including the done bit. Because of
+ * this, check if the next descriptor's done bit is
+ * set or not.
+ *
+ * If the next descriptor's done bit is set, the current
+ * descriptor has been corrupted. Force s/w to discard
+ * this descriptor and continue...
+ */
+
+ tds = tbf->bf_desc;
+ retval = ath9k_hw_rxprocdesc(ah,
+ tds, tbf->bf_daddr,
+ PA2DESC(sc, tds->ds_link), 0);
+ if (retval == -EINPROGRESS) {
+ spin_unlock_bh(&sc->sc_rxbuflock);
+ break;
+ }
+ }
+
+ /* XXX: we do not support frames spanning
+ * multiple descriptors */
+ bf->bf_status |= ATH_BUFSTATUS_DONE;
+
+ skb = bf->bf_mpdu;
+ if (skb == NULL) { /* XXX ??? can this happen */
+ spin_unlock_bh(&sc->sc_rxbuflock);
+ continue;
+ }
+ /*
+		 * Now that we know it's a completed frame, we can indicate it.
+		 * Remove the previous holding descriptor and leave this one
+		 * in the queue as the new holding descriptor.
+ */
+ if (bf_held) {
+ list_del(&bf_held->list);
+ bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
+ if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
+ list_add_tail(&bf_held->list, &sc->sc_rxbuf);
+ /* try to requeue this descriptor */
+ ath_rx_buf_link(sc, bf_held);
+ }
+ }
+
+ bf->bf_status |= ATH_BUFSTATUS_STALE;
+ bf_held = bf;
+ /*
+		 * Release the lock here in case ieee80211_input() returns
+		 * the frame immediately by calling ath_rx_mpdu_requeue().
+ */
+ spin_unlock_bh(&sc->sc_rxbuflock);
+
+ if (flush) {
+ /*
+ * If we're asked to flush receive queue, directly
+ * chain it back at the queue without processing it.
+ */
+ goto rx_next;
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ memzero(&rx_status, sizeof(struct ath_recv_status));
+
+ if (ds->ds_rxstat.rs_more) {
+ /*
+ * Frame spans multiple descriptors; this
+ * cannot happen yet as we don't support
+ * jumbograms. If not in monitor mode,
+ * discard the frame.
+ */
+#ifndef ERROR_FRAMES
+ /*
+ * Enable this if you want to see
+ * error frames in Monitor mode.
+ */
+ if (sc->sc_opmode != ATH9K_M_MONITOR)
+ goto rx_next;
+#endif
+ /* fall thru for monitor mode handling... */
+ } else if (ds->ds_rxstat.rs_status != 0) {
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ rx_status.flags |= ATH_RX_FCS_ERROR;
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
+ goto rx_next;
+ }
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
+ /*
+				 * Decrypt error. We only mark the packet
+				 * status here and always push the frame up
+				 * to let mac80211 handle the actual error
+				 * case, be it a missing decryption key or a
+				 * real decryption error. This lets us keep
+				 * statistics there.
+ */
+ rx_status.flags |= ATH_RX_DECRYPT_ERROR;
+ } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
+ /*
+				 * Demic error. We only mark the frame status
+				 * here and always push the frame up to let
+				 * mac80211 handle the actual error case. This
+				 * lets us keep statistics there. Hardware may
+				 * post a false-positive MIC error.
+ */
+ if (ieee80211_is_ctl(fc))
+ /*
+ * Sometimes, we get invalid
+ * MIC failures on valid control frames.
+					 * Remove these MIC errors.
+ */
+ ds->ds_rxstat.rs_status &=
+ ~ATH9K_RXERR_MIC;
+ else
+ rx_status.flags |= ATH_RX_MIC_ERROR;
+ }
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ if (sc->sc_opmode == ATH9K_M_MONITOR) {
+ if (ds->ds_rxstat.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_CRC))
+ goto rx_next;
+ } else {
+ if (ds->ds_rxstat.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+ goto rx_next;
+ }
+ }
+ }
+ /*
+ * The status portion of the descriptor could get corrupted.
+ */
+ if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
+ goto rx_next;
+ /*
+ * Sync and unmap the frame. At this point we're
+ * committed to passing the sk_buff somewhere so
+ * clear buf_skb; this means a new sk_buff must be
+		 * allocated when the rx descriptor is set up again
+ * to receive another frame.
+ */
+ skb_put(skb, ds->ds_rxstat.rs_datalen);
+ skb->protocol = cpu_to_be16(ETH_P_CONTROL);
+ rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
+ rx_status.rateieee =
+ sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
+ rx_status.rateKbps =
+ sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
+ rx_status.ratecode = ds->ds_rxstat.rs_rate;
+
+ /* HT rate */
+ if (rx_status.ratecode & 0x80) {
+ /* TODO - add table to avoid division */
+ if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
+ rx_status.flags |= ATH_RX_40MHZ;
+ rx_status.rateKbps =
+ (rx_status.rateKbps * 27) / 13;
+ }
+ if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
+ rx_status.rateKbps =
+ (rx_status.rateKbps * 10) / 9;
+ else
+ rx_status.flags |= ATH_RX_SHORT_GI;
+ }
+
+ /* sc->sc_noise_floor is only available when the station
+ attaches to an AP, so we use a default value
+ if we are not yet attached. */
+
+ /* XXX we should use either sc->sc_noise_floor or
+ * ath_hal_getChanNoise(ah, &sc->sc_curchan)
+ * to calculate the noise floor.
+ * However, the value returned by ath_hal_getChanNoise
+ * seems to be incorrect (-31dBm on the last test),
+ * so we will use a hard-coded value until we
+ * figure out what is going on.
+ */
+ rx_status.abs_rssi =
+ ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
+
+ pci_dma_sync_single_for_cpu(sc->pdev,
+ bf->bf_buf_addr,
+ skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_single(sc->pdev,
+ bf->bf_buf_addr,
+ sc->sc_rxbufsize,
+ PCI_DMA_FROMDEVICE);
+
+ /* XXX: Ah! make me more readable, use a helper */
+ if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (ds->ds_rxstat.rs_moreaggr == 0) {
+ rx_status.rssictl[0] =
+ ds->ds_rxstat.rs_rssi_ctl0;
+ rx_status.rssictl[1] =
+ ds->ds_rxstat.rs_rssi_ctl1;
+ rx_status.rssictl[2] =
+ ds->ds_rxstat.rs_rssi_ctl2;
+ rx_status.rssi = ds->ds_rxstat.rs_rssi;
+ if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
+ rx_status.rssiextn[0] =
+ ds->ds_rxstat.rs_rssi_ext0;
+ rx_status.rssiextn[1] =
+ ds->ds_rxstat.rs_rssi_ext1;
+ rx_status.rssiextn[2] =
+ ds->ds_rxstat.rs_rssi_ext2;
+ rx_status.flags |=
+ ATH_RX_RSSI_EXTN_VALID;
+ }
+ rx_status.flags |= ATH_RX_RSSI_VALID |
+ ATH_RX_CHAIN_RSSI_VALID;
+ }
+ } else {
+ /*
+ * Need to insert the "combined" rssi into the
+ * status structure for upper layer processing
+ */
+ rx_status.rssi = ds->ds_rxstat.rs_rssi;
+ rx_status.flags |= ATH_RX_RSSI_VALID;
+ }
+
+ /* Pass frames up to the stack. */
+
+ type = ath_rx_indicate(sc, skb,
+ &rx_status, ds->ds_rxstat.rs_keyix);
+
+ /*
+		 * Change the default rx antenna if rx diversity chooses the
+ * other antenna 3 times in a row.
+ */
+ if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
+ if (++sc->sc_rxotherant >= 3)
+ ath_setdefantenna(sc,
+ ds->ds_rxstat.rs_antenna);
+ } else {
+ sc->sc_rxotherant = 0;
+ }
+
+#ifdef CONFIG_SLOW_ANT_DIV
+ if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
+ ieee80211_is_beacon(fc)) {
+ ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
+ }
+#endif
+ /*
+ * For frames successfully indicated, the buffer will be
+ * returned to us by upper layers by calling
+		 * ath_rx_mpdu_requeue, either synchronously or asynchronously.
+ * So we don't want to do it here in this loop.
+ */
+ continue;
+
+rx_next:
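+		/* Mark the buffer free so it can be re-linked into the rx chain */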
+ bf->bf_status |= ATH_BUFSTATUS_FREE;
+ } while (TRUE);
+
+ if (chainreset) {
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "%s: Reset rx chain mask. "
+ "Do internal reset\n", __func__);
+ ASSERT(flush == 0);
+ ath_internal_reset(sc);
+ }
+
+ return 0;
+#undef PA2DESC
+}
+
+/* Process ADDBA request in per-TID data structure */
+
+int ath_rx_aggr_start(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid,
+ u16 *ssn)
+{
+ struct ath_arx_tid *rxtid;
+ struct ath_node *an;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_supported_band *sband;
+ u16 buffersize = 0;
+
+ spin_lock_bh(&sc->node_lock);
+ an = ath_node_find(sc, (u8 *) addr);
+ spin_unlock_bh(&sc->node_lock);
+
+ if (!an) {
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Node not found to initialize RX aggregation\n",
+ __func__);
+ return -1;
+ }
+
+ sband = hw->wiphy->bands[hw->conf.channel->band];
+ buffersize = IEEE80211_MIN_AMPDU_BUF <<
+ sband->ht_info.ampdu_factor; /* FIXME */
+
+ rxtid = &an->an_aggr.rx.tid[tid];
+
+ spin_lock_bh(&rxtid->tidlock);
+ if (sc->sc_rxaggr) {
+		/* Allow aggregation reception.
+		 * Adjust the rx BA window size; the peer might indicate a
+		 * zero buffer size for a "don't care" condition.
+ */
+ if (buffersize)
+ rxtid->baw_size = min(buffersize, rxtid->baw_size);
+
+ /* set rx sequence number */
+ rxtid->seq_next = *ssn;
+
+ /* Allocate the receive buffers for this TID */
+ DPRINTF(sc, ATH_DBG_AGGR,
+			"%s: Allocating rxbuffer for TID %d\n", __func__, tid);
+
+ if (rxtid->rxbuf == NULL) {
+ /*
+			 * If rxbuf is not NULL at this point, we *probably*
+			 * already allocated the buffer on a previous ADDBA,
+			 * and this is a subsequent ADDBA that got through.
+			 * Don't allocate again; reuse the existing pointer,
+			 * which is set to NULL when we deallocate.
+ */
+ rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
+ sizeof(struct ath_rxbuf), GFP_ATOMIC);
+ }
+ if (rxtid->rxbuf == NULL) {
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Unable to allocate RX buffer, "
+ "refusing ADDBA\n", __func__);
+ } else {
+ /* Ensure the memory is zeroed out (all internal
+ * pointers are null) */
+ memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
+ sizeof(struct ath_rxbuf));
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Allocated @%p\n", __func__, rxtid->rxbuf);
+
+ /* Allow aggregation reception */
+ rxtid->addba_exchangecomplete = 1;
+ }
+ }
+ spin_unlock_bh(&rxtid->tidlock);
+
+ return 0;
+}
+
+/* Process DELBA */
+
+int ath_rx_aggr_stop(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid)
+{
+ struct ath_node *an;
+
+ spin_lock_bh(&sc->node_lock);
+ an = ath_node_find(sc, (u8 *) addr);
+ spin_unlock_bh(&sc->node_lock);
+
+ if (!an) {
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: RX aggr stop for non-existent node\n", __func__);
+ return -1;
+ }
+
+ ath_rx_aggr_teardown(sc, an, tid);
+ return 0;
+}
+
+/* Rx aggregation tear down */
+
+void ath_rx_aggr_teardown(struct ath_softc *sc,
+ struct ath_node *an, u8 tid)
+{
+ struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];
+
+ if (!rxtid->addba_exchangecomplete)
+ return;
+
+ del_timer_sync(&rxtid->timer);
+ ath_rx_flush_tid(sc, rxtid, 0);
+ rxtid->addba_exchangecomplete = 0;
+
+ /* De-allocate the receive buffer array allocated when addba started */
+
+ if (rxtid->rxbuf) {
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Deallocating TID %d rxbuff @%p\n",
+ __func__, tid, rxtid->rxbuf);
+ kfree(rxtid->rxbuf);
+
+		/* Set pointer to null to avoid reuse */
+ rxtid->rxbuf = NULL;
+ }
+}
+
+/* Initialize per-node receive state */
+
+void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
+{
+ if (sc->sc_rxaggr) {
+ struct ath_arx_tid *rxtid;
+ int tidno;
+
+ /* Init per tid rx state */
+ for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
+ tidno < WME_NUM_TID;
+ tidno++, rxtid++) {
+ rxtid->an = an;
+ rxtid->seq_reset = 1;
+ rxtid->seq_next = 0;
+ rxtid->baw_size = WME_MAX_BA;
+ rxtid->baw_head = rxtid->baw_tail = 0;
+
+ /*
+ * Ensure the buffer pointer is null at this point
+ * (needs to be allocated when addba is received)
+ */
+
+ rxtid->rxbuf = NULL;
+ setup_timer(&rxtid->timer, ath_rx_timer,
+ (unsigned long)rxtid);
+ spin_lock_init(&rxtid->tidlock);
+
+ /* ADDBA state */
+ rxtid->addba_exchangecomplete = 0;
+ }
+ }
+}
+
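+/* Tear down per-TID receive aggregation state for a node */
+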
+void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
+{
+ if (sc->sc_rxaggr) {
+ struct ath_arx_tid *rxtid;
+ int tidno, i;
+
+		/* Clean up per tid rx state */
+ for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
+ tidno < WME_NUM_TID;
+ tidno++, rxtid++) {
+
+ if (!rxtid->addba_exchangecomplete)
+ continue;
+
+ /* must cancel timer first */
+ del_timer_sync(&rxtid->timer);
+
+ /* drop any pending sub-frames */
+ ath_rx_flush_tid(sc, rxtid, 1);
+
+ for (i = 0; i < ATH_TID_MAX_BUFS; i++)
+ ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);
+
+ rxtid->addba_exchangecomplete = 0;
+ }
+ }
+
+}
+
+/* Cleanup per-node receive state */
+
+void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
+{
+ ath_rx_node_cleanup(sc, an);
+}
+
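+/* DMA-map the skb's entire data area and return the bus address via *pa */
+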
+dma_addr_t ath_skb_map_single(struct ath_softc *sc,
+ struct sk_buff *skb,
+ int direction,
+ dma_addr_t *pa)
+{
+ /*
+ * NB: do NOT use skb->len, which is 0 on initialization.
+ * Use skb's entire data area instead.
+ */
+ *pa = pci_map_single(sc->pdev, skb->data,
+ skb_end_pointer(skb) - skb->head, direction);
+ return *pa;
+}
+
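+/* Undo the DMA mapping set up by ath_skb_map_single */
+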
+void ath_skb_unmap_single(struct ath_softc *sc,
+ struct sk_buff *skb,
+ int direction,
+ dma_addr_t *pa)
+{
+ /* Unmap skb's entire data area */
+ pci_unmap_single(sc->pdev, *pa,
+ skb_end_pointer(skb) - skb->head, direction);
+}
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
new file mode 100644
index 00000000000..42b0890a468
--- /dev/null
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -0,0 +1,1385 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_H
+#define REG_H
+
+#define AR_CR 0x0008
+#define AR_CR_RXE 0x00000004
+#define AR_CR_RXD 0x00000020
+#define AR_CR_SWI 0x00000040
+
+#define AR_RXDP 0x000C
+
+#define AR_CFG 0x0014
+#define AR_CFG_SWTD 0x00000001
+#define AR_CFG_SWTB 0x00000002
+#define AR_CFG_SWRD 0x00000004
+#define AR_CFG_SWRB 0x00000008
+#define AR_CFG_SWRG 0x00000010
+#define AR_CFG_AP_ADHOC_INDICATION 0x00000020
+#define AR_CFG_PHOK 0x00000100
+#define AR_CFG_CLK_GATE_DIS 0x00000400
+#define AR_CFG_EEBS 0x00000200
+#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
+#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
+
+#define AR_MIRT 0x0020
+#define AR_MIRT_VAL 0x0000ffff
+#define AR_MIRT_VAL_S 16
+
+#define AR_IER 0x0024
+#define AR_IER_ENABLE 0x00000001
+#define AR_IER_DISABLE 0x00000000
+
+#define AR_TIMT 0x0028
+#define AR_TIMT_LAST 0x0000ffff
+#define AR_TIMT_LAST_S 0
+#define AR_TIMT_FIRST 0xffff0000
+#define AR_TIMT_FIRST_S 16
+
+#define AR_RIMT 0x002C
+#define AR_RIMT_LAST 0x0000ffff
+#define AR_RIMT_LAST_S 0
+#define AR_RIMT_FIRST 0xffff0000
+#define AR_RIMT_FIRST_S 16
+
+#define AR_DMASIZE_4B 0x00000000
+#define AR_DMASIZE_8B 0x00000001
+#define AR_DMASIZE_16B 0x00000002
+#define AR_DMASIZE_32B 0x00000003
+#define AR_DMASIZE_64B 0x00000004
+#define AR_DMASIZE_128B 0x00000005
+#define AR_DMASIZE_256B 0x00000006
+#define AR_DMASIZE_512B 0x00000007
+
+#define AR_TXCFG 0x0030
+#define AR_TXCFG_DMASZ_MASK 0x00000003
+#define AR_TXCFG_DMASZ_4B 0
+#define AR_TXCFG_DMASZ_8B 1
+#define AR_TXCFG_DMASZ_16B 2
+#define AR_TXCFG_DMASZ_32B 3
+#define AR_TXCFG_DMASZ_64B 4
+#define AR_TXCFG_DMASZ_128B 5
+#define AR_TXCFG_DMASZ_256B 6
+#define AR_TXCFG_DMASZ_512B 7
+#define AR_FTRIG 0x000003F0
+#define AR_FTRIG_S 4
+#define AR_FTRIG_IMMED 0x00000000
+#define AR_FTRIG_64B 0x00000010
+#define AR_FTRIG_128B 0x00000020
+#define AR_FTRIG_192B 0x00000030
+#define AR_FTRIG_256B 0x00000040
+#define AR_FTRIG_512B 0x00000080
+#define AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY 0x00000800
+
+#define AR_RXCFG 0x0034
+#define AR_RXCFG_CHIRP 0x00000008
+#define AR_RXCFG_ZLFDMA 0x00000010
+#define AR_RXCFG_DMASZ_MASK 0x00000007
+#define AR_RXCFG_DMASZ_4B 0
+#define AR_RXCFG_DMASZ_8B 1
+#define AR_RXCFG_DMASZ_16B 2
+#define AR_RXCFG_DMASZ_32B 3
+#define AR_RXCFG_DMASZ_64B 4
+#define AR_RXCFG_DMASZ_128B 5
+#define AR_RXCFG_DMASZ_256B 6
+#define AR_RXCFG_DMASZ_512B 7
+
+#define AR_MIBC 0x0040
+#define AR_MIBC_COW 0x00000001
+#define AR_MIBC_FMC 0x00000002
+#define AR_MIBC_CMC 0x00000004
+#define AR_MIBC_MCS 0x00000008
+
+#define AR_TOPS 0x0044
+#define AR_TOPS_MASK 0x0000FFFF
+
+#define AR_RXNPTO 0x0048
+#define AR_RXNPTO_MASK 0x000003FF
+
+#define AR_TXNPTO 0x004C
+#define AR_TXNPTO_MASK 0x000003FF
+#define AR_TXNPTO_QCU_MASK 0x000FFC00
+
+#define AR_RPGTO 0x0050
+#define AR_RPGTO_MASK 0x000003FF
+
+#define AR_RPCNT 0x0054
+#define AR_RPCNT_MASK 0x0000001F
+
+#define AR_MACMISC 0x0058
+#define AR_MACMISC_PCI_EXT_FORCE 0x00000010
+#define AR_MACMISC_DMA_OBS 0x000001E0
+#define AR_MACMISC_DMA_OBS_S 5
+#define AR_MACMISC_DMA_OBS_LINE_0 0
+#define AR_MACMISC_DMA_OBS_LINE_1 1
+#define AR_MACMISC_DMA_OBS_LINE_2 2
+#define AR_MACMISC_DMA_OBS_LINE_3 3
+#define AR_MACMISC_DMA_OBS_LINE_4 4
+#define AR_MACMISC_DMA_OBS_LINE_5 5
+#define AR_MACMISC_DMA_OBS_LINE_6 6
+#define AR_MACMISC_DMA_OBS_LINE_7 7
+#define AR_MACMISC_DMA_OBS_LINE_8 8
+#define AR_MACMISC_MISC_OBS 0x00000E00
+#define AR_MACMISC_MISC_OBS_S 9
+#define AR_MACMISC_MISC_OBS_BUS_LSB 0x00007000
+#define AR_MACMISC_MISC_OBS_BUS_LSB_S 12
+#define AR_MACMISC_MISC_OBS_BUS_MSB 0x00038000
+#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
+#define AR_MACMISC_MISC_OBS_BUS_1 1
+
+#define AR_GTXTO 0x0064
+#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
+#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
+#define AR_GTXTO_TIMEOUT_LIMIT_S 16
+
+#define AR_GTTM 0x0068
+#define AR_GTTM_USEC 0x00000001
+#define AR_GTTM_IGNORE_IDLE 0x00000002
+#define AR_GTTM_RESET_IDLE 0x00000004
+#define AR_GTTM_CST_USEC 0x00000008
+
+#define AR_CST 0x006C
+#define AR_CST_TIMEOUT_COUNTER 0x0000FFFF
+#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
+#define AR_CST_TIMEOUT_LIMIT_S 16
+
+#define AR_SREV_VERSION_9100 0x014
+
+#define AR_SREV_5416_V20_OR_LATER(_ah) \
+ (AR_SREV_9100((_ah)) || AR_SREV_5416_20_OR_LATER(_ah))
+#define AR_SREV_5416_V22_OR_LATER(_ah) \
+ (AR_SREV_9100((_ah)) || AR_SREV_5416_22_OR_LATER(_ah))
+
+#define AR_ISR 0x0080
+#define AR_ISR_RXOK 0x00000001
+#define AR_ISR_RXDESC 0x00000002
+#define AR_ISR_RXERR 0x00000004
+#define AR_ISR_RXNOPKT 0x00000008
+#define AR_ISR_RXEOL 0x00000010
+#define AR_ISR_RXORN 0x00000020
+#define AR_ISR_TXOK 0x00000040
+#define AR_ISR_TXDESC 0x00000080
+#define AR_ISR_TXERR 0x00000100
+#define AR_ISR_TXNOPKT 0x00000200
+#define AR_ISR_TXEOL 0x00000400
+#define AR_ISR_TXURN 0x00000800
+#define AR_ISR_MIB 0x00001000
+#define AR_ISR_SWI 0x00002000
+#define AR_ISR_RXPHY 0x00004000
+#define AR_ISR_RXKCM 0x00008000
+#define AR_ISR_SWBA 0x00010000
+#define AR_ISR_BRSSI 0x00020000
+#define AR_ISR_BMISS 0x00040000
+#define AR_ISR_BNR 0x00100000
+#define AR_ISR_RXCHIRP 0x00200000
+#define AR_ISR_BCNMISC 0x00800000
+#define AR_ISR_TIM 0x00800000
+#define AR_ISR_QCBROVF 0x02000000
+#define AR_ISR_QCBRURN 0x04000000
+#define AR_ISR_QTRIG 0x08000000
+#define AR_ISR_GENTMR 0x10000000
+
+#define AR_ISR_TXMINTR 0x00080000
+#define AR_ISR_RXMINTR 0x01000000
+#define AR_ISR_TXINTM 0x40000000
+#define AR_ISR_RXINTM 0x80000000
+
+#define AR_ISR_S0 0x0084
+#define AR_ISR_S0_QCU_TXOK 0x000003FF
+#define AR_ISR_S0_QCU_TXOK_S 0
+#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
+#define AR_ISR_S0_QCU_TXDESC_S 16
+
+#define AR_ISR_S1 0x0088
+#define AR_ISR_S1_QCU_TXERR 0x000003FF
+#define AR_ISR_S1_QCU_TXERR_S 0
+#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
+#define AR_ISR_S1_QCU_TXEOL_S 16
+
+#define AR_ISR_S2 0x008c
+#define AR_ISR_S2_QCU_TXURN 0x000003FF
+#define AR_ISR_S2_CST 0x00400000
+#define AR_ISR_S2_GTT 0x00800000
+#define AR_ISR_S2_TIM 0x01000000
+#define AR_ISR_S2_CABEND 0x02000000
+#define AR_ISR_S2_DTIMSYNC 0x04000000
+#define AR_ISR_S2_BCNTO 0x08000000
+#define AR_ISR_S2_CABTO 0x10000000
+#define AR_ISR_S2_DTIM 0x20000000
+#define AR_ISR_S2_TSFOOR 0x40000000
+#define AR_ISR_S2_TBTT_TIME 0x80000000
+
+#define AR_ISR_S3 0x0090
+#define AR_ISR_S3_QCU_QCBROVF 0x000003FF
+#define AR_ISR_S3_QCU_QCBRURN 0x03FF0000
+
+#define AR_ISR_S4 0x0094
+#define AR_ISR_S4_QCU_QTRIG 0x000003FF
+#define AR_ISR_S4_RESV0 0xFFFFFC00
+
+#define AR_ISR_S5 0x0098
+#define AR_ISR_S5_TIMER_TRIG 0x000000FF
+#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
+#define AR_ISR_S5_TIM_TIMER 0x00000010
+#define AR_ISR_S5_DTIM_TIMER 0x00000020
+#define AR_ISR_S5_S 0x00d8
+#define AR_IMR_S5 0x00b8
+#define AR_IMR_S5_TIM_TIMER 0x00000010
+#define AR_IMR_S5_DTIM_TIMER 0x00000020
+
+
+#define AR_IMR 0x00a0
+#define AR_IMR_RXOK 0x00000001
+#define AR_IMR_RXDESC 0x00000002
+#define AR_IMR_RXERR 0x00000004
+#define AR_IMR_RXNOPKT 0x00000008
+#define AR_IMR_RXEOL 0x00000010
+#define AR_IMR_RXORN 0x00000020
+#define AR_IMR_TXOK 0x00000040
+#define AR_IMR_TXDESC 0x00000080
+#define AR_IMR_TXERR 0x00000100
+#define AR_IMR_TXNOPKT 0x00000200
+#define AR_IMR_TXEOL 0x00000400
+#define AR_IMR_TXURN 0x00000800
+#define AR_IMR_MIB 0x00001000
+#define AR_IMR_SWI 0x00002000
+#define AR_IMR_RXPHY 0x00004000
+#define AR_IMR_RXKCM 0x00008000
+#define AR_IMR_SWBA 0x00010000
+#define AR_IMR_BRSSI 0x00020000
+#define AR_IMR_BMISS 0x00040000
+#define AR_IMR_BNR 0x00100000
+#define AR_IMR_RXCHIRP 0x00200000
+#define AR_IMR_BCNMISC 0x00800000
+#define AR_IMR_TIM 0x00800000
+#define AR_IMR_QCBROVF 0x02000000
+#define AR_IMR_QCBRURN 0x04000000
+#define AR_IMR_QTRIG 0x08000000
+#define AR_IMR_GENTMR 0x10000000
+
+#define AR_IMR_TXMINTR 0x00080000
+#define AR_IMR_RXMINTR 0x01000000
+#define AR_IMR_TXINTM 0x40000000
+#define AR_IMR_RXINTM 0x80000000
+
+#define AR_IMR_S0 0x00a4
+#define AR_IMR_S0_QCU_TXOK 0x000003FF
+#define AR_IMR_S0_QCU_TXOK_S 0
+#define AR_IMR_S0_QCU_TXDESC 0x03FF0000
+#define AR_IMR_S0_QCU_TXDESC_S 16
+
+#define AR_IMR_S1 0x00a8
+#define AR_IMR_S1_QCU_TXERR 0x000003FF
+#define AR_IMR_S1_QCU_TXERR_S 0
+#define AR_IMR_S1_QCU_TXEOL 0x03FF0000
+#define AR_IMR_S1_QCU_TXEOL_S 16
+
+#define AR_IMR_S2 0x00ac
+#define AR_IMR_S2_QCU_TXURN 0x000003FF
+#define AR_IMR_S2_QCU_TXURN_S 0
+#define AR_IMR_S2_CST 0x00400000
+#define AR_IMR_S2_GTT 0x00800000
+#define AR_IMR_S2_TIM 0x01000000
+#define AR_IMR_S2_CABEND 0x02000000
+#define AR_IMR_S2_DTIMSYNC 0x04000000
+#define AR_IMR_S2_BCNTO 0x08000000
+#define AR_IMR_S2_CABTO 0x10000000
+#define AR_IMR_S2_DTIM 0x20000000
+#define AR_IMR_S2_TSFOOR 0x40000000
+
+#define AR_IMR_S3 0x00b0
+#define AR_IMR_S3_QCU_QCBROVF 0x000003FF
+#define AR_IMR_S3_QCU_QCBRURN 0x03FF0000
+#define AR_IMR_S3_QCU_QCBRURN_S 16
+
+#define AR_IMR_S4 0x00b4
+#define AR_IMR_S4_QCU_QTRIG 0x000003FF
+#define AR_IMR_S4_RESV0 0xFFFFFC00
+
+#define AR_IMR_S5 0x00b8
+#define AR_IMR_S5_TIMER_TRIG 0x000000FF
+#define AR_IMR_S5_TIMER_THRESH 0x0000FF00
+
+
+#define AR_ISR_RAC 0x00c0
+#define AR_ISR_S0_S 0x00c4
+#define AR_ISR_S0_QCU_TXOK 0x000003FF
+#define AR_ISR_S0_QCU_TXOK_S 0
+#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
+#define AR_ISR_S0_QCU_TXDESC_S 16
+
+#define AR_ISR_S1_S 0x00c8
+#define AR_ISR_S1_QCU_TXERR 0x000003FF
+#define AR_ISR_S1_QCU_TXERR_S 0
+#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
+#define AR_ISR_S1_QCU_TXEOL_S 16
+
+#define AR_ISR_S2_S 0x00cc
+#define AR_ISR_S3_S 0x00d0
+#define AR_ISR_S4_S 0x00d4
+#define AR_ISR_S5_S 0x00d8
+#define AR_DMADBG_0 0x00e0
+#define AR_DMADBG_1 0x00e4
+#define AR_DMADBG_2 0x00e8
+#define AR_DMADBG_3 0x00ec
+#define AR_DMADBG_4 0x00f0
+#define AR_DMADBG_5 0x00f4
+#define AR_DMADBG_6 0x00f8
+#define AR_DMADBG_7 0x00fc
+
+#define AR_NUM_QCU 10
+#define AR_QCU_0 0x0001
+#define AR_QCU_1 0x0002
+#define AR_QCU_2 0x0004
+#define AR_QCU_3 0x0008
+#define AR_QCU_4 0x0010
+#define AR_QCU_5 0x0020
+#define AR_QCU_6 0x0040
+#define AR_QCU_7 0x0080
+#define AR_QCU_8 0x0100
+#define AR_QCU_9 0x0200
+
+#define AR_Q0_TXDP 0x0800
+#define AR_Q1_TXDP 0x0804
+#define AR_Q2_TXDP 0x0808
+#define AR_Q3_TXDP 0x080c
+#define AR_Q4_TXDP 0x0810
+#define AR_Q5_TXDP 0x0814
+#define AR_Q6_TXDP 0x0818
+#define AR_Q7_TXDP 0x081c
+#define AR_Q8_TXDP 0x0820
+#define AR_Q9_TXDP 0x0824
+#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
+
+#define AR_Q_TXE 0x0840
+#define AR_Q_TXE_M 0x000003FF
+
+#define AR_Q_TXD 0x0880
+#define AR_Q_TXD_M 0x000003FF
+
+#define AR_Q0_CBRCFG 0x08c0
+#define AR_Q1_CBRCFG 0x08c4
+#define AR_Q2_CBRCFG 0x08c8
+#define AR_Q3_CBRCFG 0x08cc
+#define AR_Q4_CBRCFG 0x08d0
+#define AR_Q5_CBRCFG 0x08d4
+#define AR_Q6_CBRCFG 0x08d8
+#define AR_Q7_CBRCFG 0x08dc
+#define AR_Q8_CBRCFG 0x08e0
+#define AR_Q9_CBRCFG 0x08e4
+#define AR_QCBRCFG(_i) (AR_Q0_CBRCFG + ((_i)<<2))
+#define AR_Q_CBRCFG_INTERVAL 0x00FFFFFF
+#define AR_Q_CBRCFG_INTERVAL_S 0
+#define AR_Q_CBRCFG_OVF_THRESH 0xFF000000
+#define AR_Q_CBRCFG_OVF_THRESH_S 24
+
+#define AR_Q0_RDYTIMECFG 0x0900
+#define AR_Q1_RDYTIMECFG 0x0904
+#define AR_Q2_RDYTIMECFG 0x0908
+#define AR_Q3_RDYTIMECFG 0x090c
+#define AR_Q4_RDYTIMECFG 0x0910
+#define AR_Q5_RDYTIMECFG 0x0914
+#define AR_Q6_RDYTIMECFG 0x0918
+#define AR_Q7_RDYTIMECFG 0x091c
+#define AR_Q8_RDYTIMECFG 0x0920
+#define AR_Q9_RDYTIMECFG 0x0924
+#define AR_QRDYTIMECFG(_i) (AR_Q0_RDYTIMECFG + ((_i)<<2))
+#define AR_Q_RDYTIMECFG_DURATION 0x00FFFFFF
+#define AR_Q_RDYTIMECFG_DURATION_S 0
+#define AR_Q_RDYTIMECFG_EN 0x01000000
+
+#define AR_Q_ONESHOTARM_SC 0x0940
+#define AR_Q_ONESHOTARM_SC_M 0x000003FF
+#define AR_Q_ONESHOTARM_SC_RESV0 0xFFFFFC00
+
+#define AR_Q_ONESHOTARM_CC 0x0980
+#define AR_Q_ONESHOTARM_CC_M 0x000003FF
+#define AR_Q_ONESHOTARM_CC_RESV0 0xFFFFFC00
+
+#define AR_Q0_MISC 0x09c0
+#define AR_Q1_MISC 0x09c4
+#define AR_Q2_MISC 0x09c8
+#define AR_Q3_MISC 0x09cc
+#define AR_Q4_MISC 0x09d0
+#define AR_Q5_MISC 0x09d4
+#define AR_Q6_MISC 0x09d8
+#define AR_Q7_MISC 0x09dc
+#define AR_Q8_MISC 0x09e0
+#define AR_Q9_MISC 0x09e4
+#define AR_QMISC(_i) (AR_Q0_MISC + ((_i)<<2))
+#define AR_Q_MISC_FSP 0x0000000F
+#define AR_Q_MISC_FSP_ASAP 0
+#define AR_Q_MISC_FSP_CBR 1
+#define AR_Q_MISC_FSP_DBA_GATED 2
+#define AR_Q_MISC_FSP_TIM_GATED 3
+#define AR_Q_MISC_FSP_BEACON_SENT_GATED 4
+#define AR_Q_MISC_FSP_BEACON_RCVD_GATED 5
+#define AR_Q_MISC_ONE_SHOT_EN 0x00000010
+#define AR_Q_MISC_CBR_INCR_DIS1 0x00000020
+#define AR_Q_MISC_CBR_INCR_DIS0 0x00000040
+#define AR_Q_MISC_BEACON_USE 0x00000080
+#define AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN 0x00000100
+#define AR_Q_MISC_RDYTIME_EXP_POLICY 0x00000200
+#define AR_Q_MISC_RESET_CBR_EXP_CTR 0x00000400
+#define AR_Q_MISC_DCU_EARLY_TERM_REQ 0x00000800
+#define AR_Q_MISC_RESV0 0xFFFFF000
+
+#define AR_Q0_STS 0x0a00
+#define AR_Q1_STS 0x0a04
+#define AR_Q2_STS 0x0a08
+#define AR_Q3_STS 0x0a0c
+#define AR_Q4_STS 0x0a10
+#define AR_Q5_STS 0x0a14
+#define AR_Q6_STS 0x0a18
+#define AR_Q7_STS 0x0a1c
+#define AR_Q8_STS 0x0a20
+#define AR_Q9_STS 0x0a24
+#define AR_QSTS(_i) (AR_Q0_STS + ((_i)<<2))
+#define AR_Q_STS_PEND_FR_CNT 0x00000003
+#define AR_Q_STS_RESV0 0x000000FC
+#define AR_Q_STS_CBR_EXP_CNT 0x0000FF00
+#define AR_Q_STS_RESV1 0xFFFF0000
+
+#define AR_Q_RDYTIMESHDN 0x0a40
+#define AR_Q_RDYTIMESHDN_M 0x000003FF
+
+
+#define AR_NUM_DCU 10
+#define AR_DCU_0 0x0001
+#define AR_DCU_1 0x0002
+#define AR_DCU_2 0x0004
+#define AR_DCU_3 0x0008
+#define AR_DCU_4 0x0010
+#define AR_DCU_5 0x0020
+#define AR_DCU_6 0x0040
+#define AR_DCU_7 0x0080
+#define AR_DCU_8 0x0100
+#define AR_DCU_9 0x0200
+
+#define AR_D0_QCUMASK 0x1000
+#define AR_D1_QCUMASK 0x1004
+#define AR_D2_QCUMASK 0x1008
+#define AR_D3_QCUMASK 0x100c
+#define AR_D4_QCUMASK 0x1010
+#define AR_D5_QCUMASK 0x1014
+#define AR_D6_QCUMASK 0x1018
+#define AR_D7_QCUMASK 0x101c
+#define AR_D8_QCUMASK 0x1020
+#define AR_D9_QCUMASK 0x1024
+#define AR_DQCUMASK(_i) (AR_D0_QCUMASK + ((_i)<<2))
+#define AR_D_QCUMASK 0x000003FF
+#define AR_D_QCUMASK_RESV0 0xFFFFFC00
+
+#define AR_D_TXBLK_CMD 0x1038
+#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
+
+#define AR_D0_LCL_IFS 0x1040
+#define AR_D1_LCL_IFS 0x1044
+#define AR_D2_LCL_IFS 0x1048
+#define AR_D3_LCL_IFS 0x104c
+#define AR_D4_LCL_IFS 0x1050
+#define AR_D5_LCL_IFS 0x1054
+#define AR_D6_LCL_IFS 0x1058
+#define AR_D7_LCL_IFS 0x105c
+#define AR_D8_LCL_IFS 0x1060
+#define AR_D9_LCL_IFS 0x1064
+#define AR_DLCL_IFS(_i) (AR_D0_LCL_IFS + ((_i)<<2))
+#define AR_D_LCL_IFS_CWMIN 0x000003FF
+#define AR_D_LCL_IFS_CWMIN_S 0
+#define AR_D_LCL_IFS_CWMAX 0x000FFC00
+#define AR_D_LCL_IFS_CWMAX_S 10
+#define AR_D_LCL_IFS_AIFS 0x0FF00000
+#define AR_D_LCL_IFS_AIFS_S 20
+
+#define AR_D_LCL_IFS_RESV0 0xF0000000
+
+#define AR_D0_RETRY_LIMIT 0x1080
+#define AR_D1_RETRY_LIMIT 0x1084
+#define AR_D2_RETRY_LIMIT 0x1088
+#define AR_D3_RETRY_LIMIT 0x108c
+#define AR_D4_RETRY_LIMIT 0x1090
+#define AR_D5_RETRY_LIMIT 0x1094
+#define AR_D6_RETRY_LIMIT 0x1098
+#define AR_D7_RETRY_LIMIT 0x109c
+#define AR_D8_RETRY_LIMIT 0x10a0
+#define AR_D9_RETRY_LIMIT 0x10a4
+#define AR_DRETRY_LIMIT(_i) (AR_D0_RETRY_LIMIT + ((_i)<<2))
+#define AR_D_RETRY_LIMIT_FR_SH 0x0000000F
+#define AR_D_RETRY_LIMIT_FR_SH_S 0
+#define AR_D_RETRY_LIMIT_STA_SH 0x00003F00
+#define AR_D_RETRY_LIMIT_STA_SH_S 8
+#define AR_D_RETRY_LIMIT_STA_LG 0x000FC000
+#define AR_D_RETRY_LIMIT_STA_LG_S 14
+#define AR_D_RETRY_LIMIT_RESV0 0xFFF00000
+
+#define AR_D0_CHNTIME 0x10c0
+#define AR_D1_CHNTIME 0x10c4
+#define AR_D2_CHNTIME 0x10c8
+#define AR_D3_CHNTIME 0x10cc
+#define AR_D4_CHNTIME 0x10d0
+#define AR_D5_CHNTIME 0x10d4
+#define AR_D6_CHNTIME 0x10d8
+#define AR_D7_CHNTIME 0x10dc
+#define AR_D8_CHNTIME 0x10e0
+#define AR_D9_CHNTIME 0x10e4
+#define AR_DCHNTIME(_i) (AR_D0_CHNTIME + ((_i)<<2))
+#define AR_D_CHNTIME_DUR 0x000FFFFF
+#define AR_D_CHNTIME_DUR_S 0
+#define AR_D_CHNTIME_EN 0x00100000
+#define AR_D_CHNTIME_RESV0 0xFFE00000
+
+#define AR_D0_MISC 0x1100
+#define AR_D1_MISC 0x1104
+#define AR_D2_MISC 0x1108
+#define AR_D3_MISC 0x110c
+#define AR_D4_MISC 0x1110
+#define AR_D5_MISC 0x1114
+#define AR_D6_MISC 0x1118
+#define AR_D7_MISC 0x111c
+#define AR_D8_MISC 0x1120
+#define AR_D9_MISC 0x1124
+#define AR_DMISC(_i) (AR_D0_MISC + ((_i)<<2))
+#define AR_D_MISC_BKOFF_THRESH 0x0000003F
+#define AR_D_MISC_RETRY_CNT_RESET_EN 0x00000040
+#define AR_D_MISC_CW_RESET_EN 0x00000080
+#define AR_D_MISC_FRAG_WAIT_EN 0x00000100
+#define AR_D_MISC_FRAG_BKOFF_EN 0x00000200
+#define AR_D_MISC_CW_BKOFF_EN 0x00001000
+#define AR_D_MISC_VIR_COL_HANDLING 0x0000C000
+#define AR_D_MISC_VIR_COL_HANDLING_S 14
+#define AR_D_MISC_VIR_COL_HANDLING_DEFAULT 0
+#define AR_D_MISC_VIR_COL_HANDLING_IGNORE 1
+#define AR_D_MISC_BEACON_USE 0x00010000
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL 0x00060000
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_S 17
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2
+#define AR_D_MISC_ARB_LOCKOUT_IGNORE 0x00080000
+#define AR_D_MISC_SEQ_NUM_INCR_DIS 0x00100000
+#define AR_D_MISC_POST_FR_BKOFF_DIS 0x00200000
+#define AR_D_MISC_VIT_COL_CW_BKOFF_EN 0x00400000
+#define AR_D_MISC_BLOWN_IFS_RETRY_EN 0x00800000
+#define AR_D_MISC_RESV0 0xFF000000
+
+#define AR_D_SEQNUM 0x1140
+
+#define AR_D_GBL_IFS_SIFS 0x1030
+#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF
+#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF
+
+#define AR_D_TXBLK_BASE 0x1038
+#define AR_D_TXBLK_WRITE_BITMASK 0x0000FFFF
+#define AR_D_TXBLK_WRITE_BITMASK_S 0
+#define AR_D_TXBLK_WRITE_SLICE 0x000F0000
+#define AR_D_TXBLK_WRITE_SLICE_S 16
+#define AR_D_TXBLK_WRITE_DCU 0x00F00000
+#define AR_D_TXBLK_WRITE_DCU_S 20
+#define AR_D_TXBLK_WRITE_COMMAND 0x0F000000
+#define AR_D_TXBLK_WRITE_COMMAND_S 24
+
+#define AR_D_GBL_IFS_SLOT 0x1070
+#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF
+#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000
+
+#define AR_D_GBL_IFS_EIFS 0x10b0
+#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
+#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
+
+#define AR_D_GBL_IFS_MISC 0x10f0
+#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
+#define AR_D_GBL_IFS_MISC_TURBO_MODE 0x00000008
+#define AR_D_GBL_IFS_MISC_USEC_DURATION 0x000FFC00
+#define AR_D_GBL_IFS_MISC_DCU_ARBITER_DLY 0x00300000
+#define AR_D_GBL_IFS_MISC_RANDOM_LFSR_SLICE_DIS 0x01000000
+#define AR_D_GBL_IFS_MISC_SLOT_XMIT_WIND_LEN 0x06000000
+#define AR_D_GBL_IFS_MISC_FORCE_XMIT_SLOT_BOUND 0x08000000
+#define AR_D_GBL_IFS_MISC_IGNORE_BACKOFF 0x10000000
+
+#define AR_D_FPCTL 0x1230
+#define AR_D_FPCTL_DCU 0x0000000F
+#define AR_D_FPCTL_DCU_S 0
+#define AR_D_FPCTL_PREFETCH_EN 0x00000010
+#define AR_D_FPCTL_BURST_PREFETCH 0x00007FE0
+#define AR_D_FPCTL_BURST_PREFETCH_S 5
+
+#define AR_D_TXPSE 0x1270
+#define AR_D_TXPSE_CTRL 0x000003FF
+#define AR_D_TXPSE_RESV0 0x0000FC00
+#define AR_D_TXPSE_STATUS 0x00010000
+#define AR_D_TXPSE_RESV1 0xFFFE0000
+
+#define AR_D_TXSLOTMASK 0x12f0
+#define AR_D_TXSLOTMASK_NUM 0x0000000F
+
+#define AR_CFG_LED 0x1f04
+#define AR_CFG_SCLK_RATE_IND 0x00000003
+#define AR_CFG_SCLK_RATE_IND_S 0
+#define AR_CFG_SCLK_32MHZ 0x00000000
+#define AR_CFG_SCLK_4MHZ 0x00000001
+#define AR_CFG_SCLK_1MHZ 0x00000002
+#define AR_CFG_SCLK_32KHZ 0x00000003
+#define AR_CFG_LED_BLINK_SLOW 0x00000008
+#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
+#define AR_CFG_LED_MODE_SEL 0x00000380
+#define AR_CFG_LED_MODE_SEL_S 7
+#define AR_CFG_LED_POWER 0x00000280
+#define AR_CFG_LED_POWER_S 7
+#define AR_CFG_LED_NETWORK 0x00000300
+#define AR_CFG_LED_NETWORK_S 7
+#define AR_CFG_LED_MODE_PROP 0x0
+#define AR_CFG_LED_MODE_RPROP 0x1
+#define AR_CFG_LED_MODE_SPLIT 0x2
+#define AR_CFG_LED_MODE_RAND 0x3
+#define AR_CFG_LED_MODE_POWER_OFF 0x4
+#define AR_CFG_LED_MODE_POWER_ON 0x5
+#define AR_CFG_LED_MODE_NETWORK_OFF 0x4
+#define AR_CFG_LED_MODE_NETWORK_ON 0x6
+#define AR_CFG_LED_ASSOC_CTL 0x00000c00
+#define AR_CFG_LED_ASSOC_CTL_S 10
+#define AR_CFG_LED_ASSOC_NONE 0x0
+#define AR_CFG_LED_ASSOC_ACTIVE 0x1
+#define AR_CFG_LED_ASSOC_PENDING 0x2
+
+#define AR_CFG_LED_BLINK_SLOW 0x00000008
+#define AR_CFG_LED_BLINK_SLOW_S 3
+
+#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
+#define AR_CFG_LED_BLINK_THRESH_SEL_S 4
+
+#define AR_MAC_SLEEP 0x1f00
+#define AR_MAC_SLEEP_MAC_AWAKE 0x00000000
+#define AR_MAC_SLEEP_MAC_ASLEEP 0x00000001
+
+#define AR_RC 0x4000
+#define AR_RC_AHB 0x00000001
+#define AR_RC_APB 0x00000002
+#define AR_RC_HOSTIF 0x00000100
+
+#define AR_WA 0x4004
+
+#define AR_PM_STATE 0x4008
+#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
+
+#define AR_HOST_TIMEOUT 0x4018
+#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
+#define AR_HOST_TIMEOUT_APB_CNTR_S 0
+#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
+#define AR_HOST_TIMEOUT_LCL_CNTR_S 16
+
+#define AR_EEPROM 0x401c
+#define AR_EEPROM_ABSENT 0x00000100
+#define AR_EEPROM_CORRUPT 0x00000200
+#define AR_EEPROM_PROT_MASK 0x03FFFC00
+#define AR_EEPROM_PROT_MASK_S 10
+
+#define EEPROM_PROTECT_RP_0_31 0x0001
+#define EEPROM_PROTECT_WP_0_31 0x0002
+#define EEPROM_PROTECT_RP_32_63 0x0004
+#define EEPROM_PROTECT_WP_32_63 0x0008
+#define EEPROM_PROTECT_RP_64_127 0x0010
+#define EEPROM_PROTECT_WP_64_127 0x0020
+#define EEPROM_PROTECT_RP_128_191 0x0040
+#define EEPROM_PROTECT_WP_128_191 0x0080
+#define EEPROM_PROTECT_RP_192_255 0x0100
+#define EEPROM_PROTECT_WP_192_255 0x0200
+#define EEPROM_PROTECT_RP_256_511 0x0400
+#define EEPROM_PROTECT_WP_256_511 0x0800
+#define EEPROM_PROTECT_RP_512_1023 0x1000
+#define EEPROM_PROTECT_WP_512_1023 0x2000
+#define EEPROM_PROTECT_RP_1024_2047 0x4000
+#define EEPROM_PROTECT_WP_1024_2047 0x8000
+
+#define AR_SREV \
+ ((AR_SREV_9100(ah)) ? 0x0600 : 0x4020)
+
+#define AR_SREV_ID \
+ ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
+#define AR_SREV_VERSION 0x000000F0
+#define AR_SREV_VERSION_S 4
+#define AR_SREV_REVISION 0x00000007
+
+#define AR_SREV_ID2 0xFFFFFFFF
+#define AR_SREV_VERSION2 0xFFFC0000
+#define AR_SREV_VERSION2_S 18
+#define AR_SREV_TYPE2 0x0003F000
+#define AR_SREV_TYPE2_S 12
+#define AR_SREV_TYPE2_CHAIN 0x00001000
+#define AR_SREV_TYPE2_HOST_MODE 0x00002000
+#define AR_SREV_REVISION2 0x00000F00
+#define AR_SREV_REVISION2_S 8
+
+#define AR_SREV_VERSION_5416_PCI 0xD
+#define AR_SREV_VERSION_5416_PCIE 0xC
+#define AR_SREV_REVISION_5416_10 0
+#define AR_SREV_REVISION_5416_20 1
+#define AR_SREV_REVISION_5416_22 2
+#define AR_SREV_VERSION_9160 0x40
+#define AR_SREV_REVISION_9160_10 0
+#define AR_SREV_REVISION_9160_11 1
+#define AR_SREV_VERSION_9280 0x80
+#define AR_SREV_REVISION_9280_10 0
+#define AR_SREV_REVISION_9280_20 1
+#define AR_SREV_REVISION_9280_21 2
+#define AR_SREV_VERSION_9285 0xC0
+#define AR_SREV_REVISION_9285_10 0
+
+#define AR_SREV_9100_OR_LATER(_ah) \
+ (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE))
+#define AR_SREV_5416_20_OR_LATER(_ah) \
+ (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
+ ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_20))
+#define AR_SREV_5416_22_OR_LATER(_ah) \
+ (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
+ ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_22))
+#define AR_SREV_9160(_ah) \
+ (((_ah)->ah_macVersion == AR_SREV_VERSION_9160))
+#define AR_SREV_9160_10_OR_LATER(_ah) \
+ (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160))
+#define AR_SREV_9160_11(_ah) \
+ (AR_SREV_9160(_ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9160_11))
+#define AR_SREV_9280(_ah) \
+ (((_ah)->ah_macVersion == AR_SREV_VERSION_9280))
+#define AR_SREV_9280_10_OR_LATER(_ah) \
+ (((_ah)->ah_macVersion >= AR_SREV_VERSION_9280))
+#define AR_SREV_9280_20(_ah) \
+ (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
+ ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20))
+#define AR_SREV_9280_20_OR_LATER(_ah) \
+ (((_ah)->ah_macVersion > AR_SREV_VERSION_9280) || \
+ (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
+ ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20)))
+
+#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285))
+#define AR_SREV_9285_10_OR_LATER(_ah) \
+ (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285))
+
+#define AR_RADIO_SREV_MAJOR 0xf0
+#define AR_RAD5133_SREV_MAJOR 0xc0
+#define AR_RAD2133_SREV_MAJOR 0xd0
+#define AR_RAD5122_SREV_MAJOR 0xe0
+#define AR_RAD2122_SREV_MAJOR 0xf0
+
+#define AR_AHB_MODE 0x4024
+#define AR_AHB_EXACT_WR_EN 0x00000000
+#define AR_AHB_BUF_WR_EN 0x00000001
+#define AR_AHB_EXACT_RD_EN 0x00000000
+#define AR_AHB_CACHELINE_RD_EN 0x00000002
+#define AR_AHB_PREFETCH_RD_EN 0x00000004
+#define AR_AHB_PAGE_SIZE_1K 0x00000000
+#define AR_AHB_PAGE_SIZE_2K 0x00000008
+#define AR_AHB_PAGE_SIZE_4K 0x00000010
+
+#define AR_INTR_RTC_IRQ 0x00000001
+#define AR_INTR_MAC_IRQ 0x00000002
+#define AR_INTR_EEP_PROT_ACCESS 0x00000004
+#define AR_INTR_MAC_AWAKE 0x00020000
+#define AR_INTR_MAC_ASLEEP 0x00040000
+#define AR_INTR_SPURIOUS 0xFFFFFFFF
+
+
+#define AR_INTR_SYNC_CAUSE_CLR 0x4028
+
+#define AR_INTR_SYNC_CAUSE 0x4028
+
+#define AR_INTR_SYNC_ENABLE 0x402c
+#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
+#define AR_INTR_SYNC_ENABLE_GPIO_S 18
+
+enum {
+ AR_INTR_SYNC_RTC_IRQ = 0x00000001,
+ AR_INTR_SYNC_MAC_IRQ = 0x00000002,
+ AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS = 0x00000004,
+ AR_INTR_SYNC_APB_TIMEOUT = 0x00000008,
+ AR_INTR_SYNC_PCI_MODE_CONFLICT = 0x00000010,
+ AR_INTR_SYNC_HOST1_FATAL = 0x00000020,
+ AR_INTR_SYNC_HOST1_PERR = 0x00000040,
+ AR_INTR_SYNC_TRCV_FIFO_PERR = 0x00000080,
+ AR_INTR_SYNC_RADM_CPL_EP = 0x00000100,
+ AR_INTR_SYNC_RADM_CPL_DLLP_ABORT = 0x00000200,
+ AR_INTR_SYNC_RADM_CPL_TLP_ABORT = 0x00000400,
+ AR_INTR_SYNC_RADM_CPL_ECRC_ERR = 0x00000800,
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT = 0x00001000,
+ AR_INTR_SYNC_LOCAL_TIMEOUT = 0x00002000,
+ AR_INTR_SYNC_PM_ACCESS = 0x00004000,
+ AR_INTR_SYNC_MAC_AWAKE = 0x00008000,
+ AR_INTR_SYNC_MAC_ASLEEP = 0x00010000,
+ AR_INTR_SYNC_MAC_SLEEP_ACCESS = 0x00020000,
+ AR_INTR_SYNC_ALL = 0x0003FFFF,
+
+
+ AR_INTR_SYNC_DEFAULT = (AR_INTR_SYNC_HOST1_FATAL |
+ AR_INTR_SYNC_HOST1_PERR |
+ AR_INTR_SYNC_RADM_CPL_EP |
+ AR_INTR_SYNC_RADM_CPL_DLLP_ABORT |
+ AR_INTR_SYNC_RADM_CPL_TLP_ABORT |
+ AR_INTR_SYNC_RADM_CPL_ECRC_ERR |
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT |
+ AR_INTR_SYNC_LOCAL_TIMEOUT |
+ AR_INTR_SYNC_MAC_SLEEP_ACCESS),
+
+ AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF,
+
+};
+
+#define AR_INTR_ASYNC_MASK 0x4030
+#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
+#define AR_INTR_ASYNC_MASK_GPIO_S 18
+
+#define AR_INTR_SYNC_MASK 0x4034
+#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
+#define AR_INTR_SYNC_MASK_GPIO_S 18
+
+#define AR_INTR_ASYNC_CAUSE_CLR 0x4038
+#define AR_INTR_ASYNC_CAUSE 0x4038
+
+#define AR_INTR_ASYNC_ENABLE 0x403c
+#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
+#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
+
+#define AR_PCIE_SERDES 0x4040
+#define AR_PCIE_SERDES2 0x4044
+#define AR_PCIE_PM_CTRL 0x4014
+#define AR_PCIE_PM_CTRL_ENA 0x00080000
+
+#define AR_NUM_GPIO 14
+#define AR928X_NUM_GPIO 10
+
+#define AR_GPIO_IN_OUT 0x4048
+#define AR_GPIO_IN_VAL 0x0FFFC000
+#define AR_GPIO_IN_VAL_S 14
+#define AR928X_GPIO_IN_VAL 0x000FFC00
+#define AR928X_GPIO_IN_VAL_S 10
+
+#define AR_GPIO_OE_OUT 0x404c
+#define AR_GPIO_OE_OUT_DRV 0x3
+#define AR_GPIO_OE_OUT_DRV_NO 0x0
+#define AR_GPIO_OE_OUT_DRV_LOW 0x1
+#define AR_GPIO_OE_OUT_DRV_HI 0x2
+#define AR_GPIO_OE_OUT_DRV_ALL 0x3
+
+#define AR_GPIO_INTR_POL 0x4050
+#define AR_GPIO_INTR_POL_VAL 0x00001FFF
+#define AR_GPIO_INTR_POL_VAL_S 0
+
+#define AR_GPIO_INPUT_EN_VAL 0x4054
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
+#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
+#define AR_GPIO_JTAG_DISABLE 0x00020000
+
+#define AR_GPIO_INPUT_MUX1 0x4058
+
+#define AR_GPIO_INPUT_MUX2 0x405c
+#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
+#define AR_GPIO_INPUT_MUX2_CLK25_S 0
+#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
+#define AR_GPIO_INPUT_MUX2_RFSILENT_S 4
+#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
+#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
+
+#define AR_GPIO_OUTPUT_MUX1 0x4060
+#define AR_GPIO_OUTPUT_MUX2 0x4064
+#define AR_GPIO_OUTPUT_MUX3 0x4068
+
+#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
+#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
+#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
+#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
+#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
+
+#define AR_INPUT_STATE 0x406c
+
+#define AR_EEPROM_STATUS_DATA 0x407c
+#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
+#define AR_EEPROM_STATUS_DATA_VAL_S 0
+#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
+#define AR_EEPROM_STATUS_DATA_BUSY_ACCESS 0x00020000
+#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
+#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
+
+#define AR_OBS 0x4080
+
+#define AR_PCIE_MSI 0x4094
+#define AR_PCIE_MSI_ENABLE 0x00000001
+
+
+#define AR_RTC_9160_PLL_DIV 0x000003ff
+#define AR_RTC_9160_PLL_DIV_S 0
+#define AR_RTC_9160_PLL_REFDIV 0x00003C00
+#define AR_RTC_9160_PLL_REFDIV_S 10
+#define AR_RTC_9160_PLL_CLKSEL 0x0000C000
+#define AR_RTC_9160_PLL_CLKSEL_S 14
+
+#define AR_RTC_BASE 0x00020000
+#define AR_RTC_RC \
+ (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000
+#define AR_RTC_RC_M 0x00000003
+#define AR_RTC_RC_MAC_WARM 0x00000001
+#define AR_RTC_RC_MAC_COLD 0x00000002
+#define AR_RTC_RC_COLD_RESET 0x00000004
+#define AR_RTC_RC_WARM_RESET 0x00000008
+
+#define AR_RTC_PLL_CONTROL \
+ (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014
+
+#define AR_RTC_PLL_DIV 0x0000001f
+#define AR_RTC_PLL_DIV_S 0
+#define AR_RTC_PLL_DIV2 0x00000020
+#define AR_RTC_PLL_REFDIV_5 0x000000c0
+#define AR_RTC_PLL_CLKSEL 0x00000300
+#define AR_RTC_PLL_CLKSEL_S 8
+
+
+
+#define AR_RTC_RESET \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
+#define AR_RTC_RESET_EN (0x00000001)
+
+#define AR_RTC_STATUS \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
+
+#define AR_RTC_STATUS_M \
+ ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f)
+
+#define AR_RTC_PM_STATUS_M 0x0000000f
+
+#define AR_RTC_STATUS_SHUTDOWN 0x00000001
+#define AR_RTC_STATUS_ON 0x00000002
+#define AR_RTC_STATUS_SLEEP 0x00000004
+#define AR_RTC_STATUS_WAKEUP 0x00000008
+
+#define AR_RTC_SLEEP_CLK \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
+#define AR_RTC_FORCE_DERIVED_CLK 0x2
+
+#define AR_RTC_FORCE_WAKE \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
+#define AR_RTC_FORCE_WAKE_EN 0x00000001
+#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002
+
+
+#define AR_RTC_INTR_CAUSE \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
+
+#define AR_RTC_INTR_ENABLE \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
+
+#define AR_RTC_INTR_MASK \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+
+#define AR_SEQ_MASK 0x8060
+
+#define AR_AN_RF2G1_CH0 0x7810
+#define AR_AN_RF2G1_CH0_OB 0x03800000
+#define AR_AN_RF2G1_CH0_OB_S 23
+#define AR_AN_RF2G1_CH0_DB 0x1C000000
+#define AR_AN_RF2G1_CH0_DB_S 26
+
+#define AR_AN_RF5G1_CH0 0x7818
+#define AR_AN_RF5G1_CH0_OB5 0x00070000
+#define AR_AN_RF5G1_CH0_OB5_S 16
+#define AR_AN_RF5G1_CH0_DB5 0x00380000
+#define AR_AN_RF5G1_CH0_DB5_S 19
+
+#define AR_AN_RF2G1_CH1 0x7834
+#define AR_AN_RF2G1_CH1_OB 0x03800000
+#define AR_AN_RF2G1_CH1_OB_S 23
+#define AR_AN_RF2G1_CH1_DB 0x1C000000
+#define AR_AN_RF2G1_CH1_DB_S 26
+
+#define AR_AN_RF5G1_CH1 0x783C
+#define AR_AN_RF5G1_CH1_OB5 0x00070000
+#define AR_AN_RF5G1_CH1_OB5_S 16
+#define AR_AN_RF5G1_CH1_DB5 0x00380000
+#define AR_AN_RF5G1_CH1_DB5_S 19
+
+#define AR_AN_TOP2 0x7894
+#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000
+#define AR_AN_TOP2_XPABIAS_LVL_S 30
+#define AR_AN_TOP2_LOCALBIAS 0x00200000
+#define AR_AN_TOP2_LOCALBIAS_S 21
+#define AR_AN_TOP2_PWDCLKIND 0x00400000
+#define AR_AN_TOP2_PWDCLKIND_S 22
+
+#define AR_AN_SYNTH9 0x7868
+#define AR_AN_SYNTH9_REFDIVA 0xf8000000
+#define AR_AN_SYNTH9_REFDIVA_S 27
+
+#define AR_STA_ID0 0x8000
+#define AR_STA_ID1 0x8004
+#define AR_STA_ID1_SADH_MASK 0x0000FFFF
+#define AR_STA_ID1_STA_AP 0x00010000
+#define AR_STA_ID1_ADHOC 0x00020000
+#define AR_STA_ID1_PWR_SAV 0x00040000
+#define AR_STA_ID1_KSRCHDIS 0x00080000
+#define AR_STA_ID1_PCF 0x00100000
+#define AR_STA_ID1_USE_DEFANT 0x00200000
+#define AR_STA_ID1_DEFANT_UPDATE 0x00400000
+#define AR_STA_ID1_RTS_USE_DEF 0x00800000
+#define AR_STA_ID1_ACKCTS_6MB 0x01000000
+#define AR_STA_ID1_BASE_RATE_11B 0x02000000
+#define AR_STA_ID1_SECTOR_SELF_GEN 0x04000000
+#define AR_STA_ID1_CRPT_MIC_ENABLE 0x08000000
+#define AR_STA_ID1_KSRCH_MODE 0x10000000
+#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000
+#define AR_STA_ID1_CBCIV_ENDIAN 0x40000000
+#define AR_STA_ID1_MCAST_KSRCH 0x80000000
+
+#define AR_BSS_ID0 0x8008
+#define AR_BSS_ID1 0x800C
+#define AR_BSS_ID1_U16 0x0000FFFF
+#define AR_BSS_ID1_AID 0x07FF0000
+#define AR_BSS_ID1_AID_S 16
+
+#define AR_BCN_RSSI_AVE 0x8010
+#define AR_BCN_RSSI_AVE_MASK 0x00000FFF
+
+#define AR_TIME_OUT 0x8014
+#define AR_TIME_OUT_ACK 0x00003FFF
+#define AR_TIME_OUT_ACK_S 0
+#define AR_TIME_OUT_CTS 0x3FFF0000
+#define AR_TIME_OUT_CTS_S 16
+
+#define AR_RSSI_THR 0x8018
+#define AR_RSSI_THR_MASK 0x000000FF
+#define AR_RSSI_THR_BM_THR 0x0000FF00
+#define AR_RSSI_THR_BM_THR_S 8
+#define AR_RSSI_BCN_WEIGHT 0x1F000000
+#define AR_RSSI_BCN_WEIGHT_S 24
+#define AR_RSSI_BCN_RSSI_RST 0x20000000
+
+#define AR_USEC 0x801c
+#define AR_USEC_USEC 0x0000007F
+#define AR_USEC_TX_LAT 0x007FC000
+#define AR_USEC_TX_LAT_S 14
+#define AR_USEC_RX_LAT 0x1F800000
+#define AR_USEC_RX_LAT_S 23
+
+#define AR_RESET_TSF 0x8020
+#define AR_RESET_TSF_ONCE 0x01000000
+
+#define AR_MAX_CFP_DUR 0x8038
+#define AR_CFP_VAL 0x0000FFFF
+
+#define AR_RX_FILTER 0x803C
+#define AR_RX_FILTER_ALL 0x00000000
+#define AR_RX_UCAST 0x00000001
+#define AR_RX_MCAST 0x00000002
+#define AR_RX_BCAST 0x00000004
+#define AR_RX_CONTROL 0x00000008
+#define AR_RX_BEACON 0x00000010
+#define AR_RX_PROM 0x00000020
+#define AR_RX_PROBE_REQ 0x00000080
+#define AR_RX_MY_BEACON 0x00000200
+#define AR_RX_COMPR_BAR 0x00000400
+#define AR_RX_COMPR_BA 0x00000800
+#define AR_RX_UNCOM_BA_BAR 0x00001000
+
+#define AR_MCAST_FIL0 0x8040
+#define AR_MCAST_FIL1 0x8044
+
+#define AR_DIAG_SW 0x8048
+#define AR_DIAG_CACHE_ACK 0x00000001
+#define AR_DIAG_ACK_DIS 0x00000002
+#define AR_DIAG_CTS_DIS 0x00000004
+#define AR_DIAG_ENCRYPT_DIS 0x00000008
+#define AR_DIAG_DECRYPT_DIS 0x00000010
+#define AR_DIAG_RX_DIS 0x00000020
+#define AR_DIAG_LOOP_BACK 0x00000040
+#define AR_DIAG_CORR_FCS 0x00000080
+#define AR_DIAG_CHAN_INFO 0x00000100
+#define AR_DIAG_SCRAM_SEED 0x0001FE00
+#define AR_DIAG_SCRAM_SEED_S 8
+#define AR_DIAG_FRAME_NV0 0x00020000
+#define AR_DIAG_OBS_PT_SEL1 0x000C0000
+#define AR_DIAG_OBS_PT_SEL1_S 18
+#define AR_DIAG_FORCE_RX_CLEAR 0x00100000
+#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
+#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
+#define AR_DIAG_EIFS_CTRL_ENA 0x00800000
+#define AR_DIAG_DUAL_CHAIN_INFO 0x01000000
+#define AR_DIAG_RX_ABORT 0x02000000
+#define AR_DIAG_SATURATE_CYCLE_CNT 0x04000000
+#define AR_DIAG_OBS_PT_SEL2 0x08000000
+#define AR_DIAG_RX_CLEAR_CTL_LOW 0x10000000
+#define AR_DIAG_RX_CLEAR_EXT_LOW 0x20000000
+
+#define AR_TSF_L32 0x804c
+#define AR_TSF_U32 0x8050
+
+#define AR_TST_ADDAC 0x8054
+#define AR_DEF_ANTENNA 0x8058
+
+#define AR_AES_MUTE_MASK0 0x805c
+#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
+#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000
+#define AR_AES_MUTE_MASK0_QOS_S 16
+
+#define AR_AES_MUTE_MASK1 0x8060
+#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF
+
+#define AR_GATED_CLKS 0x8064
+#define AR_GATED_CLKS_TX 0x00000002
+#define AR_GATED_CLKS_RX 0x00000004
+#define AR_GATED_CLKS_REG 0x00000008
+
+#define AR_OBS_BUS_CTRL 0x8068
+#define AR_OBS_BUS_SEL_1 0x00040000
+#define AR_OBS_BUS_SEL_2 0x00080000
+#define AR_OBS_BUS_SEL_3 0x000C0000
+#define AR_OBS_BUS_SEL_4 0x08040000
+#define AR_OBS_BUS_SEL_5 0x08080000
+
+#define AR_OBS_BUS_1 0x806c
+#define AR_OBS_BUS_1_PCU 0x00000001
+#define AR_OBS_BUS_1_RX_END 0x00000002
+#define AR_OBS_BUS_1_RX_WEP 0x00000004
+#define AR_OBS_BUS_1_RX_BEACON 0x00000008
+#define AR_OBS_BUS_1_RX_FILTER 0x00000010
+#define AR_OBS_BUS_1_TX_HCF 0x00000020
+#define AR_OBS_BUS_1_QUIET_TIME 0x00000040
+#define AR_OBS_BUS_1_CHAN_IDLE 0x00000080
+#define AR_OBS_BUS_1_TX_HOLD 0x00000100
+#define AR_OBS_BUS_1_TX_FRAME 0x00000200
+#define AR_OBS_BUS_1_RX_FRAME 0x00000400
+#define AR_OBS_BUS_1_RX_CLEAR 0x00000800
+#define AR_OBS_BUS_1_WEP_STATE 0x0003F000
+#define AR_OBS_BUS_1_WEP_STATE_S 12
+#define AR_OBS_BUS_1_RX_STATE 0x01F00000
+#define AR_OBS_BUS_1_RX_STATE_S 20
+#define AR_OBS_BUS_1_TX_STATE 0x7E000000
+#define AR_OBS_BUS_1_TX_STATE_S 25
+
+#define AR_LAST_TSTP 0x8080
+#define AR_NAV 0x8084
+#define AR_RTS_OK 0x8088
+#define AR_RTS_FAIL 0x808c
+#define AR_ACK_FAIL 0x8090
+#define AR_FCS_FAIL 0x8094
+#define AR_BEACON_CNT 0x8098
+
+#define AR_SLEEP1 0x80d4
+#define AR_SLEEP1_ASSUME_DTIM 0x00080000
+#define AR_SLEEP1_CAB_TIMEOUT 0xFFE00000
+#define AR_SLEEP1_CAB_TIMEOUT_S 21
+
+#define AR_SLEEP2 0x80d8
+#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000
+#define AR_SLEEP2_BEACON_TIMEOUT_S 21
+
+#define AR_BSSMSKL 0x80e0
+#define AR_BSSMSKU 0x80e4
+
+#define AR_TPC 0x80e8
+#define AR_TPC_ACK 0x0000003f
+#define AR_TPC_ACK_S 0x00
+#define AR_TPC_CTS 0x00003f00
+#define AR_TPC_CTS_S 0x08
+#define AR_TPC_CHIRP 0x003f0000
+#define AR_TPC_CHIRP_S 0x16
+
+#define AR_TFCNT 0x80ec
+#define AR_RFCNT 0x80f0
+#define AR_RCCNT 0x80f4
+#define AR_CCCNT 0x80f8
+
+#define AR_QUIET1 0x80fc
+#define AR_QUIET1_NEXT_QUIET_S 0
+#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff
+#define AR_QUIET1_QUIET_ENABLE 0x00010000
+#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x00020000
+#define AR_QUIET2 0x8100
+#define AR_QUIET2_QUIET_PERIOD_S 0
+#define AR_QUIET2_QUIET_PERIOD_M 0x0000ffff
+#define AR_QUIET2_QUIET_DUR_S 16
+#define AR_QUIET2_QUIET_DUR 0xffff0000
+
+#define AR_TSF_PARM 0x8104
+#define AR_TSF_INCREMENT_M 0x000000ff
+#define AR_TSF_INCREMENT_S 0x00
+
+#define AR_QOS_NO_ACK 0x8108
+#define AR_QOS_NO_ACK_TWO_BIT 0x0000000f
+#define AR_QOS_NO_ACK_TWO_BIT_S 0
+#define AR_QOS_NO_ACK_BIT_OFF 0x00000070
+#define AR_QOS_NO_ACK_BIT_OFF_S 4
+#define AR_QOS_NO_ACK_BYTE_OFF 0x00000180
+#define AR_QOS_NO_ACK_BYTE_OFF_S 7
+
+#define AR_PHY_ERR 0x810c
+
+#define AR_PHY_ERR_DCHIRP 0x00000008
+#define AR_PHY_ERR_RADAR 0x00000020
+#define AR_PHY_ERR_OFDM_TIMING 0x00020000
+#define AR_PHY_ERR_CCK_TIMING 0x02000000
+
+#define AR_RXFIFO_CFG 0x8114
+
+
+#define AR_MIC_QOS_CONTROL 0x8118
+#define AR_MIC_QOS_SELECT 0x811c
+
+#define AR_PCU_MISC 0x8120
+#define AR_PCU_FORCE_BSSID_MATCH 0x00000001
+#define AR_PCU_MIC_NEW_LOC_ENA 0x00000004
+#define AR_PCU_TX_ADD_TSF 0x00000008
+#define AR_PCU_CCK_SIFS_MODE 0x00000010
+#define AR_PCU_RX_ANT_UPDT 0x00000800
+#define AR_PCU_TXOP_TBTT_LIMIT_ENA 0x00001000
+#define AR_PCU_MISS_BCN_IN_SLEEP 0x00004000
+#define AR_PCU_BUG_12306_FIX_ENA 0x00020000
+#define AR_PCU_FORCE_QUIET_COLL 0x00040000
+#define AR_PCU_TBTT_PROTECT 0x00200000
+#define AR_PCU_CLEAR_VMF 0x01000000
+#define AR_PCU_CLEAR_BA_VALID 0x04000000
+
+
+#define AR_FILT_OFDM 0x8124
+#define AR_FILT_OFDM_COUNT 0x00FFFFFF
+
+#define AR_FILT_CCK 0x8128
+#define AR_FILT_CCK_COUNT 0x00FFFFFF
+
+#define AR_PHY_ERR_1 0x812c
+#define AR_PHY_ERR_1_COUNT 0x00FFFFFF
+#define AR_PHY_ERR_MASK_1 0x8130
+
+#define AR_PHY_ERR_2 0x8134
+#define AR_PHY_ERR_2_COUNT 0x00FFFFFF
+#define AR_PHY_ERR_MASK_2 0x8138
+
+#define AR_PHY_COUNTMAX (3 << 22)
+#define AR_MIBCNT_INTRMASK (3 << 22)
+
+#define AR_TSF_THRESHOLD 0x813c
+#define AR_TSF_THRESHOLD_VAL 0x0000FFFF
+
+#define AR_PHY_ERR_EIFS_MASK 8144
+
+#define AR_PHY_ERR_3 0x8168
+#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
+#define AR_PHY_ERR_MASK_3 0x816c
+
+#define AR_TXSIFS 0x81d0
+#define AR_TXSIFS_TIME 0x000000FF
+#define AR_TXSIFS_TX_LATENCY 0x00000F00
+#define AR_TXSIFS_TX_LATENCY_S 8
+#define AR_TXSIFS_ACK_SHIFT 0x00007000
+#define AR_TXSIFS_ACK_SHIFT_S 12
+
+#define AR_TXOP_X 0x81ec
+#define AR_TXOP_X_VAL 0x000000FF
+
+
+#define AR_TXOP_0_3 0x81f0
+#define AR_TXOP_4_7 0x81f4
+#define AR_TXOP_8_11 0x81f8
+#define AR_TXOP_12_15 0x81fc
+
+
+#define AR_NEXT_TBTT_TIMER 0x8200
+#define AR_NEXT_DMA_BEACON_ALERT 0x8204
+#define AR_NEXT_SWBA 0x8208
+#define AR_NEXT_CFP 0x8208
+#define AR_NEXT_HCF 0x820C
+#define AR_NEXT_TIM 0x8210
+#define AR_NEXT_DTIM 0x8214
+#define AR_NEXT_QUIET_TIMER 0x8218
+#define AR_NEXT_NDP_TIMER 0x821C
+
+#define AR_BEACON_PERIOD 0x8220
+#define AR_DMA_BEACON_PERIOD 0x8224
+#define AR_SWBA_PERIOD 0x8228
+#define AR_HCF_PERIOD 0x822C
+#define AR_TIM_PERIOD 0x8230
+#define AR_DTIM_PERIOD 0x8234
+#define AR_QUIET_PERIOD 0x8238
+#define AR_NDP_PERIOD 0x823C
+
+#define AR_TIMER_MODE 0x8240
+#define AR_TBTT_TIMER_EN 0x00000001
+#define AR_DBA_TIMER_EN 0x00000002
+#define AR_SWBA_TIMER_EN 0x00000004
+#define AR_HCF_TIMER_EN 0x00000008
+#define AR_TIM_TIMER_EN 0x00000010
+#define AR_DTIM_TIMER_EN 0x00000020
+#define AR_QUIET_TIMER_EN 0x00000040
+#define AR_NDP_TIMER_EN 0x00000080
+#define AR_TIMER_OVERFLOW_INDEX 0x00000700
+#define AR_TIMER_OVERFLOW_INDEX_S 8
+#define AR_TIMER_THRESH 0xFFFFF000
+#define AR_TIMER_THRESH_S 12
+
+#define AR_SLP32_MODE 0x8244
+#define AR_SLP32_HALF_CLK_LATENCY 0x000FFFFF
+#define AR_SLP32_ENA 0x00100000
+#define AR_SLP32_TSF_WRITE_STATUS 0x00200000
+
+#define AR_SLP32_WAKE 0x8248
+#define AR_SLP32_WAKE_XTL_TIME 0x0000FFFF
+
+#define AR_SLP32_INC 0x824c
+#define AR_SLP32_TST_INC 0x000FFFFF
+
+#define AR_SLP_CNT 0x8250
+#define AR_SLP_CYCLE_CNT 0x8254
+
+#define AR_SLP_MIB_CTRL 0x8258
+#define AR_SLP_MIB_CLEAR 0x00000001
+#define AR_SLP_MIB_PENDING 0x00000002
+
+#define AR_2040_MODE 0x8318
+#define AR_2040_JOINED_RX_CLEAR 0x00000001
+
+
+#define AR_EXTRCCNT 0x8328
+
+#define AR_SELFGEN_MASK 0x832c
+
+#define AR_PCU_TXBUF_CTRL 0x8340
+#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF
+#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
+#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
+
+#define AR_KEYTABLE_0 0x8800
+#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
+#define AR_KEY_CACHE_SIZE 128
+#define AR_RSVD_KEYTABLE_ENTRIES 4
+#define AR_KEY_TYPE 0x00000007
+#define AR_KEYTABLE_TYPE_40 0x00000000
+#define AR_KEYTABLE_TYPE_104 0x00000001
+#define AR_KEYTABLE_TYPE_128 0x00000003
+#define AR_KEYTABLE_TYPE_TKIP 0x00000004
+#define AR_KEYTABLE_TYPE_AES 0x00000005
+#define AR_KEYTABLE_TYPE_CCM 0x00000006
+#define AR_KEYTABLE_TYPE_CLR 0x00000007
+#define AR_KEYTABLE_ANT 0x00000008
+#define AR_KEYTABLE_VALID 0x00008000
+#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
+#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
+#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
+#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
+#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
+#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
+#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
+#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
+
+#endif
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
new file mode 100644
index 00000000000..62e28887ccd
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -0,0 +1,1026 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "hw.h"
+#include "regd.h"
+#include "regd_common.h"
+
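+/*
+ * Comparator for the channel table sort: order primarily by channel
+ * number, and by the masked channel flags when the numbers match.
+ */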
+static int ath9k_regd_chansort(const void *a, const void *b)
+{
+ const struct ath9k_channel *ca = a;
+ const struct ath9k_channel *cb = b;
+
+ return (ca->channel == cb->channel) ?
+ (ca->channelFlags & CHAN_FLAGS) -
+ (cb->channelFlags & CHAN_FLAGS) : ca->channel - cb->channel;
+}
+
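+/*
+ * Simple insertion sort over an array of fixed-size records, using the
+ * swap() helper from regd.h.  Used to keep ah_channels ordered for the
+ * binary search in ath9k_regd_check_channel().
+ */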
+static void
+ath9k_regd_sort(void *a, u32 n, u32 size, ath_hal_cmp_t *cmp)
+{
+ u8 *aa = a;
+ u8 *ai, *t;
+
+ for (ai = aa + size; --n >= 1; ai += size)
+ for (t = ai; t > aa; t -= size) {
+ u8 *u = t - size;
+ if (cmp(u, t) <= 0)
+ break;
+ swap(u, t, size);
+ }
+}
+
+static u16 ath9k_regd_get_eepromRD(struct ath_hal *ah)
+{
+ return ah->ah_currentRD & ~WORLDWIDE_ROAMING_FLAG;
+}
+
+static bool ath9k_regd_is_chan_bm_zero(u64 *bitmask)
+{
+ int i;
+
+ for (i = 0; i < BMLEN; i++) {
+ if (bitmask[i] != 0)
+ return false;
+ }
+ return true;
+}
+
+static bool ath9k_regd_is_eeprom_valid(struct ath_hal *ah)
+{
+ u16 rd = ath9k_regd_get_eepromRD(ah);
+ int i;
+
+ if (rd & COUNTRY_ERD_FLAG) {
+ u16 cc = rd & ~COUNTRY_ERD_FLAG;
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++)
+ if (allCountries[i].countryCode == cc)
+ return true;
+ } else {
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
+ if (regDomainPairs[i].regDmnEnum == rd)
+ return true;
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: invalid regulatory domain/country code 0x%x\n",
+ __func__, rd);
+ return false;
+}
+
+static bool ath9k_regd_is_fcc_midband_supported(struct ath_hal *ah)
+{
+ u32 regcap;
+
+ regcap = ah->ah_caps.reg_cap;
+
+ if (regcap & AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND)
+ return true;
+ else
+ return false;
+}
+
+static bool ath9k_regd_is_ccode_valid(struct ath_hal *ah,
+ u16 cc)
+{
+ u16 rd;
+ int i;
+
+ if (cc == CTRY_DEFAULT)
+ return true;
+ if (cc == CTRY_DEBUG)
+ return true;
+
+ rd = ath9k_regd_get_eepromRD(ah);
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: EEPROM regdomain 0x%x\n",
+ __func__, rd);
+
+ if (rd & COUNTRY_ERD_FLAG) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: EEPROM setting is country code %u\n",
+ __func__, rd & ~COUNTRY_ERD_FLAG);
+ return cc == (rd & ~COUNTRY_ERD_FLAG);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (cc == allCountries[i].countryCode) {
+#ifdef AH_SUPPORT_11D
+ if ((rd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)
+ return true;
+#endif
+ if (allCountries[i].regDmnEnum == rd ||
+ rd == DEBUG_REG_DMN || rd == NO_ENUMRD)
+ return true;
+ }
+ }
+ return false;
+}
+
+static void
+ath9k_regd_get_wmodes_nreg(struct ath_hal *ah,
+ struct country_code_to_enum_rd *country,
+ struct regDomain *rd5GHz,
+ unsigned long *modes_allowed)
+{
+ bitmap_copy(modes_allowed, ah->ah_caps.wireless_modes, ATH9K_MODE_MAX);
+
+ if (test_bit(ATH9K_MODE_11G, ah->ah_caps.wireless_modes) &&
+ (!country->allow11g))
+ clear_bit(ATH9K_MODE_11G, modes_allowed);
+
+ if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes) &&
+ (ath9k_regd_is_chan_bm_zero(rd5GHz->chan11a)))
+ clear_bit(ATH9K_MODE_11A, modes_allowed);
+
+ if (test_bit(ATH9K_MODE_11NG_HT20, ah->ah_caps.wireless_modes)
+ && (!country->allow11ng20))
+ clear_bit(ATH9K_MODE_11NG_HT20, modes_allowed);
+
+ if (test_bit(ATH9K_MODE_11NA_HT20, ah->ah_caps.wireless_modes)
+ && (!country->allow11na20))
+ clear_bit(ATH9K_MODE_11NA_HT20, modes_allowed);
+
+ if (test_bit(ATH9K_MODE_11NG_HT40PLUS, ah->ah_caps.wireless_modes) &&
+ (!country->allow11ng40))
+ clear_bit(ATH9K_MODE_11NG_HT40PLUS, modes_allowed);
+
+ if (test_bit(ATH9K_MODE_11NG_HT40MINUS, ah->ah_caps.wireless_modes) &&
+ (!country->allow11ng40))
+ clear_bit(ATH9K_MODE_11NG_HT40MINUS, modes_allowed);
+
+ if (test_bit(ATH9K_MODE_11NA_HT40PLUS, ah->ah_caps.wireless_modes) &&
+ (!country->allow11na40))
+ clear_bit(ATH9K_MODE_11NA_HT40PLUS, modes_allowed);
+
+ if (test_bit(ATH9K_MODE_11NA_HT40MINUS, ah->ah_caps.wireless_modes) &&
+ (!country->allow11na40))
+ clear_bit(ATH9K_MODE_11NA_HT40MINUS, modes_allowed);
+}
+
+bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah)
+{
+ u16 rd;
+
+ rd = ath9k_regd_get_eepromRD(ah);
+
+ switch (rd) {
+ case FCC4_FCCA:
+ case (CTRY_UNITED_STATES_FCC49 | COUNTRY_ERD_FLAG):
+ return true;
+ case DEBUG_REG_DMN:
+ case NO_ENUMRD:
+ if (ah->ah_countryCode == CTRY_UNITED_STATES_FCC49)
+ return true;
+ break;
+ }
+ return false;
+}
+
+static struct country_code_to_enum_rd*
+ath9k_regd_find_country(u16 countryCode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countryCode == countryCode)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
+static u16 ath9k_regd_get_default_country(struct ath_hal *ah)
+{
+ u16 rd;
+ int i;
+
+ rd = ath9k_regd_get_eepromRD(ah);
+ if (rd & COUNTRY_ERD_FLAG) {
+ struct country_code_to_enum_rd *country = NULL;
+ u16 cc = rd & ~COUNTRY_ERD_FLAG;
+
+ country = ath9k_regd_find_country(cc);
+ if (country != NULL)
+ return cc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
+ if (regDomainPairs[i].regDmnEnum == rd) {
+ if (regDomainPairs[i].singleCC != 0)
+ return regDomainPairs[i].singleCC;
+ else
+ i = ARRAY_SIZE(regDomainPairs);
+ }
+ return CTRY_DEFAULT;
+}
+
+static bool ath9k_regd_is_valid_reg_domain(int regDmn,
+ struct regDomain *rd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regDomains); i++) {
+ if (regDomains[i].regDmnEnum == regDmn) {
+ if (rd != NULL) {
+ memcpy(rd, &regDomains[i],
+ sizeof(struct regDomain));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool ath9k_regd_is_valid_reg_domainPair(int regDmnPair)
+{
+ int i;
+
+ if (regDmnPair == NO_ENUMRD)
+ return false;
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
+ if (regDomainPairs[i].regDmnEnum == regDmnPair)
+ return true;
+ }
+ return false;
+}
+
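+/*
+ * Resolve a regulatory domain code (or CTRY_DEFAULT) into the unitary
+ * 2GHz or 5GHz regDomain entry, applying the per-band flags and
+ * passive-scan mask from the matching regDomainPairs[] entry.
+ */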
+static bool
+ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn,
+ u16 channelFlag, struct regDomain *rd)
+{
+ int i, found;
+ u64 flags = NO_REQ;
+ struct reg_dmn_pair_mapping *regPair = NULL;
+ int regOrg;
+
+ regOrg = regDmn;
+ if (regDmn == CTRY_DEFAULT) {
+ u16 rdnum;
+ rdnum = ath9k_regd_get_eepromRD(ah);
+
+ if (!(rdnum & COUNTRY_ERD_FLAG)) {
+ if (ath9k_regd_is_valid_reg_domain(rdnum, NULL) ||
+ ath9k_regd_is_valid_reg_domainPair(rdnum)) {
+ regDmn = rdnum;
+ }
+ }
+ }
+
+ if ((regDmn & MULTI_DOMAIN_MASK) == 0) {
+ for (i = 0, found = 0;
+ (i < ARRAY_SIZE(regDomainPairs)) && (!found); i++) {
+ if (regDomainPairs[i].regDmnEnum == regDmn) {
+ regPair = &regDomainPairs[i];
+ found = 1;
+ }
+ }
+ if (!found) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: Failed to find reg domain pair %u\n",
+ __func__, regDmn);
+ return false;
+ }
+ if (!(channelFlag & CHANNEL_2GHZ)) {
+ regDmn = regPair->regDmn5GHz;
+ flags = regPair->flags5GHz;
+ }
+ if (channelFlag & CHANNEL_2GHZ) {
+ regDmn = regPair->regDmn2GHz;
+ flags = regPair->flags2GHz;
+ }
+ }
+
+ found = ath9k_regd_is_valid_reg_domain(regDmn, rd);
+ if (!found) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: Failed to find unitary reg domain %u\n",
+ __func__, regDmn);
+ return false;
+ } else {
+		if (regPair)
+			rd->pscan &= regPair->pscanMask;
+ if (((regOrg & MULTI_DOMAIN_MASK) == 0) &&
+ (flags != NO_REQ)) {
+ rd->flags = flags;
+ }
+
+ rd->flags &= (channelFlag & CHANNEL_2GHZ) ?
+ REG_DOMAIN_2GHZ_MASK : REG_DOMAIN_5GHZ_MASK;
+ return true;
+ }
+}
+
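+/* Test a frequency-band bit in a bitmap made up of BMLEN 64-bit words. */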
+static bool ath9k_regd_is_bit_set(int bit, u64 *bitmask)
+{
+ int byteOffset, bitnum;
+ u64 val;
+
+ byteOffset = bit / 64;
+ bitnum = bit - byteOffset * 64;
+ val = ((u64) 1) << bitnum;
+ if (bitmask[byteOffset] & val)
+ return true;
+ else
+ return false;
+}
+
+static void
+ath9k_regd_add_reg_classid(u8 *regclassids, u32 maxregids,
+ u32 *nregids, u8 regclassid)
+{
+ int i;
+
+ if (regclassid == 0)
+ return;
+
+ for (i = 0; i < maxregids; i++) {
+ if (regclassids[i] == regclassid)
+ return;
+ if (regclassids[i] == 0)
+ break;
+ }
+
+ if (i == maxregids)
+ return;
+ else {
+ regclassids[i] = regclassid;
+ *nregids += 1;
+ }
+
+ return;
+}
+
+static bool
+ath9k_regd_get_eeprom_reg_ext_bits(struct ath_hal *ah,
+ enum reg_ext_bitmap bit)
+{
+ return (ah->ah_currentRDExt & (1 << bit)) ? true : false;
+}
+
+#ifdef ATH_NF_PER_CHAN
+
+static void ath9k_regd_init_rf_buffer(struct ath9k_channel *ichans,
+ int nchans)
+{
+ int i, j, next;
+
+ for (next = 0; next < nchans; next++) {
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ ichans[next].nfCalHist[i].currIndex = 0;
+ ichans[next].nfCalHist[i].privNF =
+ AR_PHY_CCA_MAX_GOOD_VALUE;
+ ichans[next].nfCalHist[i].invalidNFcount =
+ AR_PHY_CCA_FILTERWINDOW_LENGTH;
+ for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
+ ichans[next].nfCalHist[i].nfCalBuffer[j] =
+ AR_PHY_CCA_MAX_GOOD_VALUE;
+ }
+ }
+ }
+}
+#endif
+
+static int ath9k_regd_is_chan_present(struct ath_hal *ah,
+ u16 c)
+{
+ int i;
+
+ for (i = 0; i < 150; i++) {
+ if (!ah->ah_channels[i].channel)
+ return -1;
+ else if (ah->ah_channels[i].channel == c)
+ return i;
+ }
+
+ return -1;
+}
+
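+/*
+ * Validate one candidate channel against the hardware range, the
+ * frequency band and the regulatory domain restrictions, then fill in
+ * (or merge into) an ah_channels[] entry.  Returns true only when a new
+ * entry was added.
+ */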
+static bool
+ath9k_regd_add_channel(struct ath_hal *ah,
+ u16 c,
+ u16 c_lo,
+ u16 c_hi,
+ u16 maxChan,
+ u8 ctl,
+ int pos,
+ struct regDomain rd5GHz,
+ struct RegDmnFreqBand *fband,
+ struct regDomain *rd,
+ const struct cmode *cm,
+ struct ath9k_channel *ichans,
+ bool enableExtendedChannels)
+{
+ struct ath9k_channel *chan;
+ int ret;
+ u32 channelFlags = 0;
+ u8 privFlags = 0;
+
+ if (!(c_lo <= c && c <= c_hi)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: c %u out of range [%u..%u]\n",
+ __func__, c, c_lo, c_hi);
+ return false;
+ }
+ if ((fband->channelBW == CHANNEL_HALF_BW) &&
+ !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: Skipping %u half rate channel\n",
+ __func__, c);
+ return false;
+ }
+
+ if ((fband->channelBW == CHANNEL_QUARTER_BW) &&
+ !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: Skipping %u quarter rate channel\n",
+ __func__, c);
+ return false;
+ }
+
+ if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: c %u > maxChan %u\n",
+ __func__, c, maxChan);
+ return false;
+ }
+
+ if ((fband->usePassScan & IS_ECM_CHAN) && !enableExtendedChannels) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "Skipping ecm channel\n");
+ return false;
+ }
+
+ if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == ATH9K_M_HOSTAP)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "Skipping HOSTAP channel\n");
+ return false;
+ }
+
+ if (IS_HT40_MODE(cm->mode) &&
+ !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_FCC_DFS_HT40)) &&
+ (fband->useDfs) &&
+ (rd->conformanceTestLimit != MKK)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "Skipping HT40 channel (en_fcc_dfs_ht40 = 0)\n");
+ return false;
+ }
+
+ if (IS_HT40_MODE(cm->mode) &&
+ !(ath9k_regd_get_eeprom_reg_ext_bits(ah,
+ REG_EXT_JAPAN_NONDFS_HT40)) &&
+ !(fband->useDfs) && (rd->conformanceTestLimit == MKK)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "Skipping HT40 channel (en_jap_ht40 = 0)\n");
+ return false;
+ }
+
+ if (IS_HT40_MODE(cm->mode) &&
+ !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_JAPAN_DFS_HT40)) &&
+ (fband->useDfs) &&
+ (rd->conformanceTestLimit == MKK)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "Skipping HT40 channel (en_jap_dfs_ht40 = 0)\n");
+ return false;
+ }
+
+ /* Calculate channel flags */
+
+ channelFlags = cm->flags;
+
+ switch (fband->channelBW) {
+ case CHANNEL_HALF_BW:
+ channelFlags |= CHANNEL_HALF;
+ break;
+ case CHANNEL_QUARTER_BW:
+ channelFlags |= CHANNEL_QUARTER;
+ break;
+ }
+
+ if (fband->usePassScan & rd->pscan)
+ channelFlags |= CHANNEL_PASSIVE;
+ else
+ channelFlags &= ~CHANNEL_PASSIVE;
+ if (fband->useDfs & rd->dfsMask)
+ privFlags = CHANNEL_DFS;
+ else
+ privFlags = 0;
+ if (rd->flags & LIMIT_FRAME_4MS)
+ privFlags |= CHANNEL_4MS_LIMIT;
+ if (privFlags & CHANNEL_DFS)
+ privFlags |= CHANNEL_DISALLOW_ADHOC;
+ if (rd->flags & ADHOC_PER_11D)
+ privFlags |= CHANNEL_PER_11D_ADHOC;
+
+ if (channelFlags & CHANNEL_PASSIVE) {
+ if ((c < 2412) || (c > 2462)) {
+ if (rd5GHz.regDmnEnum == MKK1 ||
+ rd5GHz.regDmnEnum == MKK2) {
+ u32 regcap = ah->ah_caps.reg_cap;
+ if (!(regcap &
+ (AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
+ AR_EEPROM_EEREGCAP_EN_KK_U2 |
+ AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) &&
+ isUNII1OddChan(c)) {
+ channelFlags &= ~CHANNEL_PASSIVE;
+ } else {
+ privFlags |= CHANNEL_DISALLOW_ADHOC;
+ }
+ } else {
+ privFlags |= CHANNEL_DISALLOW_ADHOC;
+ }
+ }
+ }
+
+ if ((cm->mode == ATH9K_MODE_11A) ||
+ (cm->mode == ATH9K_MODE_11NA_HT20) ||
+ (cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
+ (cm->mode == ATH9K_MODE_11NA_HT40MINUS)) {
+ if (rd->flags & (ADHOC_NO_11A | DISALLOW_ADHOC_11A))
+ privFlags |= CHANNEL_DISALLOW_ADHOC;
+ }
+
+ /* Fill in channel details */
+
+ ret = ath9k_regd_is_chan_present(ah, c);
+ if (ret == -1) {
+ chan = &ah->ah_channels[pos];
+ chan->channel = c;
+ chan->maxRegTxPower = fband->powerDfs;
+ chan->antennaMax = fband->antennaMax;
+ chan->regDmnFlags = rd->flags;
+ chan->maxTxPower = AR5416_MAX_RATE_POWER;
+ chan->minTxPower = AR5416_MAX_RATE_POWER;
+ chan->channelFlags = channelFlags;
+ chan->privFlags = privFlags;
+ } else {
+ chan = &ah->ah_channels[ret];
+ chan->channelFlags |= channelFlags;
+ chan->privFlags |= privFlags;
+ }
+
+ /* Set CTLs */
+
+ if ((cm->flags & CHANNEL_ALL) == CHANNEL_A)
+ chan->conformanceTestLimit[0] = ctl;
+ else if ((cm->flags & CHANNEL_ALL) == CHANNEL_B)
+ chan->conformanceTestLimit[1] = ctl;
+ else if ((cm->flags & CHANNEL_ALL) == CHANNEL_G)
+ chan->conformanceTestLimit[2] = ctl;
+
+ return (ret == -1) ? true : false;
+}
+
+static bool ath9k_regd_japan_check(struct ath_hal *ah,
+ int b,
+ struct regDomain *rd5GHz)
+{
+ bool skipband = false;
+ int i;
+ u32 regcap;
+
+ for (i = 0; i < ARRAY_SIZE(j_bandcheck); i++) {
+ if (j_bandcheck[i].freqbandbit == b) {
+ regcap = ah->ah_caps.reg_cap;
+ if ((j_bandcheck[i].eepromflagtocheck & regcap) == 0) {
+ skipband = true;
+ } else if ((regcap & AR_EEPROM_EEREGCAP_EN_KK_U2) ||
+ (regcap & AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) {
+ rd5GHz->dfsMask |= DFS_MKK4;
+ rd5GHz->pscan |= PSCAN_MKK3;
+ }
+ break;
+ }
+ }
+
+	if (skipband)
+		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+			"%s: Skipping %d freq band\n",
+			__func__, j_bandcheck[i].freqbandbit);
+
+ return skipband;
+}
+
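+/*
+ * Build the ah_channels[] table: pick the country/regdomain from the
+ * EEPROM (or the caller-supplied country code), walk every supported
+ * wireless mode and frequency band, add the channels that pass the
+ * regulatory checks, and finally sort the table.
+ */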
+bool
+ath9k_regd_init_channels(struct ath_hal *ah,
+ u32 maxchans,
+ u32 *nchans, u8 *regclassids,
+ u32 maxregids, u32 *nregids, u16 cc,
+ bool enableOutdoor,
+ bool enableExtendedChannels)
+{
+ u16 maxChan = 7000;
+ struct country_code_to_enum_rd *country = NULL;
+ struct regDomain rd5GHz, rd2GHz;
+ const struct cmode *cm;
+ struct ath9k_channel *ichans = &ah->ah_channels[0];
+ int next = 0, b;
+ u8 ctl;
+ int regdmn;
+ u16 chanSep;
+ unsigned long *modes_avail;
+ DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX);
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: cc %u %s %s\n",
+ __func__, cc,
+ enableOutdoor ? "Enable outdoor" : "",
+ enableExtendedChannels ? "Enable ecm" : "");
+
+ if (!ath9k_regd_is_ccode_valid(ah, cc)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: invalid country code %d\n", __func__, cc);
+ return false;
+ }
+
+ if (!ath9k_regd_is_eeprom_valid(ah)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: invalid EEPROM contents\n", __func__);
+ return false;
+ }
+
+ ah->ah_countryCode = ath9k_regd_get_default_country(ah);
+
+ if (ah->ah_countryCode == CTRY_DEFAULT) {
+ ah->ah_countryCode = cc & COUNTRY_CODE_MASK;
+ if ((ah->ah_countryCode == CTRY_DEFAULT) &&
+ (ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)) {
+ ah->ah_countryCode = CTRY_UNITED_STATES;
+ }
+ }
+
+#ifdef AH_SUPPORT_11D
+ if (ah->ah_countryCode == CTRY_DEFAULT) {
+ regdmn = ath9k_regd_get_eepromRD(ah);
+ country = NULL;
+ } else {
+#endif
+ country = ath9k_regd_find_country(ah->ah_countryCode);
+ if (country == NULL) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "Country is NULL!!!!, cc= %d\n",
+ ah->ah_countryCode);
+ return false;
+ } else {
+ regdmn = country->regDmnEnum;
+#ifdef AH_SUPPORT_11D
+ if (((ath9k_regd_get_eepromRD(ah) &
+ WORLD_SKU_MASK) == WORLD_SKU_PREFIX) &&
+ (cc == CTRY_UNITED_STATES)) {
+ if (!isWwrSKU_NoMidband(ah)
+ && ath9k_regd_is_fcc_midband_supported(ah))
+ regdmn = FCC3_FCCA;
+ else
+ regdmn = FCC1_FCCA;
+ }
+#endif
+ }
+#ifdef AH_SUPPORT_11D
+ }
+#endif
+ if (!ath9k_regd_get_wmode_regdomain(ah,
+ regdmn,
+ ~CHANNEL_2GHZ,
+ &rd5GHz)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: couldn't find unitary "
+ "5GHz reg domain for country %u\n",
+ __func__, ah->ah_countryCode);
+ return false;
+ }
+ if (!ath9k_regd_get_wmode_regdomain(ah,
+ regdmn,
+ CHANNEL_2GHZ,
+ &rd2GHz)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: couldn't find unitary 2GHz "
+ "reg domain for country %u\n",
+ __func__, ah->ah_countryCode);
+ return false;
+ }
+
+ if (!isWwrSKU(ah) && ((rd5GHz.regDmnEnum == FCC1) ||
+ (rd5GHz.regDmnEnum == FCC2))) {
+ if (ath9k_regd_is_fcc_midband_supported(ah)) {
+ if (!ath9k_regd_get_wmode_regdomain(ah,
+ FCC3_FCCA,
+ ~CHANNEL_2GHZ,
+ &rd5GHz)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: couldn't find unitary 5GHz "
+ "reg domain for country %u\n",
+ __func__, ah->ah_countryCode);
+ return false;
+ }
+ }
+ }
+
+ if (country == NULL) {
+ modes_avail = ah->ah_caps.wireless_modes;
+ } else {
+ ath9k_regd_get_wmodes_nreg(ah, country, &rd5GHz, modes_allowed);
+ modes_avail = modes_allowed;
+
+ if (!enableOutdoor)
+ maxChan = country->outdoorChanStart;
+ }
+
+ next = 0;
+
+ if (maxchans > ARRAY_SIZE(ah->ah_channels))
+ maxchans = ARRAY_SIZE(ah->ah_channels);
+
+ for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) {
+ u16 c, c_hi, c_lo;
+ u64 *channelBM = NULL;
+ struct regDomain *rd = NULL;
+ struct RegDmnFreqBand *fband = NULL, *freqs;
+ int8_t low_adj = 0, hi_adj = 0;
+
+ if (!test_bit(cm->mode, modes_avail)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: !avail mode %d flags 0x%x\n",
+ __func__, cm->mode, cm->flags);
+ continue;
+ }
+ if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: channels 0x%x not supported "
+ "by hardware\n",
+ __func__, cm->flags);
+ continue;
+ }
+
+ switch (cm->mode) {
+ case ATH9K_MODE_11A:
+ case ATH9K_MODE_11NA_HT20:
+ case ATH9K_MODE_11NA_HT40PLUS:
+ case ATH9K_MODE_11NA_HT40MINUS:
+ rd = &rd5GHz;
+ channelBM = rd->chan11a;
+ freqs = &regDmn5GhzFreq[0];
+ ctl = rd->conformanceTestLimit;
+ break;
+ case ATH9K_MODE_11B:
+ rd = &rd2GHz;
+ channelBM = rd->chan11b;
+ freqs = &regDmn2GhzFreq[0];
+ ctl = rd->conformanceTestLimit | CTL_11B;
+ break;
+ case ATH9K_MODE_11G:
+ case ATH9K_MODE_11NG_HT20:
+ case ATH9K_MODE_11NG_HT40PLUS:
+ case ATH9K_MODE_11NG_HT40MINUS:
+ rd = &rd2GHz;
+ channelBM = rd->chan11g;
+ freqs = &regDmn2Ghz11gFreq[0];
+ ctl = rd->conformanceTestLimit | CTL_11G;
+ break;
+ default:
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: Unknown HAL mode 0x%x\n", __func__,
+ cm->mode);
+ continue;
+ }
+
+ if (ath9k_regd_is_chan_bm_zero(channelBM))
+ continue;
+
+ if ((cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
+ (cm->mode == ATH9K_MODE_11NG_HT40PLUS)) {
+ hi_adj = -20;
+ }
+
+ if ((cm->mode == ATH9K_MODE_11NA_HT40MINUS) ||
+ (cm->mode == ATH9K_MODE_11NG_HT40MINUS)) {
+ low_adj = 20;
+ }
+
+ /* XXX: Add a helper here instead */
+ for (b = 0; b < 64 * BMLEN; b++) {
+ if (ath9k_regd_is_bit_set(b, channelBM)) {
+ fband = &freqs[b];
+ if (rd5GHz.regDmnEnum == MKK1
+ || rd5GHz.regDmnEnum == MKK2) {
+ if (ath9k_regd_japan_check(ah,
+ b,
+ &rd5GHz))
+ continue;
+ }
+
+ ath9k_regd_add_reg_classid(regclassids,
+ maxregids,
+ nregids,
+							       fband->regClassId);
+
+ if (IS_HT40_MODE(cm->mode) && (rd == &rd5GHz)) {
+ chanSep = 40;
+ if (fband->lowChannel == 5280)
+ low_adj += 20;
+
+ if (fband->lowChannel == 5170)
+ continue;
+ } else
+ chanSep = fband->channelSep;
+
+ for (c = fband->lowChannel + low_adj;
+ ((c <= (fband->highChannel + hi_adj)) &&
+ (c >= (fband->lowChannel + low_adj)));
+ c += chanSep) {
+ if (next >= maxchans) {
+ DPRINTF(ah->ah_sc,
+ ATH_DBG_REGULATORY,
+ "%s: too many channels "
+ "for channel table\n",
+ __func__);
+ goto done;
+ }
+ if (ath9k_regd_add_channel(ah,
+ c, c_lo, c_hi,
+ maxChan, ctl,
+ next,
+ rd5GHz,
+ fband, rd, cm,
+ ichans,
+ enableExtendedChannels))
+ next++;
+ }
+ if (IS_HT40_MODE(cm->mode) &&
+ (fband->lowChannel == 5280)) {
+ low_adj -= 20;
+ }
+ }
+ }
+ }
+done:
+ if (next != 0) {
+ int i;
+
+ if (next > ARRAY_SIZE(ah->ah_channels)) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: too many channels %u; truncating to %u\n",
+ __func__, next,
+ (int) ARRAY_SIZE(ah->ah_channels));
+ next = ARRAY_SIZE(ah->ah_channels);
+ }
+#ifdef ATH_NF_PER_CHAN
+ ath9k_regd_init_rf_buffer(ichans, next);
+#endif
+ ath9k_regd_sort(ichans, next,
+ sizeof(struct ath9k_channel),
+ ath9k_regd_chansort);
+
+ ah->ah_nchan = next;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "Channel list:\n");
+ for (i = 0; i < next; i++) {
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "chan: %d flags: 0x%x\n",
+ ah->ah_channels[i].channel,
+ ah->ah_channels[i].channelFlags);
+ }
+ }
+ *nchans = next;
+
+ ah->ah_currentRDInUse = regdmn;
+ ah->ah_currentRD5G = rd5GHz.regDmnEnum;
+ ah->ah_currentRD2G = rd2GHz.regDmnEnum;
+ if (country == NULL) {
+ ah->ah_iso[0] = 0;
+ ah->ah_iso[1] = 0;
+ } else {
+ ah->ah_iso[0] = country->isoName[0];
+ ah->ah_iso[1] = country->isoName[1];
+ }
+
+ return next != 0;
+}
+
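+/*
+ * Look up a channel in the sorted ah_channels[] table (binary search),
+ * returning NULL for unknown channels and for DFS channels on which
+ * interference has been detected.
+ */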
+struct ath9k_channel*
+ath9k_regd_check_channel(struct ath_hal *ah,
+ const struct ath9k_channel *c)
+{
+ struct ath9k_channel *base, *cc;
+
+ int flags = c->channelFlags & CHAN_FLAGS;
+ int n, lim;
+
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: channel %u/0x%x (0x%x) requested\n", __func__,
+ c->channel, c->channelFlags, flags);
+
+ cc = ah->ah_curchan;
+ if (cc != NULL && cc->channel == c->channel &&
+ (cc->channelFlags & CHAN_FLAGS) == flags) {
+ if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
+ (cc->privFlags & CHANNEL_DFS))
+ return NULL;
+ else
+ return cc;
+ }
+
+ base = ah->ah_channels;
+ n = ah->ah_nchan;
+
+ for (lim = n; lim != 0; lim >>= 1) {
+ int d;
+ cc = &base[lim >> 1];
+ d = c->channel - cc->channel;
+ if (d == 0) {
+ if ((cc->channelFlags & CHAN_FLAGS) == flags) {
+ if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
+ (cc->privFlags & CHANNEL_DFS))
+ return NULL;
+ else
+ return cc;
+ }
+ d = flags - (cc->channelFlags & CHAN_FLAGS);
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
+ "%s: channel %u/0x%x d %d\n", __func__,
+ cc->channel, cc->channelFlags, d);
+ if (d > 0) {
+ base = cc + 1;
+ lim--;
+ }
+ }
+ DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: no match for %u/0x%x\n",
+ __func__, c->channel, c->channelFlags);
+ return NULL;
+}
+
+u32
+ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath9k_channel *ichan = NULL;
+
+ ichan = ath9k_regd_check_channel(ah, chan);
+ if (!ichan)
+ return 0;
+
+ return ichan->antennaMax;
+}
+
+u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan)
+{
+ u32 ctl = NO_CTL;
+ struct ath9k_channel *ichan;
+
+ if (ah->ah_countryCode == CTRY_DEFAULT && isWwrSKU(ah)) {
+ if (IS_CHAN_B(chan))
+ ctl = SD_NO_CTL | CTL_11B;
+ else if (IS_CHAN_G(chan))
+ ctl = SD_NO_CTL | CTL_11G;
+ else
+ ctl = SD_NO_CTL | CTL_11A;
+ } else {
+ ichan = ath9k_regd_check_channel(ah, chan);
+ if (ichan != NULL) {
+ /* FIXME */
+ if (IS_CHAN_A(ichan))
+ ctl = ichan->conformanceTestLimit[0];
+ else if (IS_CHAN_B(ichan))
+ ctl = ichan->conformanceTestLimit[1];
+ else if (IS_CHAN_G(ichan))
+ ctl = ichan->conformanceTestLimit[2];
+
+ if (IS_CHAN_G(chan) && (ctl & 0xf) == CTL_11B)
+ ctl = (ctl & ~0xf) | CTL_11G;
+ }
+ }
+ return ctl;
+}
+
+void ath9k_regd_get_current_country(struct ath_hal *ah,
+ struct ath9k_country_entry *ctry)
+{
+ u16 rd = ath9k_regd_get_eepromRD(ah);
+
+ ctry->isMultidomain = false;
+ if (rd == CTRY_DEFAULT)
+ ctry->isMultidomain = true;
+ else if (!(rd & COUNTRY_ERD_FLAG))
+ ctry->isMultidomain = isWwrSKU(ah);
+
+ ctry->countryCode = ah->ah_countryCode;
+ ctry->regDmnEnum = ah->ah_currentRD;
+ ctry->regDmn5G = ah->ah_currentRD5G;
+ ctry->regDmn2G = ah->ah_currentRD2G;
+ ctry->iso[0] = ah->ah_iso[0];
+ ctry->iso[1] = ah->ah_iso[1];
+ ctry->iso[2] = ah->ah_iso[2];
+}
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
new file mode 100644
index 00000000000..0ecd344fbd9
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REGD_H
+#define REGD_H
+
+#include "ath9k.h"
+
+#define BMLEN 2
+#define BMZERO {(u64) 0, (u64) 0}
+
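+/*
+ * Build a BMLEN x 64-bit frequency-band bitmap from up to twelve band
+ * indices; indices outside the 0..127 range contribute nothing, so
+ * unused slots can be passed as -1.
+ */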
+#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh, _fi, _fj, _fk, _fl) \
+ {((((_fa >= 0) && (_fa < 64)) ? \
+ (((u64) 1) << _fa) : (u64) 0) | \
+ (((_fb >= 0) && (_fb < 64)) ? \
+ (((u64) 1) << _fb) : (u64) 0) | \
+ (((_fc >= 0) && (_fc < 64)) ? \
+ (((u64) 1) << _fc) : (u64) 0) | \
+ (((_fd >= 0) && (_fd < 64)) ? \
+ (((u64) 1) << _fd) : (u64) 0) | \
+ (((_fe >= 0) && (_fe < 64)) ? \
+ (((u64) 1) << _fe) : (u64) 0) | \
+ (((_ff >= 0) && (_ff < 64)) ? \
+ (((u64) 1) << _ff) : (u64) 0) | \
+ (((_fg >= 0) && (_fg < 64)) ? \
+ (((u64) 1) << _fg) : (u64) 0) | \
+ (((_fh >= 0) && (_fh < 64)) ? \
+ (((u64) 1) << _fh) : (u64) 0) | \
+ (((_fi >= 0) && (_fi < 64)) ? \
+ (((u64) 1) << _fi) : (u64) 0) | \
+ (((_fj >= 0) && (_fj < 64)) ? \
+ (((u64) 1) << _fj) : (u64) 0) | \
+ (((_fk >= 0) && (_fk < 64)) ? \
+ (((u64) 1) << _fk) : (u64) 0) | \
+ (((_fl >= 0) && (_fl < 64)) ? \
+ (((u64) 1) << _fl) : (u64) 0) | \
+ ((((_fa > 63) && (_fa < 128)) ? \
+ (((u64) 1) << (_fa - 64)) : (u64) 0) | \
+ (((_fb > 63) && (_fb < 128)) ? \
+ (((u64) 1) << (_fb - 64)) : (u64) 0) | \
+ (((_fc > 63) && (_fc < 128)) ? \
+ (((u64) 1) << (_fc - 64)) : (u64) 0) | \
+ (((_fd > 63) && (_fd < 128)) ? \
+ (((u64) 1) << (_fd - 64)) : (u64) 0) | \
+ (((_fe > 63) && (_fe < 128)) ? \
+ (((u64) 1) << (_fe - 64)) : (u64) 0) | \
+ (((_ff > 63) && (_ff < 128)) ? \
+ (((u64) 1) << (_ff - 64)) : (u64) 0) | \
+ (((_fg > 63) && (_fg < 128)) ? \
+ (((u64) 1) << (_fg - 64)) : (u64) 0) | \
+ (((_fh > 63) && (_fh < 128)) ? \
+ (((u64) 1) << (_fh - 64)) : (u64) 0) | \
+ (((_fi > 63) && (_fi < 128)) ? \
+ (((u64) 1) << (_fi - 64)) : (u64) 0) | \
+ (((_fj > 63) && (_fj < 128)) ? \
+ (((u64) 1) << (_fj - 64)) : (u64) 0) | \
+ (((_fk > 63) && (_fk < 128)) ? \
+ (((u64) 1) << (_fk - 64)) : (u64) 0) | \
+ (((_fl > 63) && (_fl < 128)) ? \
+ (((u64) 1) << (_fl - 64)) : (u64) 0)))}
+
+#define DEF_REGDMN FCC1_FCCA
+#define DEF_DMN_5 FCC1
+#define DEF_DMN_2 FCCA
+#define COUNTRY_ERD_FLAG 0x8000
+#define WORLDWIDE_ROAMING_FLAG 0x4000
+#define SUPER_DOMAIN_MASK 0x0fff
+#define COUNTRY_CODE_MASK 0x3fff
+#define CF_INTERFERENCE (CHANNEL_CW_INT | CHANNEL_RADAR_INT)
+#define CHANNEL_14 (2484)
+#define IS_11G_CH14(_ch,_cf) \
+ (((_ch) == CHANNEL_14) && ((_cf) == CHANNEL_G))
+
+#define NO_PSCAN 0x0ULL
+#define PSCAN_FCC 0x0000000000000001ULL
+#define PSCAN_FCC_T 0x0000000000000002ULL
+#define PSCAN_ETSI 0x0000000000000004ULL
+#define PSCAN_MKK1 0x0000000000000008ULL
+#define PSCAN_MKK2 0x0000000000000010ULL
+#define PSCAN_MKKA 0x0000000000000020ULL
+#define PSCAN_MKKA_G 0x0000000000000040ULL
+#define PSCAN_ETSIA 0x0000000000000080ULL
+#define PSCAN_ETSIB 0x0000000000000100ULL
+#define PSCAN_ETSIC 0x0000000000000200ULL
+#define PSCAN_WWR 0x0000000000000400ULL
+#define PSCAN_MKKA1 0x0000000000000800ULL
+#define PSCAN_MKKA1_G 0x0000000000001000ULL
+#define PSCAN_MKKA2 0x0000000000002000ULL
+#define PSCAN_MKKA2_G 0x0000000000004000ULL
+#define PSCAN_MKK3 0x0000000000008000ULL
+#define PSCAN_DEFER 0x7FFFFFFFFFFFFFFFULL
+#define IS_ECM_CHAN 0x8000000000000000ULL
+
+#define isWwrSKU(_ah) \
+ (((ath9k_regd_get_eepromRD((_ah)) & WORLD_SKU_MASK) == \
+ WORLD_SKU_PREFIX) || \
+ (ath9k_regd_get_eepromRD(_ah) == WORLD))
+
+#define isWwrSKU_NoMidband(_ah) \
+ ((ath9k_regd_get_eepromRD((_ah)) == WOR3_WORLD) || \
+ (ath9k_regd_get_eepromRD(_ah) == WOR4_WORLD) || \
+ (ath9k_regd_get_eepromRD(_ah) == WOR5_ETSIC))
+
+#define isUNII1OddChan(ch) \
+ ((ch == 5170) || (ch == 5190) || (ch == 5210) || (ch == 5230))
+
+#define IS_HT40_MODE(_mode) \
+ (((_mode == ATH9K_MODE_11NA_HT40PLUS || \
+ _mode == ATH9K_MODE_11NG_HT40PLUS || \
+ _mode == ATH9K_MODE_11NA_HT40MINUS || \
+ _mode == ATH9K_MODE_11NG_HT40MINUS) ? true : false))
+
+#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER)
+
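+/*
+ * Byte-wise swap of two _size-byte records; _a must be a modifiable u8
+ * pointer, since the macro advances it and then restores it.
+ */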
+#define swap(_a, _b, _size) { \
+ u8 *s = _b; \
+ int i = _size; \
+ do { \
+ u8 tmp = *_a; \
+ *_a++ = *s; \
+ *s++ = tmp; \
+ } while (--i); \
+ _a -= _size; \
+}
+
+
+#define HALF_MAXCHANBW 10
+
+#define MULTI_DOMAIN_MASK 0xFF00
+
+#define WORLD_SKU_MASK 0x00F0
+#define WORLD_SKU_PREFIX 0x0060
+
+#define CHANNEL_HALF_BW 10
+#define CHANNEL_QUARTER_BW 5
+
+typedef int ath_hal_cmp_t(const void *, const void *);
+
+struct reg_dmn_pair_mapping {
+ u16 regDmnEnum;
+ u16 regDmn5GHz;
+ u16 regDmn2GHz;
+ u32 flags5GHz;
+ u32 flags2GHz;
+ u64 pscanMask;
+ u16 singleCC;
+};
+
+struct ccmap {
+ char isoName[3];
+ u16 countryCode;
+};
+
+struct country_code_to_enum_rd {
+ u16 countryCode;
+ u16 regDmnEnum;
+ const char *isoName;
+ const char *name;
+ bool allow11g;
+ bool allow11aTurbo;
+ bool allow11gTurbo;
+ bool allow11ng20;
+ bool allow11ng40;
+ bool allow11na20;
+ bool allow11na40;
+ u16 outdoorChanStart;
+};
+
+struct RegDmnFreqBand {
+ u16 lowChannel;
+ u16 highChannel;
+ u8 powerDfs;
+ u8 antennaMax;
+ u8 channelBW;
+ u8 channelSep;
+ u64 useDfs;
+ u64 usePassScan;
+ u8 regClassId;
+};
+
+struct regDomain {
+ u16 regDmnEnum;
+ u8 conformanceTestLimit;
+ u64 dfsMask;
+ u64 pscan;
+ u32 flags;
+ u64 chan11a[BMLEN];
+ u64 chan11a_turbo[BMLEN];
+ u64 chan11a_dyn_turbo[BMLEN];
+ u64 chan11b[BMLEN];
+ u64 chan11g[BMLEN];
+ u64 chan11g_turbo[BMLEN];
+};
+
+struct cmode {
+ u32 mode;
+ u32 flags;
+};
+
+#define YES true
+#define NO false
+
+struct japan_bandcheck {
+ u16 freqbandbit;
+ u32 eepromflagtocheck;
+};
+
+struct common_mode_power {
+ u16 lchan;
+ u16 hchan;
+ u8 pwrlvl;
+};
+
+enum CountryCode {
+ CTRY_ALBANIA = 8,
+ CTRY_ALGERIA = 12,
+ CTRY_ARGENTINA = 32,
+ CTRY_ARMENIA = 51,
+ CTRY_AUSTRALIA = 36,
+ CTRY_AUSTRIA = 40,
+ CTRY_AZERBAIJAN = 31,
+ CTRY_BAHRAIN = 48,
+ CTRY_BELARUS = 112,
+ CTRY_BELGIUM = 56,
+ CTRY_BELIZE = 84,
+ CTRY_BOLIVIA = 68,
+ CTRY_BOSNIA_HERZ = 70,
+ CTRY_BRAZIL = 76,
+ CTRY_BRUNEI_DARUSSALAM = 96,
+ CTRY_BULGARIA = 100,
+ CTRY_CANADA = 124,
+ CTRY_CHILE = 152,
+ CTRY_CHINA = 156,
+ CTRY_COLOMBIA = 170,
+ CTRY_COSTA_RICA = 188,
+ CTRY_CROATIA = 191,
+ CTRY_CYPRUS = 196,
+ CTRY_CZECH = 203,
+ CTRY_DENMARK = 208,
+ CTRY_DOMINICAN_REPUBLIC = 214,
+ CTRY_ECUADOR = 218,
+ CTRY_EGYPT = 818,
+ CTRY_EL_SALVADOR = 222,
+ CTRY_ESTONIA = 233,
+ CTRY_FAEROE_ISLANDS = 234,
+ CTRY_FINLAND = 246,
+ CTRY_FRANCE = 250,
+ CTRY_GEORGIA = 268,
+ CTRY_GERMANY = 276,
+ CTRY_GREECE = 300,
+ CTRY_GUATEMALA = 320,
+ CTRY_HONDURAS = 340,
+ CTRY_HONG_KONG = 344,
+ CTRY_HUNGARY = 348,
+ CTRY_ICELAND = 352,
+ CTRY_INDIA = 356,
+ CTRY_INDONESIA = 360,
+ CTRY_IRAN = 364,
+ CTRY_IRAQ = 368,
+ CTRY_IRELAND = 372,
+ CTRY_ISRAEL = 376,
+ CTRY_ITALY = 380,
+ CTRY_JAMAICA = 388,
+ CTRY_JAPAN = 392,
+ CTRY_JORDAN = 400,
+ CTRY_KAZAKHSTAN = 398,
+ CTRY_KENYA = 404,
+ CTRY_KOREA_NORTH = 408,
+ CTRY_KOREA_ROC = 410,
+ CTRY_KOREA_ROC2 = 411,
+ CTRY_KOREA_ROC3 = 412,
+ CTRY_KUWAIT = 414,
+ CTRY_LATVIA = 428,
+ CTRY_LEBANON = 422,
+ CTRY_LIBYA = 434,
+ CTRY_LIECHTENSTEIN = 438,
+ CTRY_LITHUANIA = 440,
+ CTRY_LUXEMBOURG = 442,
+ CTRY_MACAU = 446,
+ CTRY_MACEDONIA = 807,
+ CTRY_MALAYSIA = 458,
+ CTRY_MALTA = 470,
+ CTRY_MEXICO = 484,
+ CTRY_MONACO = 492,
+ CTRY_MOROCCO = 504,
+ CTRY_NEPAL = 524,
+ CTRY_NETHERLANDS = 528,
+ CTRY_NETHERLANDS_ANTILLES = 530,
+ CTRY_NEW_ZEALAND = 554,
+ CTRY_NICARAGUA = 558,
+ CTRY_NORWAY = 578,
+ CTRY_OMAN = 512,
+ CTRY_PAKISTAN = 586,
+ CTRY_PANAMA = 591,
+ CTRY_PAPUA_NEW_GUINEA = 598,
+ CTRY_PARAGUAY = 600,
+ CTRY_PERU = 604,
+ CTRY_PHILIPPINES = 608,
+ CTRY_POLAND = 616,
+ CTRY_PORTUGAL = 620,
+ CTRY_PUERTO_RICO = 630,
+ CTRY_QATAR = 634,
+ CTRY_ROMANIA = 642,
+ CTRY_RUSSIA = 643,
+ CTRY_SAUDI_ARABIA = 682,
+ CTRY_SERBIA_MONTENEGRO = 891,
+ CTRY_SINGAPORE = 702,
+ CTRY_SLOVAKIA = 703,
+ CTRY_SLOVENIA = 705,
+ CTRY_SOUTH_AFRICA = 710,
+ CTRY_SPAIN = 724,
+ CTRY_SRI_LANKA = 144,
+ CTRY_SWEDEN = 752,
+ CTRY_SWITZERLAND = 756,
+ CTRY_SYRIA = 760,
+ CTRY_TAIWAN = 158,
+ CTRY_THAILAND = 764,
+ CTRY_TRINIDAD_Y_TOBAGO = 780,
+ CTRY_TUNISIA = 788,
+ CTRY_TURKEY = 792,
+ CTRY_UAE = 784,
+ CTRY_UKRAINE = 804,
+ CTRY_UNITED_KINGDOM = 826,
+ CTRY_UNITED_STATES = 840,
+ CTRY_UNITED_STATES_FCC49 = 842,
+ CTRY_URUGUAY = 858,
+ CTRY_UZBEKISTAN = 860,
+ CTRY_VENEZUELA = 862,
+ CTRY_VIET_NAM = 704,
+ CTRY_YEMEN = 887,
+ CTRY_ZIMBABWE = 716,
+ CTRY_JAPAN1 = 393,
+ CTRY_JAPAN2 = 394,
+ CTRY_JAPAN3 = 395,
+ CTRY_JAPAN4 = 396,
+ CTRY_JAPAN5 = 397,
+ CTRY_JAPAN6 = 4006,
+ CTRY_JAPAN7 = 4007,
+ CTRY_JAPAN8 = 4008,
+ CTRY_JAPAN9 = 4009,
+ CTRY_JAPAN10 = 4010,
+ CTRY_JAPAN11 = 4011,
+ CTRY_JAPAN12 = 4012,
+ CTRY_JAPAN13 = 4013,
+ CTRY_JAPAN14 = 4014,
+ CTRY_JAPAN15 = 4015,
+ CTRY_JAPAN16 = 4016,
+ CTRY_JAPAN17 = 4017,
+ CTRY_JAPAN18 = 4018,
+ CTRY_JAPAN19 = 4019,
+ CTRY_JAPAN20 = 4020,
+ CTRY_JAPAN21 = 4021,
+ CTRY_JAPAN22 = 4022,
+ CTRY_JAPAN23 = 4023,
+ CTRY_JAPAN24 = 4024,
+ CTRY_JAPAN25 = 4025,
+ CTRY_JAPAN26 = 4026,
+ CTRY_JAPAN27 = 4027,
+ CTRY_JAPAN28 = 4028,
+ CTRY_JAPAN29 = 4029,
+ CTRY_JAPAN30 = 4030,
+ CTRY_JAPAN31 = 4031,
+ CTRY_JAPAN32 = 4032,
+ CTRY_JAPAN33 = 4033,
+ CTRY_JAPAN34 = 4034,
+ CTRY_JAPAN35 = 4035,
+ CTRY_JAPAN36 = 4036,
+ CTRY_JAPAN37 = 4037,
+ CTRY_JAPAN38 = 4038,
+ CTRY_JAPAN39 = 4039,
+ CTRY_JAPAN40 = 4040,
+ CTRY_JAPAN41 = 4041,
+ CTRY_JAPAN42 = 4042,
+ CTRY_JAPAN43 = 4043,
+ CTRY_JAPAN44 = 4044,
+ CTRY_JAPAN45 = 4045,
+ CTRY_JAPAN46 = 4046,
+ CTRY_JAPAN47 = 4047,
+ CTRY_JAPAN48 = 4048,
+ CTRY_JAPAN49 = 4049,
+ CTRY_JAPAN50 = 4050,
+ CTRY_JAPAN51 = 4051,
+ CTRY_JAPAN52 = 4052,
+ CTRY_JAPAN53 = 4053,
+ CTRY_JAPAN54 = 4054,
+ CTRY_JAPAN55 = 4055,
+ CTRY_JAPAN56 = 4056,
+ CTRY_JAPAN57 = 4057,
+ CTRY_JAPAN58 = 4058,
+ CTRY_JAPAN59 = 4059,
+ CTRY_AUSTRALIA2 = 5000,
+ CTRY_CANADA2 = 5001,
+ CTRY_BELGIUM2 = 5002
+};
+
+void ath9k_regd_get_current_country(struct ath_hal *ah,
+ struct ath9k_country_entry *ctry);
+
+#endif
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
new file mode 100644
index 00000000000..9112c030b1e
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -0,0 +1,1915 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REGD_COMMON_H
+#define REGD_COMMON_H
+
+enum EnumRd {
+ NO_ENUMRD = 0x00,
+ NULL1_WORLD = 0x03,
+ NULL1_ETSIB = 0x07,
+ NULL1_ETSIC = 0x08,
+ FCC1_FCCA = 0x10,
+ FCC1_WORLD = 0x11,
+ FCC4_FCCA = 0x12,
+ FCC5_FCCA = 0x13,
+ FCC6_FCCA = 0x14,
+
+ FCC2_FCCA = 0x20,
+ FCC2_WORLD = 0x21,
+ FCC2_ETSIC = 0x22,
+ FCC6_WORLD = 0x23,
+ FRANCE_RES = 0x31,
+ FCC3_FCCA = 0x3A,
+ FCC3_WORLD = 0x3B,
+
+ ETSI1_WORLD = 0x37,
+ ETSI3_ETSIA = 0x32,
+ ETSI2_WORLD = 0x35,
+ ETSI3_WORLD = 0x36,
+ ETSI4_WORLD = 0x30,
+ ETSI4_ETSIC = 0x38,
+ ETSI5_WORLD = 0x39,
+ ETSI6_WORLD = 0x34,
+ ETSI_RESERVED = 0x33,
+
+ MKK1_MKKA = 0x40,
+ MKK1_MKKB = 0x41,
+ APL4_WORLD = 0x42,
+ MKK2_MKKA = 0x43,
+ APL_RESERVED = 0x44,
+ APL2_WORLD = 0x45,
+ APL2_APLC = 0x46,
+ APL3_WORLD = 0x47,
+ MKK1_FCCA = 0x48,
+ APL2_APLD = 0x49,
+ MKK1_MKKA1 = 0x4A,
+ MKK1_MKKA2 = 0x4B,
+ MKK1_MKKC = 0x4C,
+
+ APL3_FCCA = 0x50,
+ APL1_WORLD = 0x52,
+ APL1_FCCA = 0x53,
+ APL1_APLA = 0x54,
+ APL1_ETSIC = 0x55,
+ APL2_ETSIC = 0x56,
+ APL5_WORLD = 0x58,
+ APL6_WORLD = 0x5B,
+ APL7_FCCA = 0x5C,
+ APL8_WORLD = 0x5D,
+ APL9_WORLD = 0x5E,
+
+ WOR0_WORLD = 0x60,
+ WOR1_WORLD = 0x61,
+ WOR2_WORLD = 0x62,
+ WOR3_WORLD = 0x63,
+ WOR4_WORLD = 0x64,
+ WOR5_ETSIC = 0x65,
+
+ WOR01_WORLD = 0x66,
+ WOR02_WORLD = 0x67,
+ EU1_WORLD = 0x68,
+
+ WOR9_WORLD = 0x69,
+ WORA_WORLD = 0x6A,
+ WORB_WORLD = 0x6B,
+
+ MKK3_MKKB = 0x80,
+ MKK3_MKKA2 = 0x81,
+ MKK3_MKKC = 0x82,
+
+ MKK4_MKKB = 0x83,
+ MKK4_MKKA2 = 0x84,
+ MKK4_MKKC = 0x85,
+
+ MKK5_MKKB = 0x86,
+ MKK5_MKKA2 = 0x87,
+ MKK5_MKKC = 0x88,
+
+ MKK6_MKKB = 0x89,
+ MKK6_MKKA2 = 0x8A,
+ MKK6_MKKC = 0x8B,
+
+ MKK7_MKKB = 0x8C,
+ MKK7_MKKA2 = 0x8D,
+ MKK7_MKKC = 0x8E,
+
+ MKK8_MKKB = 0x8F,
+ MKK8_MKKA2 = 0x90,
+ MKK8_MKKC = 0x91,
+
+ MKK14_MKKA1 = 0x92,
+ MKK15_MKKA1 = 0x93,
+
+ MKK10_FCCA = 0xD0,
+ MKK10_MKKA1 = 0xD1,
+ MKK10_MKKC = 0xD2,
+ MKK10_MKKA2 = 0xD3,
+
+ MKK11_MKKA = 0xD4,
+ MKK11_FCCA = 0xD5,
+ MKK11_MKKA1 = 0xD6,
+ MKK11_MKKC = 0xD7,
+ MKK11_MKKA2 = 0xD8,
+
+ MKK12_MKKA = 0xD9,
+ MKK12_FCCA = 0xDA,
+ MKK12_MKKA1 = 0xDB,
+ MKK12_MKKC = 0xDC,
+ MKK12_MKKA2 = 0xDD,
+
+ MKK13_MKKB = 0xDE,
+
+ MKK3_MKKA = 0xF0,
+ MKK3_MKKA1 = 0xF1,
+ MKK3_FCCA = 0xF2,
+ MKK4_MKKA = 0xF3,
+ MKK4_MKKA1 = 0xF4,
+ MKK4_FCCA = 0xF5,
+ MKK9_MKKA = 0xF6,
+ MKK10_MKKA = 0xF7,
+ MKK6_MKKA1 = 0xF8,
+ MKK6_FCCA = 0xF9,
+ MKK7_MKKA1 = 0xFA,
+ MKK7_FCCA = 0xFB,
+ MKK9_FCCA = 0xFC,
+ MKK9_MKKA1 = 0xFD,
+ MKK9_MKKC = 0xFE,
+ MKK9_MKKA2 = 0xFF,
+
+ APL1 = 0x0150,
+ APL2 = 0x0250,
+ APL3 = 0x0350,
+ APL4 = 0x0450,
+ APL5 = 0x0550,
+ APL6 = 0x0650,
+ APL7 = 0x0750,
+ APL8 = 0x0850,
+ APL9 = 0x0950,
+ APL10 = 0x1050,
+
+ ETSI1 = 0x0130,
+ ETSI2 = 0x0230,
+ ETSI3 = 0x0330,
+ ETSI4 = 0x0430,
+ ETSI5 = 0x0530,
+ ETSI6 = 0x0630,
+ ETSIA = 0x0A30,
+ ETSIB = 0x0B30,
+ ETSIC = 0x0C30,
+
+ FCC1 = 0x0110,
+ FCC2 = 0x0120,
+ FCC3 = 0x0160,
+ FCC4 = 0x0165,
+ FCC5 = 0x0510,
+ FCC6 = 0x0610,
+ FCCA = 0x0A10,
+
+ APLD = 0x0D50,
+
+ MKK1 = 0x0140,
+ MKK2 = 0x0240,
+ MKK3 = 0x0340,
+ MKK4 = 0x0440,
+ MKK5 = 0x0540,
+ MKK6 = 0x0640,
+ MKK7 = 0x0740,
+ MKK8 = 0x0840,
+ MKK9 = 0x0940,
+ MKK10 = 0x0B40,
+ MKK11 = 0x1140,
+ MKK12 = 0x1240,
+ MKK13 = 0x0C40,
+ MKK14 = 0x1440,
+ MKK15 = 0x1540,
+ MKKA = 0x0A40,
+ MKKC = 0x0A50,
+
+ NULL1 = 0x0198,
+ WORLD = 0x0199,
+ DEBUG_REG_DMN = 0x01ff,
+};
+
+enum {
+ FCC = 0x10,
+ MKK = 0x40,
+ ETSI = 0x30,
+};
+
+enum {
+ NO_REQ = 0x00000000,
+ DISALLOW_ADHOC_11A = 0x00000001,
+ DISALLOW_ADHOC_11A_TURB = 0x00000002,
+ NEED_NFC = 0x00000004,
+
+ ADHOC_PER_11D = 0x00000008,
+ ADHOC_NO_11A = 0x00000010,
+
+ PUBLIC_SAFETY_DOMAIN = 0x00000020,
+ LIMIT_FRAME_4MS = 0x00000040,
+
+ NO_HOSTAP = 0x00000080,
+
+ REQ_MASK = 0x000000FF,
+};
+
+#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \
+	(~(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB)))
+#define REG_DOMAIN_5GHZ_MASK REQ_MASK
+
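+/*
+ * Map each combined EEPROM regulatory code to its unitary 5GHz and 2GHz
+ * domains, per-band request flags, passive-scan mask and, where the code
+ * implies a single country, that country code.
+ */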
+static struct reg_dmn_pair_mapping regDomainPairs[] = {
+ {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN, NO_REQ, NO_REQ,
+ PSCAN_DEFER, 0},
+ {NULL1_WORLD, NULL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {NULL1_ETSIB, NULL1, ETSIB, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {NULL1_ETSIC, NULL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+ {FCC2_FCCA, FCC2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC2_WORLD, FCC2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC2_ETSIC, FCC2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC3_FCCA, FCC3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC3_WORLD, FCC3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC4_FCCA, FCC4, FCCA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {FCC5_FCCA, FCC5, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC6_FCCA, FCC6, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC6_WORLD, FCC6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+ {ETSI1_WORLD, ETSI1, WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {ETSI2_WORLD, ETSI2, WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {ETSI3_WORLD, ETSI3, WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {ETSI4_WORLD, ETSI4, WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {ETSI5_WORLD, ETSI5, WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {ETSI6_WORLD, ETSI6, WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+
+ {ETSI3_ETSIA, ETSI3, WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {FRANCE_RES, ETSI3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+ {FCC1_WORLD, FCC1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {FCC1_FCCA, FCC1, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL1_WORLD, APL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL2_WORLD, APL2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL3_WORLD, APL3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL4_WORLD, APL4, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL5_WORLD, APL5, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL6_WORLD, APL6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL8_WORLD, APL8, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL9_WORLD, APL9, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+ {APL3_FCCA, APL3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL1_ETSIC, APL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {APL2_ETSIC, APL2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL2_APLD, APL2, APLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+ {MKK1_MKKA, MKK1, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA, CTRY_JAPAN},
+ {MKK1_MKKB, MKK1, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN1},
+ {MKK1_FCCA, MKK1, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1, CTRY_JAPAN2},
+ {MKK1_MKKA1, MKK1, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN4},
+ {MKK1_MKKA2, MKK1, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN5},
+ {MKK1_MKKC, MKK1, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1, CTRY_JAPAN6},
+
+ {MKK2_MKKA, MKK2, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN3},
+
+ {MKK3_MKKA, MKK3, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKKA, CTRY_JAPAN25},
+ {MKK3_MKKB, MKK3, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN7},
+ {MKK3_MKKA1, MKK3, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN26},
+ {MKK3_MKKA2, MKK3, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN8},
+ {MKK3_MKKC, MKK3, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ NO_PSCAN, CTRY_JAPAN9},
+ {MKK3_FCCA, MKK3, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ NO_PSCAN, CTRY_JAPAN27},
+
+ {MKK4_MKKA, MKK4, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3, CTRY_JAPAN36},
+ {MKK4_MKKB, MKK4, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN10},
+ {MKK4_MKKA1, MKK4, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28},
+ {MKK4_MKKA2, MKK4, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11},
+ {MKK4_MKKC, MKK4, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3, CTRY_JAPAN12},
+ {MKK4_FCCA, MKK4, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3, CTRY_JAPAN29},
+
+ {MKK5_MKKB, MKK5, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN13},
+ {MKK5_MKKA2, MKK5, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN14},
+ {MKK5_MKKC, MKK5, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3, CTRY_JAPAN15},
+
+ {MKK6_MKKB, MKK6, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN16},
+ {MKK6_MKKA1, MKK6, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN30},
+ {MKK6_MKKA2, MKK6, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN17},
+ {MKK6_MKKC, MKK6, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1, CTRY_JAPAN18},
+ {MKK6_FCCA, MKK6, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ NO_PSCAN, CTRY_JAPAN31},
+
+ {MKK7_MKKB, MKK7, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN19},
+ {MKK7_MKKA1, MKK7, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN32},
+ {MKK7_MKKA2, MKK7, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
+ CTRY_JAPAN20},
+ {MKK7_MKKC, MKK7, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN21},
+ {MKK7_FCCA, MKK7, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN33},
+
+ {MKK8_MKKB, MKK8, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN22},
+ {MKK8_MKKA2, MKK8, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
+ CTRY_JAPAN23},
+ {MKK8_MKKC, MKK8, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN24},
+
+ {MKK9_MKKA, MKK9, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK2 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN34},
+ {MKK9_FCCA, MKK9, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ NO_PSCAN, CTRY_JAPAN37},
+ {MKK9_MKKA1, MKK9, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38},
+ {MKK9_MKKA2, MKK9, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN40},
+ {MKK9_MKKC, MKK9, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ NO_PSCAN, CTRY_JAPAN39},
+
+ {MKK10_MKKA, MKK10, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKK3, CTRY_JAPAN35},
+ {MKK10_FCCA, MKK10, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ NO_PSCAN, CTRY_JAPAN41},
+ {MKK10_MKKA1, MKK10, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN42},
+ {MKK10_MKKA2, MKK10, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN44},
+ {MKK10_MKKC, MKK10, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ NO_PSCAN, CTRY_JAPAN43},
+
+ {MKK11_MKKA, MKK11, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3, CTRY_JAPAN45},
+ {MKK11_FCCA, MKK11, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3, CTRY_JAPAN46},
+ {MKK11_MKKA1, MKK11, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN47},
+ {MKK11_MKKA2, MKK11, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN49},
+ {MKK11_MKKC, MKK11, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK3, CTRY_JAPAN48},
+
+ {MKK12_MKKA, MKK12, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN50},
+ {MKK12_FCCA, MKK12, FCCA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN51},
+ {MKK12_MKKA1, MKK12, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G,
+ CTRY_JAPAN52},
+ {MKK12_MKKA2, MKK12, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
+ CTRY_JAPAN54},
+ {MKK12_MKKC, MKK12, MKKC,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN53},
+
+ {MKK13_MKKB, MKK13, MKKA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+ LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+ CTRY_JAPAN57},
+
+ {MKK14_MKKA1, MKK14, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN58},
+ {MKK15_MKKA1, MKK15, MKKA,
+ DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+ PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN59},
+
+ {WOR0_WORLD, WOR0_WORLD, WOR0_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
+ 0},
+ {WOR1_WORLD, WOR1_WORLD, WOR1_WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {WOR2_WORLD, WOR2_WORLD, WOR2_WORLD, DISALLOW_ADHOC_11A_TURB,
+ NO_REQ, PSCAN_DEFER, 0},
+ {WOR3_WORLD, WOR3_WORLD, WOR3_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
+ 0},
+ {WOR4_WORLD, WOR4_WORLD, WOR4_WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {WOR5_ETSIC, WOR5_ETSIC, WOR5_ETSIC,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {WOR01_WORLD, WOR01_WORLD, WOR01_WORLD, NO_REQ, NO_REQ,
+ PSCAN_DEFER, 0},
+ {WOR02_WORLD, WOR02_WORLD, WOR02_WORLD, NO_REQ, NO_REQ,
+ PSCAN_DEFER, 0},
+ {EU1_WORLD, EU1_WORLD, EU1_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+ {WOR9_WORLD, WOR9_WORLD, WOR9_WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {WORA_WORLD, WORA_WORLD, WORA_WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+ {WORB_WORLD, WORB_WORLD, WORB_WORLD,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
+ 0},
+};
+
+#define NO_INTERSECT_REQ 0xFFFFFFFF
+#define NO_UNION_REQ 0
+
+static struct country_code_to_enum_rd allCountries[] = {
+ {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, YES, NO,
+ NO, NO, 7000},
+ {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, YES, NO,
+ NO, NO, 7000},
+ {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, YES,
+ NO, YES, NO, 7000},
+ {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_AUSTRALIA, FCC2_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_AUSTRALIA2, FCC6_WORLD, "AU", "AUSTRALIA2", YES, YES, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_AUSTRIA, ETSI1_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, YES, YES,
+ YES, NO, 7000},
+ {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_BELGIUM2, ETSI4_WORLD, "BL", "BELGIUM", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+	{CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLIVIA", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+	{CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", "BOSNIA HERZEGOVINA", YES, NO,
+ YES, YES, YES, YES, NO, 7000},
+ {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", YES, NO, NO, YES, NO,
+ YES, NO, 7000},
+ {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN", "BRUNEI DARUSSALAM",
+ YES, YES, YES, YES, YES, YES, YES, 7000},
+ {CTRY_BULGARIA, ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_CANADA, FCC2_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_CANADA2, FCC6_FCCA, "CA", "CANADA2", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, YES,
+ YES, YES, NO, 7000},
+ {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES,
+ YES, YES, YES, NO, 7000},
+ {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, YES,
+ YES, YES, NO, 7000},
+ {CTRY_CYPRUS, ETSI1_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC",
+ YES, YES, YES, YES, YES, YES, YES, 7000},
+ {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, YES, YES,
+ YES, NO, 7000},
+ {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, YES, YES,
+ YES, NO, 7000},
+ {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES,
+ YES, YES, YES, NO, 7000},
+ {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_HUNGARY, ETSI1_WORLD, "HU", "HUNGARY", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, YES, YES,
+ YES, NO, 7000},
+ {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, YES,
+ YES, YES, NO, 7000},
+ {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, YES,
+ YES, 7000},
+ {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_ISRAEL, NULL1_WORLD, "IL", "ISRAEL", YES, NO, YES, YES, YES,
+ NO, NO, 7000},
+ {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+
+ {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, YES, YES, YES,
+ YES, 7000},
+ {CTRY_JAPAN1, MKK1_MKKB, "JP", "JAPAN1", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN2, MKK1_FCCA, "JP", "JAPAN2", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN3, MKK2_MKKA, "JP", "JAPAN3", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN4, MKK1_MKKA1, "JP", "JAPAN4", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN5, MKK1_MKKA2, "JP", "JAPAN5", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN6, MKK1_MKKC, "JP", "JAPAN6", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN7, MKK3_MKKB, "JP", "JAPAN7", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN8, MKK3_MKKA2, "JP", "JAPAN8", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN9, MKK3_MKKC, "JP", "JAPAN9", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN10, MKK4_MKKB, "JP", "JAPAN10", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN11, MKK4_MKKA2, "JP", "JAPAN11", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN12, MKK4_MKKC, "JP", "JAPAN12", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN13, MKK5_MKKB, "JP", "JAPAN13", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN14, MKK5_MKKA2, "JP", "JAPAN14", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN15, MKK5_MKKC, "JP", "JAPAN15", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN16, MKK6_MKKB, "JP", "JAPAN16", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN17, MKK6_MKKA2, "JP", "JAPAN17", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN18, MKK6_MKKC, "JP", "JAPAN18", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN19, MKK7_MKKB, "JP", "JAPAN19", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN20, MKK7_MKKA2, "JP", "JAPAN20", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN21, MKK7_MKKC, "JP", "JAPAN21", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN22, MKK8_MKKB, "JP", "JAPAN22", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN23, MKK8_MKKA2, "JP", "JAPAN23", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN24, MKK8_MKKC, "JP", "JAPAN24", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN25, MKK3_MKKA, "JP", "JAPAN25", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN26, MKK3_MKKA1, "JP", "JAPAN26", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN27, MKK3_FCCA, "JP", "JAPAN27", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN28, MKK4_MKKA1, "JP", "JAPAN28", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN29, MKK4_FCCA, "JP", "JAPAN29", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN30, MKK6_MKKA1, "JP", "JAPAN30", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN31, MKK6_FCCA, "JP", "JAPAN31", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN32, MKK7_MKKA1, "JP", "JAPAN32", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN33, MKK7_FCCA, "JP", "JAPAN33", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN34, MKK9_MKKA, "JP", "JAPAN34", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN35, MKK10_MKKA, "JP", "JAPAN35", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN36, MKK4_MKKA, "JP", "JAPAN36", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN37, MKK9_FCCA, "JP", "JAPAN37", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN38, MKK9_MKKA1, "JP", "JAPAN38", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN39, MKK9_MKKC, "JP", "JAPAN39", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN40, MKK9_MKKA2, "JP", "JAPAN40", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN41, MKK10_FCCA, "JP", "JAPAN41", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN42, MKK10_MKKA1, "JP", "JAPAN42", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN43, MKK10_MKKC, "JP", "JAPAN43", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN44, MKK10_MKKA2, "JP", "JAPAN44", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN45, MKK11_MKKA, "JP", "JAPAN45", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN46, MKK11_FCCA, "JP", "JAPAN46", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN47, MKK11_MKKA1, "JP", "JAPAN47", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN48, MKK11_MKKC, "JP", "JAPAN48", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN49, MKK11_MKKA2, "JP", "JAPAN49", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN50, MKK12_MKKA, "JP", "JAPAN50", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN51, MKK12_FCCA, "JP", "JAPAN51", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN52, MKK12_MKKA1, "JP", "JAPAN52", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN53, MKK12_MKKC, "JP", "JAPAN53", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN54, MKK12_MKKA2, "JP", "JAPAN54", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JAPAN57, MKK13_MKKB, "JP", "JAPAN57", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN58, MKK14_MKKA1, "JP", "JAPAN58", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+ {CTRY_JAPAN59, MKK15_MKKA1, "JP", "JAPAN59", YES, NO, NO, YES, YES,
+ YES, YES, 7000},
+
+ {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, YES, YES,
+ YES, NO, 7000},
+ {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES,
+ YES, YES, NO, NO, 7000},
+ {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO,
+ YES, YES, YES, YES, 7000},
+ {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO,
+ YES, NO, YES, NO, 7000},
+ {CTRY_KOREA_ROC2, APL2_WORLD, "K2", "KOREA REPUBLIC2", YES, NO, NO,
+ YES, NO, YES, NO, 7000},
+ {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3", YES, NO, NO,
+ YES, NO, YES, NO, 7000},
+ {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, YES, YES,
+ NO, NO, 7000},
+ {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO,
+ YES, YES, YES, YES, YES, 7000},
+ {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", YES, NO, NO, YES, NO,
+ YES, NO, 7000},
+ {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_NEPAL, APL1_WORLD, "NP", "NEPAL", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN",
+ "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, YES, YES, 7000},
+ {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES,
+ YES, YES, YES, NO, 7000},
+ {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, YES, YES, YES,
+ NO, 7000},
+ {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG", "PAPUA NEW GUINEA", YES,
+ YES, YES, YES, YES, YES, YES, 7000},
+ {CTRY_PERU, APL1_WORLD, "PE", "PERU", YES, NO, YES, YES, YES, YES,
+ NO, 7000},
+ {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, YES, YES,
+ NO, NO, 7000},
+ {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, YES, YES,
+ NO, NO, 7000},
+ {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO,
+ YES, YES, YES, NO, NO, 7000},
+ {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO",
+ YES, NO, YES, YES, YES, YES, YES, 7000},
+ {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC", YES, NO, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES,
+ YES, YES, YES, 7000},
+ {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES,
+ YES, YES, YES, NO, 7000},
+ {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_SRI_LANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, YES,
+ YES, YES, NO, 7000},
+ {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, YES, YES,
+ NO, NO, 7000},
+ {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES,
+ YES, YES, 7000},
+ {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT", "TRINIDAD & TOBAGO",
+ YES, NO, YES, YES, YES, YES, NO, 7000},
+ {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, YES,
+ YES, YES, NO, 7000},
+ {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, YES, YES,
+ YES, NO, 7000},
+ {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES,
+ YES, YES, NO, NO, 7000},
+ {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO,
+ YES, YES, YES, YES, YES, 7000},
+ {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES,
+ YES, YES, YES, YES, YES, 5825},
+ {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS",
+ "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, YES,
+ YES, 7000},
+ {CTRY_URUGUAY, APL2_WORLD, "UY", "URUGUAY", YES, NO, YES, YES, YES,
+ YES, NO, 7000},
+ {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES,
+ YES, YES, YES, YES, 7000},
+ {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, YES,
+ YES, YES, NO, 7000},
+ {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, YES,
+ YES, NO, NO, 7000},
+ {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, YES, YES,
+ NO, NO, 7000},
+ {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, YES,
+ YES, NO, NO, 7000}
+};
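
Each allCountries[] row ties a CTRY_* code to a regdomain-pair enum, an ISO alpha-2 code, a display name, a set of band/mode capability flags and a maximum 5 GHz frequency. A minimal lookup sketch over this table, assuming the struct exposes the ISO string as isoName (the struct definition is not in this hunk, and the driver's real helpers may differ):

/* Illustrative sketch, not from the patch: find a country entry by its
 * ISO alpha-2 code ("US", "JP", ...); returns NULL when unknown. */
static struct country_code_to_enum_rd *
example_find_country(const char *alpha2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
		if (!strncmp(allCountries[i].isoName, alpha2, 2))
			return &allCountries[i];
	}
	return NULL;
}
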
+
+enum {
+ NO_DFS = 0x0000000000000000ULL,
+ DFS_FCC3 = 0x0000000000000001ULL,
+ DFS_ETSI = 0x0000000000000002ULL,
+ DFS_MKK4 = 0x0000000000000004ULL,
+};
+
+enum {
+ F1_4915_4925,
+ F1_4935_4945,
+ F1_4920_4980,
+ F1_4942_4987,
+ F1_4945_4985,
+ F1_4950_4980,
+ F1_5035_5040,
+ F1_5040_5080,
+ F1_5055_5055,
+
+ F1_5120_5240,
+
+ F1_5170_5230,
+ F2_5170_5230,
+
+ F1_5180_5240,
+ F2_5180_5240,
+ F3_5180_5240,
+ F4_5180_5240,
+ F5_5180_5240,
+ F6_5180_5240,
+ F7_5180_5240,
+ F8_5180_5240,
+
+ F1_5180_5320,
+
+ F1_5240_5280,
+
+ F1_5260_5280,
+
+ F1_5260_5320,
+ F2_5260_5320,
+ F3_5260_5320,
+ F4_5260_5320,
+ F5_5260_5320,
+ F6_5260_5320,
+
+ F1_5260_5700,
+
+ F1_5280_5320,
+
+ F1_5500_5580,
+
+ F1_5500_5620,
+
+ F1_5500_5700,
+ F2_5500_5700,
+ F3_5500_5700,
+ F4_5500_5700,
+ F5_5500_5700,
+
+ F1_5660_5700,
+
+ F1_5745_5805,
+ F2_5745_5805,
+ F3_5745_5805,
+
+ F1_5745_5825,
+ F2_5745_5825,
+ F3_5745_5825,
+ F4_5745_5825,
+ F5_5745_5825,
+ F6_5745_5825,
+
+ W1_4920_4980,
+ W1_5040_5080,
+ W1_5170_5230,
+ W1_5180_5240,
+ W1_5260_5320,
+ W1_5745_5825,
+ W1_5500_5700,
+ A_DEMO_ALL_CHANNELS
+};
+
+static struct RegDmnFreqBand regDmn5GhzFreq[] = {
+ {4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
+ {4935, 4945, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
+ {4920, 4980, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7},
+ {4942, 4987, 27, 6, 5, 5, NO_DFS, PSCAN_FCC, 0},
+ {4945, 4985, 30, 6, 10, 5, NO_DFS, PSCAN_FCC, 0},
+ {4950, 4980, 33, 6, 20, 5, NO_DFS, PSCAN_FCC, 0},
+ {5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
+ {5040, 5080, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 2},
+ {5055, 5055, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
+
+ {5120, 5240, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
+
+ {5170, 5230, 23, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
+ {5170, 5230, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
+
+ {5180, 5240, 15, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
+ {5180, 5240, 17, 6, 20, 20, NO_DFS, NO_PSCAN, 1},
+ {5180, 5240, 18, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
+ {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
+ {5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
+ {5180, 5240, 23, 6, 20, 20, NO_DFS, PSCAN_FCC, 0},
+ {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK3, 0},
+ {5180, 5240, 23, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
+
+ {5180, 5320, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
+
+ {5240, 5280, 23, 0, 20, 20, DFS_FCC3, PSCAN_FCC | PSCAN_ETSI, 0},
+
+ {5260, 5280, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
+ PSCAN_FCC | PSCAN_ETSI, 0},
+
+ {5260, 5320, 18, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
+ PSCAN_FCC | PSCAN_ETSI, 0},
+
+ {5260, 5320, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
+ PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0},
+
+
+ {5260, 5320, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI,
+ PSCAN_FCC | PSCAN_ETSI, 2},
+ {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2},
+ {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
+ {5260, 5320, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
+
+ {5260, 5700, 5, 6, 20, 20, DFS_FCC3 | DFS_ETSI, NO_PSCAN, 0},
+
+ {5280, 5320, 17, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
+
+ {5500, 5580, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
+
+ {5500, 5620, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
+
+ {5500, 5700, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 4},
+ {5500, 5700, 27, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
+ PSCAN_FCC | PSCAN_ETSI, 0},
+ {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
+ PSCAN_FCC | PSCAN_ETSI, 0},
+ {5500, 5700, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
+ PSCAN_MKK3 | PSCAN_FCC, 0},
+ {5500, 5700, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
+
+ {5660, 5700, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
+
+ {5745, 5805, 23, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
+ {5745, 5805, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
+ {5745, 5805, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
+ {5745, 5825, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
+ {5745, 5825, 17, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
+ {5745, 5825, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
+ {5745, 5825, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
+ {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 3},
+ {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
+
+
+ {4920, 4980, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
+ {5040, 5080, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
+ {5170, 5230, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
+ {5180, 5240, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
+ {5260, 5320, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
+ {5745, 5825, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
+ {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
+ {4920, 6100, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
+};
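
Each regDmn5GhzFreq[] row spans a low/high centre frequency in MHz and carries per-band power limits, antenna gain, channel bandwidth and separation, DFS and passive-scan requirements, and a regulatory class. A hedged sketch of matching a frequency against one of these bands; the field names lowChannel, highChannel and useDfs follow the apparent column order and are assumptions here:

/* Illustrative sketch, not from the patch: band membership and DFS
 * requirement checks against a RegDmnFreqBand row (field names assumed). */
static bool example_freq_in_band(const struct RegDmnFreqBand *b, u16 freq)
{
	return freq >= b->lowChannel && freq <= b->highChannel;
}

static bool example_band_needs_dfs(const struct RegDmnFreqBand *b, u64 dfs)
{
	return (b->useDfs & dfs) != 0;
}
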
+
+enum {
+ T1_5130_5650,
+ T1_5150_5670,
+
+ T1_5200_5200,
+ T2_5200_5200,
+ T3_5200_5200,
+ T4_5200_5200,
+ T5_5200_5200,
+ T6_5200_5200,
+ T7_5200_5200,
+ T8_5200_5200,
+
+ T1_5200_5280,
+ T2_5200_5280,
+ T3_5200_5280,
+ T4_5200_5280,
+ T5_5200_5280,
+ T6_5200_5280,
+
+ T1_5200_5240,
+ T1_5210_5210,
+ T2_5210_5210,
+ T3_5210_5210,
+ T4_5210_5210,
+ T5_5210_5210,
+ T6_5210_5210,
+ T7_5210_5210,
+ T8_5210_5210,
+ T9_5210_5210,
+ T10_5210_5210,
+ T1_5240_5240,
+
+ T1_5210_5250,
+ T1_5210_5290,
+ T2_5210_5290,
+ T3_5210_5290,
+
+ T1_5280_5280,
+ T2_5280_5280,
+ T1_5290_5290,
+ T2_5290_5290,
+ T3_5290_5290,
+ T1_5250_5290,
+ T2_5250_5290,
+ T3_5250_5290,
+ T4_5250_5290,
+
+ T1_5540_5660,
+ T2_5540_5660,
+ T3_5540_5660,
+ T1_5760_5800,
+ T2_5760_5800,
+ T3_5760_5800,
+ T4_5760_5800,
+ T5_5760_5800,
+ T6_5760_5800,
+ T7_5760_5800,
+
+ T1_5765_5805,
+ T2_5765_5805,
+ T3_5765_5805,
+ T4_5765_5805,
+ T5_5765_5805,
+ T6_5765_5805,
+ T7_5765_5805,
+ T8_5765_5805,
+ T9_5765_5805,
+
+ WT1_5210_5250,
+ WT1_5290_5290,
+ WT1_5540_5660,
+ WT1_5760_5800,
+};
+
+enum {
+ F1_2312_2372,
+ F2_2312_2372,
+
+ F1_2412_2472,
+ F2_2412_2472,
+ F3_2412_2472,
+
+ F1_2412_2462,
+ F2_2412_2462,
+
+ F1_2432_2442,
+
+ F1_2457_2472,
+
+ F1_2467_2472,
+
+ F1_2484_2484,
+ F2_2484_2484,
+
+ F1_2512_2732,
+
+ W1_2312_2372,
+ W1_2412_2412,
+ W1_2417_2432,
+ W1_2437_2442,
+ W1_2447_2457,
+ W1_2462_2462,
+ W1_2467_2467,
+ W2_2467_2467,
+ W1_2472_2472,
+ W2_2472_2472,
+ W1_2484_2484,
+ W2_2484_2484,
+};
+
+static struct RegDmnFreqBand regDmn2GhzFreq[] = {
+ {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
+ {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
+
+ {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
+
+ {2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2484, 2484, 20, 0, 20, 5, NO_DFS,
+ PSCAN_MKKA | PSCAN_MKKA1 | PSCAN_MKKA2, 0},
+
+ {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2412, 2412, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2417, 2432, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2437, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2447, 2457, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2462, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2467, 2467, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
+ {2467, 2467, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
+ {2472, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
+ {2472, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
+ {2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
+ {2484, 2484, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
+};
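
The 2 GHz rows use the same RegDmnFreqBand layout; note that 2484 MHz (Japanese channel 14) appears only with MKK passive-scan flags, and the world-roaming W1_*/W2_* entries mark ECM channels. A small, self-contained sketch for mapping an IEEE 2.4 GHz channel number to the centre frequencies used in this table:

/* Illustrative sketch, not from the patch: IEEE 2.4 GHz channel number
 * to centre frequency in MHz, matching the values in regDmn2GhzFreq[]. */
static u16 example_2ghz_chan_to_freq(int chan)
{
	if (chan == 14)
		return 2484;
	return 2407 + 5 * chan;		/* channels 1..13 */
}
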
+
+enum {
+ G1_2312_2372,
+ G2_2312_2372,
+
+ G1_2412_2472,
+ G2_2412_2472,
+ G3_2412_2472,
+
+ G1_2412_2462,
+ G2_2412_2462,
+
+ G1_2432_2442,
+
+ G1_2457_2472,
+
+ G1_2512_2732,
+
+ G1_2467_2472,
+
+ WG1_2312_2372,
+ WG1_2412_2462,
+ WG1_2467_2472,
+ WG2_2467_2472,
+ G_DEMO_ALL_CHANNELS
+};
+
+static struct RegDmnFreqBand regDmn2Ghz11gFreq[] = {
+ {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
+ {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
+
+ {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+
+ {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
+
+ {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2412, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
+ {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
+ {2467, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
+ {2312, 2732, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
+};
+
+enum {
+ T1_2312_2372,
+ T1_2437_2437,
+ T2_2437_2437,
+ T3_2437_2437,
+ T1_2512_2732
+};
+
+static struct regDomain regDomains[] = {
+
+ {DEBUG_REG_DMN, FCC, DFS_FCC3, NO_PSCAN, NO_REQ,
+ BM(A_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5130_5650, T1_5150_5670, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5200_5240, T1_5280_5280, T1_5540_5660, T1_5765_5805, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BM(F1_2312_2372, F1_2412_2472, F1_2484_2484, F1_2512_2732, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BM(G_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_2312_2372, T1_2437_2437, T1_2512_2732, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1)},
+
+ {APL1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F1_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL3, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F1_5280_5320, F2_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5290_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL4, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F4_5180_5240, F3_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5210_5210, T3_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5200_5200, T3_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F2_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T4_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T4_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL6, ETSI, DFS_ETSI, PSCAN_FCC_T | PSCAN_FCC, NO_REQ,
+ BM(F4_5180_5240, F2_5260_5320, F3_5745_5825, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T2_5210_5210, T1_5250_5290, T1_5760_5800, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T1_5200_5280, T5_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL7, ETSI, DFS_ETSI, PSCAN_ETSI, NO_REQ,
+ BM(F1_5280_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL8, ETSI, NO_DFS, NO_PSCAN,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F6_5260_5320, F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T2_5290_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5280_5280, T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL9, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F1_5180_5320, F1_5500_5620, F3_5745_5805, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {APL10, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F1_5180_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {ETSI1, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F4_5180_5240, F2_5260_5320, F2_5500_5700, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {ETSI2, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F3_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {ETSI3, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {ETSI4, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F3_5180_5240, F1_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T2_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T3_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {ETSI5, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F1_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T4_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T3_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {ETSI6, ETSI, DFS_ETSI, PSCAN_ETSI,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F5_5180_5240, F1_5260_5280, F3_5500_5700, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T1_5210_5250, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T4_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {FCC1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F2_5180_5240, F4_5260_5320, F5_5745_5825, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T6_5210_5210, T2_5250_5290, T6_5760_5800, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T1_5200_5240, T2_5280_5280, T7_5765_5805, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {FCC2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F6_5180_5240, F5_5260_5320, F6_5745_5825, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {FCC3, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
+ BM(F2_5180_5240, F3_5260_5320, F1_5500_5700, F5_5745_5825, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T4_5200_5200, T8_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {FCC4, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
+ BM(F1_4942_4987, F1_4945_4985, F1_4950_4980, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T8_5210_5210, T4_5250_5290, T7_5760_5800, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T1_5200_5240, T1_5280_5280, T9_5765_5805, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {FCC5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BM(F2_5180_5240, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T8_5200_5200, T7_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {FCC6, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ,
+ BM(F8_5180_5240, F5_5260_5320, F1_5500_5580, F1_5660_5700,
+ F6_5745_5825, -1, -1, -1, -1, -1, -1, -1),
+ BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {MKK1, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
+ BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+ {MKK2, MKK, NO_DFS, PSCAN_MKK2, DISALLOW_ADHOC_11A_TURB,
+ BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
+ F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
+ F2_5260_5320, F4_5500_5700, -1, -1),
+ BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK3, MKK, NO_DFS, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+ BM(F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK4, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+ BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T10_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK5, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+ BM(F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK6, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
+ BM(F2_5170_5230, F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK7, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
+ DISALLOW_ADHOC_11A_TURB,
+ BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T5_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK8, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
+ DISALLOW_ADHOC_11A_TURB,
+ BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK9, MKK, NO_DFS, PSCAN_MKK2 | PSCAN_MKK3,
+ DISALLOW_ADHOC_11A_TURB,
+ BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
+ F1_5055_5055, F1_5040_5080, F4_5180_5240, -1, -1, -1, -1, -1),
+ BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK10, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3,
+ DISALLOW_ADHOC_11A_TURB,
+ BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
+ F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320, -1, -1,
+ -1, -1),
+ BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK11, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+ BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
+ F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320,
+ F4_5500_5700, -1, -1, -1),
+ BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK12, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
+ DISALLOW_ADHOC_11A_TURB,
+ BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
+ F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
+ F2_5260_5320, F4_5500_5700, -1, -1),
+ BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK13, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BM(F1_5170_5230, F7_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK14, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
+ BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
+ F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240, -1, -1,
+ -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {MKK15, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
+ BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
+ F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240,
+ F2_5260_5320, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO},
+
+
+ {APLD, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F2_2312_2372, F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(G2_2312_2372, G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BMZERO},
+
+ {ETSIA, NO_CTL, NO_DFS, PSCAN_ETSIA,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {ETSIB, ETSI, NO_DFS, PSCAN_ETSIB,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {ETSIC, ETSI, NO_DFS, PSCAN_ETSIC,
+ DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {FCCA, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {MKKA, MKK, NO_DFS,
+ PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G |
+ PSCAN_MKKA2 | PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F2_2412_2462, F1_2467_2472, F2_2484_2484, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(G2_2412_2462, G1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+ W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
+ -1, -1),
+ BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR01_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
+ ADHOC_PER_11D,
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+ W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
+ W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
+ ADHOC_PER_11D,
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+ W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+ W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472,
+ W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+ W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
+ -1, -1),
+ BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR2_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+ W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
+ -1, -1),
+ BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR3_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR4_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+ BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
+ W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR5_ETSIC, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+ BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WOR9_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+ BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1),
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
+ W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WORA_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+ BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
+ -1, -1, -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {WORB_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+ BM(W1_5260_5320, W1_5180_5240, W1_5500_5700, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1),
+ BMZERO,
+ BMZERO,
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+ W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1),
+ BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+ {NULL1, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO,
+ BMZERO}
+};
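
Every regDomains[] entry carries six BM() bitmaps selecting rows from the band tables above: from the initializers they appear to be three 5 GHz sets (normal plus turbo variants) followed by three 2 GHz sets (11b, 11g and 2 GHz turbo), with -1 padding the unused slots. The BM() helper and the bitmap type are defined earlier in this header; assuming band-enum indices are packed into 64-bit chunks, membership could be tested along these lines:

/* Illustrative sketch, not from the patch: test whether band index 'idx'
 * (e.g. F2_5260_5320) is set in a chunked 64-bit bitmap.  The real
 * BM()/bitmap representation lives earlier in regd_common.h. */
static bool example_band_in_bitmap(const u64 *bm, unsigned int idx)
{
	return (bm[idx / 64] >> (idx % 64)) & 1ULL;
}
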
+
+static const struct cmode modes[] = {
+ {ATH9K_MODE_11A, CHANNEL_A},
+ {ATH9K_MODE_11B, CHANNEL_B},
+ {ATH9K_MODE_11G, CHANNEL_G},
+ {ATH9K_MODE_11NG_HT20, CHANNEL_G_HT20},
+ {ATH9K_MODE_11NG_HT40PLUS, CHANNEL_G_HT40PLUS},
+ {ATH9K_MODE_11NG_HT40MINUS, CHANNEL_G_HT40MINUS},
+ {ATH9K_MODE_11NA_HT20, CHANNEL_A_HT20},
+ {ATH9K_MODE_11NA_HT40PLUS, CHANNEL_A_HT40PLUS},
+ {ATH9K_MODE_11NA_HT40MINUS, CHANNEL_A_HT40MINUS},
+};
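
The modes[] table pairs each ATH9K_MODE_* value with the CHANNEL_* flag set used to tag channels of that type (legacy A/B/G plus the HT20/HT40 variants). A lookup is a simple scan; the field names mode and flags are assumed from the initializer order:

/* Illustrative sketch, not from the patch: map an ATH9K_MODE_* value to
 * its CHANNEL_* flags via the modes[] table (0 if the mode is unknown). */
static u32 example_mode_to_chanflags(u32 mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(modes); i++)
		if (modes[i].mode == mode)
			return modes[i].flags;
	return 0;
}
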
+
+static struct japan_bandcheck j_bandcheck[] = {
+ {F1_5170_5230, AR_EEPROM_EEREGCAP_EN_KK_U1_ODD},
+ {F4_5180_5240, AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN},
+ {F2_5260_5320, AR_EEPROM_EEREGCAP_EN_KK_U2},
+ {F4_5500_5700, AR_EEPROM_EEREGCAP_EN_KK_MIDBAND}
+};
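
j_bandcheck[] gates specific Japanese 5 GHz bands on EEPROM regulatory-capability bits, so a band is only advertised when the matching AR_EEPROM_EEREGCAP_EN_KK_* bit is set. A hedged sketch of that check; the field names freqbandbit and eepromflagtocheck are assumed from the initializer, and regcap would come from the EEPROM read path:

/* Illustrative sketch, not from the patch: is the Japanese band with
 * enum index 'band_idx' permitted by the EEPROM regulatory caps? */
static bool example_japan_band_allowed(u32 regcap, unsigned int band_idx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(j_bandcheck); i++)
		if (j_bandcheck[i].freqbandbit == band_idx)
			return (regcap & j_bandcheck[i].eepromflagtocheck) != 0;
	return true;	/* bands not listed here are not gated */
}
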
+
+
+#endif
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
new file mode 100644
index 00000000000..157f830ee6b
--- /dev/null
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -0,0 +1,2871 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Implementation of transmit path.
+ */
+
+#include "core.h"
+
+#define BITS_PER_BYTE 8
+#define OFDM_PLCP_BITS 22
+#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
+#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
+#define L_STF 8
+#define L_LTF 8
+#define L_SIG 4
+#define HT_SIG 8
+#define HT_STF 4
+#define HT_LTF(_ns) (4 * (_ns))
+#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
+#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
+#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
+#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
+
+#define OFDM_SIFS_TIME 16
+
+static u32 bits_per_symbol[][2] = {
+ /* 20MHz 40MHz */
+ { 26, 54 }, /* 0: BPSK */
+ { 52, 108 }, /* 1: QPSK 1/2 */
+ { 78, 162 }, /* 2: QPSK 3/4 */
+ { 104, 216 }, /* 3: 16-QAM 1/2 */
+ { 156, 324 }, /* 4: 16-QAM 3/4 */
+ { 208, 432 }, /* 5: 64-QAM 2/3 */
+ { 234, 486 }, /* 6: 64-QAM 3/4 */
+ { 260, 540 }, /* 7: 64-QAM 5/6 */
+ { 52, 108 }, /* 8: BPSK */
+ { 104, 216 }, /* 9: QPSK 1/2 */
+ { 156, 324 }, /* 10: QPSK 3/4 */
+ { 208, 432 }, /* 11: 16-QAM 1/2 */
+ { 312, 648 }, /* 12: 16-QAM 3/4 */
+ { 416, 864 }, /* 13: 64-QAM 2/3 */
+ { 468, 972 }, /* 14: 64-QAM 3/4 */
+ { 520, 1080 }, /* 15: 64-QAM 5/6 */
+};
+
+#define IS_HT_RATE(_rate) ((_rate) & 0x80)
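
bits_per_symbol[][] gives the data bits carried per OFDM symbol for HT MCS 0-15 at 20 and 40 MHz, and the macros above convert symbol counts into airtime (4 us per symbol, 3.6 us with a half guard interval). A worked sketch of the kind of arithmetic the transmit path does with these definitions, counting OFDM_PLCP_BITS of PLCP overhead:

/* Illustrative sketch, not from the patch: rough airtime in microseconds
 * for 'pktlen' bytes at HT rate code 'rc', using the macros above. */
static u32 example_ht_airtime(u32 pktlen, u8 rc, bool width40, bool half_gi)
{
	u32 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	u32 bps = bits_per_symbol[HT_RC_2_MCS(rc)][width40 ? 1 : 0];
	u32 nsymbols = DIV_ROUND_UP(nbits, bps);

	return half_gi ? SYMBOL_TIME_HALFGI(nsymbols) : SYMBOL_TIME(nsymbols);
}

For example, 1500 bytes at MCS 7 on a 20 MHz channel (260 bits/symbol) needs ceil((1500*8 + 22) / 260) = 47 symbols, i.e. 188 us with the full guard interval.
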
+
+/*
+ * Insert a chain of ath_buf (descriptors) on a multicast txq
+ * but do NOT start tx DMA on this queue.
+ * NB: must be called with txq lock held
+ */
+
+static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct list_head *head)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf;
+
+ if (list_empty(head))
+ return;
+
+ /*
+ * Insert the frame on the outbound list and
+ * pass it on to the hardware.
+ */
+ bf = list_first_entry(head, struct ath_buf, list);
+
+ /*
+ * The CAB queue is started from the SWBA handler since
+ * frames only go out on DTIM and to avoid possible races.
+ */
+ ath9k_hw_set_interrupts(ah, 0);
+
+	/*
+	 * If there is anything in the mcastq, set the "more data"
+	 * bit on the last frame already in the queue: any frame
+	 * added behind it means more data is pending, no matter
+	 * where the caller comes from.
+	 */
+
+ if (txq->axq_depth) {
+ struct ath_buf *lbf;
+ struct ieee80211_hdr *hdr;
+
+ /*
+ * Add the "more data flag" to the last frame
+ */
+
+ lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
+ hdr = (struct ieee80211_hdr *)
+ ((struct sk_buff *)(lbf->bf_mpdu))->data;
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+
+ /*
+ * Now, concat the frame onto the queue
+ */
+ list_splice_tail_init(head, &txq->axq_q);
+ txq->axq_depth++;
+ txq->axq_totalqueued++;
+ txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
+
+ DPRINTF(sc, ATH_DBG_QUEUE,
+ "%s: txq depth = %d\n", __func__, txq->axq_depth);
+ if (txq->axq_link != NULL) {
+ *txq->axq_link = bf->bf_daddr;
+ DPRINTF(sc, ATH_DBG_XMIT,
+ "%s: link[%u](%p)=%llx (%p)\n",
+ __func__,
+ txq->axq_qnum, txq->axq_link,
+ ito64(bf->bf_daddr), bf->bf_desc);
+ }
+ txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
+ ath9k_hw_set_interrupts(ah, sc->sc_imask);
+}
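
ath_tx_mcastqaddbuf() expects the descriptors to be chained already and the queue lock to be held by the caller; hardware interrupts are masked internally while the queue and link pointer are updated. A hypothetical caller might look like this (ath_get_mcastq() is an invented name for wherever the CAB/multicast queue pointer is obtained):

/* Illustrative sketch, not from the patch: queue a prepared buffer chain
 * on the multicast queue with the locking the function expects.
 * ath_get_mcastq() is hypothetical. */
static void example_queue_mcast(struct ath_softc *sc, struct list_head *chain)
{
	struct ath_txq *txq = ath_get_mcastq(sc);

	spin_lock_bh(&txq->axq_lock);
	ath_tx_mcastqaddbuf(sc, txq, chain);
	spin_unlock_bh(&txq->axq_lock);
}
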
+
+/*
+ * Insert a chain of ath_buf (descriptors) on a txq and
+ * assume the descriptors are already chained together by caller.
+ * NB: must be called with txq lock held
+ */
+
+static void ath_tx_txqaddbuf(struct ath_softc *sc,
+ struct ath_txq *txq, struct list_head *head)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf;
+ /*
+ * Insert the frame on the outbound list and
+ * pass it on to the hardware.
+ */
+
+ if (list_empty(head))
+ return;
+
+ bf = list_first_entry(head, struct ath_buf, list);
+
+ list_splice_tail_init(head, &txq->axq_q);
+ txq->axq_depth++;
+ txq->axq_totalqueued++;
+ txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
+
+ DPRINTF(sc, ATH_DBG_QUEUE,
+ "%s: txq depth = %d\n", __func__, txq->axq_depth);
+
+ if (txq->axq_link == NULL) {
+ ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+ DPRINTF(sc, ATH_DBG_XMIT,
+ "%s: TXDP[%u] = %llx (%p)\n",
+ __func__, txq->axq_qnum,
+ ito64(bf->bf_daddr), bf->bf_desc);
+ } else {
+ *txq->axq_link = bf->bf_daddr;
+ DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
+ __func__,
+ txq->axq_qnum, txq->axq_link,
+ ito64(bf->bf_daddr), bf->bf_desc);
+ }
+ txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
+ ath9k_hw_txstart(ah, txq->axq_qnum);
+}
+
+/* Get transmit rate index using rate in Kbps */
+
+static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
+{
+ int i;
+ int ndx = 0;
+
+ for (i = 0; i < rt->rateCount; i++) {
+ if (rt->info[i].rateKbps == rate) {
+ ndx = i;
+ break;
+ }
+ }
+
+ return ndx;
+}
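
ath_tx_findindex() is a plain linear scan keyed on the rate in Kbps and silently falls back to index 0 (the lowest rate) when the table has no exact match; that is how the multicast rate chosen in fill_min_rates() below ends up as a rate-series index. A trivial usage sketch:

/* Illustrative sketch, not from the patch: resolve a fixed 6 Mbps
 * (6000 Kbps) multicast rate to an index in the active rate table. */
static u8 example_mcast_rate_index(struct ath_softc *sc)
{
	return (u8)ath_tx_findindex(sc->sc_currates, 6000);
}
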
+
+/* Check if it's okay to send out aggregates */
+
+static int ath_aggr_query(struct ath_softc *sc,
+ struct ath_node *an, u8 tidno)
+{
+ struct ath_atx_tid *tid;
+ tid = ATH_AN_2_TID(an, tidno);
+
+ if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
+ return 1;
+ else
+ return 0;
+}
+
+static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
+{
+ enum ath9k_pkt_type htype;
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ /* Calculate Atheros packet type from IEEE80211 packet header */
+
+ if (ieee80211_is_beacon(fc))
+ htype = ATH9K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = ATH9K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = ATH9K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = ATH9K_PKT_TYPE_PSPOLL;
+ else
+ htype = ATH9K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
+static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_tx_info_priv *tx_info_priv;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
+
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
+ txctl->use_minrate = 1;
+ txctl->min_rate = tx_info_priv->min_rate;
+ } else if (ieee80211_is_data(fc)) {
+ if (ieee80211_is_nullfunc(fc) ||
+ /* Port Access Entity (IEEE 802.1X) */
+ (skb->protocol == cpu_to_be16(0x888E))) {
+ txctl->use_minrate = 1;
+ txctl->min_rate = tx_info_priv->min_rate;
+ }
+ if (is_multicast_ether_addr(hdr->addr1))
+ txctl->mcast_rate = tx_info_priv->min_rate;
+ }
+
+}
+
+/* This function sets up additional txctl information, mostly rate related */
+/* FIXME: seqno, ps */
+static int ath_tx_prepare(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ struct ath_rc_series *rcs;
+ struct ath_txq *txq = NULL;
+ const struct ath9k_rate_table *rt;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_tx_info_priv *tx_info_priv;
+ int hdrlen;
+ u8 rix, antenna;
+ __le16 fc;
+ u8 *qc;
+
+ memset(txctl, 0, sizeof(struct ath_tx_control));
+
+ txctl->dev = sc;
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+
+ rt = sc->sc_currates;
+ BUG_ON(!rt);
+
+ /* Fill misc fields */
+
+ spin_lock_bh(&sc->node_lock);
+ txctl->an = ath_node_get(sc, hdr->addr1);
+ /* create a temp node, if the node is not there already */
+ if (!txctl->an)
+ txctl->an = ath_node_attach(sc, hdr->addr1, 0);
+ spin_unlock_bh(&sc->node_lock);
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ txctl->tidno = qc[0] & 0xf;
+ }
+
+ txctl->if_id = 0;
+ txctl->nextfraglen = 0;
+ txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
+ txctl->txpower = MAX_RATE_POWER; /* FIXME */
+
+ /* Fill Key related fields */
+
+ txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
+ txctl->keyix = ATH9K_TXKEYIX_INVALID;
+
+ if (tx_info->control.hw_key) {
+ txctl->keyix = tx_info->control.hw_key->hw_key_idx;
+ txctl->frmlen += tx_info->control.icv_len;
+
+ if (sc->sc_keytype == ATH9K_CIPHER_WEP)
+ txctl->keytype = ATH9K_KEY_TYPE_WEP;
+ else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
+ txctl->keytype = ATH9K_KEY_TYPE_TKIP;
+ else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
+ txctl->keytype = ATH9K_KEY_TYPE_AES;
+ }
+
+ /* Fill packet type */
+
+ txctl->atype = get_hal_packet_type(hdr);
+
+ /* Fill qnum */
+
+ txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+ txq = &sc->sc_txq[txctl->qnum];
+ spin_lock_bh(&txq->axq_lock);
+
+ /* Try to avoid running out of descriptors */
+ if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: TX queue: %d is full, depth: %d\n",
+ __func__,
+ txctl->qnum,
+ txq->axq_depth);
+ ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
+ txq->stopped = 1;
+ spin_unlock_bh(&txq->axq_lock);
+ return -1;
+ }
+
+ spin_unlock_bh(&txq->axq_lock);
+
+ /* Fill rate */
+
+ fill_min_rates(skb, txctl);
+
+ /* Fill flags */
+
+ txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
+
+	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+		txctl->flags |= ATH9K_TXDESC_NOACK;
+	if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
+		txctl->flags |= ATH9K_TXDESC_RTSENA;
+
+ /*
+ * Setup for rate calculations.
+ */
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
+ rcs = tx_info_priv->rcs;
+
+ if (ieee80211_is_data(fc) && !txctl->use_minrate) {
+
+ /* Enable HT only for DATA frames and not for EAPOL */
+ txctl->ht = (hw->conf.ht_conf.ht_supported &&
+ (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ rcs[0].rix = (u8)
+ ath_tx_findindex(rt, txctl->mcast_rate);
+
+ /*
+ * mcast packets are not re-tried.
+ */
+ rcs[0].tries = 1;
+ }
+		/* For HT capable stations, we save tidno for later use.
+		 * We also override the seqno set by the upper layer with
+		 * the one in the tx aggregation state.
+		 *
+		 * First, the fragmentation state is determined.
+		 * If fragmentation is on, the sequence number is
+		 * not overridden, since it has already been
+		 * incremented by the fragmentation routine.
+		 */
+ if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
+ txctl->ht && sc->sc_txaggr) {
+ struct ath_atx_tid *tid;
+
+ tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
+
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
+ IEEE80211_SEQ_SEQ_SHIFT);
+ txctl->seqno = tid->seq_next;
+ INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+ }
+ } else {
+ /* for management and control frames,
+ * or for NULL and EAPOL frames */
+ if (txctl->min_rate)
+ rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
+ else
+ rcs[0].rix = 0;
+ rcs[0].tries = ATH_MGT_TXMAXTRY;
+ }
+ rix = rcs[0].rix;
+
+ /*
+ * Calculate duration. This logically belongs in the 802.11
+ * layer but it lacks sufficient information to calculate it.
+ */
+ if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
+ u16 dur;
+ /*
+ * XXX not right with fragmentation.
+ */
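+		/* Base duration: SIFS + ACK time at the selected rate. */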
+ if (sc->sc_flags & ATH_PREAMBLE_SHORT)
+ dur = rt->info[rix].spAckDuration;
+ else
+ dur = rt->info[rix].lpAckDuration;
+
+		if (ieee80211_has_morefrags(fc)) {
+ dur += dur; /* Add additional 'SIFS + ACK' */
+
+ /*
+ ** Compute size of next fragment in order to compute
+ ** durations needed to update NAV.
+ ** The last fragment uses the ACK duration only.
+ ** Add time for next fragment.
+ */
+ dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
+ txctl->nextfraglen,
+ rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
+ }
+
+ if (ieee80211_has_morefrags(fc) ||
+ (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
+ /*
+ ** Force hardware to use computed duration for next
+ ** fragment by disabling multi-rate retry, which
+ ** updates duration based on the multi-rate
+ ** duration table.
+ */
+ rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
+ rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
+ /* reset tries but keep rate index */
+ rcs[0].tries = ATH_TXMAXTRY;
+ }
+
+ hdr->duration_id = cpu_to_le16(dur);
+ }
+
+ /*
+ * Determine if a tx interrupt should be generated for
+ * this descriptor. We take a tx interrupt to reap
+ * descriptors when the h/w hits an EOL condition or
+ * when the descriptor is specifically marked to generate
+ * an interrupt. We periodically mark descriptors in this
+	 * way to ensure timely replenishing of the supply needed
+	 * for sending frames. Deferring interrupts reduces system
+	 * load and potentially allows more concurrent work to be
+	 * done, but if done too aggressively it can cause senders
+	 * to back up.
+ *
+ * NB: use >= to deal with sc_txintrperiod changing
+ * dynamically through sysctl.
+ */
+ spin_lock_bh(&txq->axq_lock);
+ if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
+ txctl->flags |= ATH9K_TXDESC_INTREQ;
+ txq->axq_intrcnt = 0;
+ }
+ spin_unlock_bh(&txq->axq_lock);
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ antenna = sc->sc_mcastantenna + 1;
+ sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
+ } else
+ antenna = sc->sc_txantenna;
+
+#ifdef USE_LEGACY_HAL
+ txctl->antenna = antenna;
+#endif
+ return 0;
+}
+
+/* To complete a chain of buffers associated with a frame */
+
+static void ath_tx_complete_buf(struct ath_softc *sc,
+ struct ath_buf *bf,
+ struct list_head *bf_q,
+ int txok, int sendbar)
+{
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ath_xmit_status tx_status;
+ dma_addr_t *pa;
+
+ /*
+ * Set retry information.
+ * NB: Don't use the information in the descriptor, because the frame
+ * could be software retried.
+ */
+ tx_status.retries = bf->bf_retries;
+ tx_status.flags = 0;
+
+ if (sendbar)
+ tx_status.flags = ATH_TX_BAR;
+
+ if (!txok) {
+ tx_status.flags |= ATH_TX_ERROR;
+
+ if (bf->bf_isxretried)
+ tx_status.flags |= ATH_TX_XRETRY;
+ }
+ /* Unmap this frame */
+ pa = get_dma_mem_context(bf, bf_dmacontext);
+ pci_unmap_single(sc->pdev,
+ *pa,
+ skb->len,
+ PCI_DMA_TODEVICE);
+ /* complete this frame */
+ ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
+
+ /*
+ * Return the list of ath_buf of this mpdu to free queue
+ */
+ spin_lock_bh(&sc->sc_txbuflock);
+ list_splice_tail_init(bf_q, &sc->sc_txbuf);
+ spin_unlock_bh(&sc->sc_txbuflock);
+}
+
+/*
+ * queue up a dest/ac pair for tx scheduling
+ * NB: must be called with txq lock held
+ */
+
+static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
+{
+ struct ath_atx_ac *ac = tid->ac;
+
+ /*
+ * if tid is paused, hold off
+ */
+ if (tid->paused)
+ return;
+
+ /*
+	 * add tid to ac at most once
+ */
+ if (tid->sched)
+ return;
+
+ tid->sched = true;
+ list_add_tail(&tid->list, &ac->tid_q);
+
+ /*
+	 * add node ac to txq at most once
+ */
+ if (ac->sched)
+ return;
+
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
+}
+
+/* pause a tid */
+
+static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+
+ spin_lock_bh(&txq->axq_lock);
+
+ tid->paused++;
+
+ spin_unlock_bh(&txq->axq_lock);
+}
+
+/* resume a tid and schedule aggregate */
+
+void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+
+ ASSERT(tid->paused > 0);
+ spin_lock_bh(&txq->axq_lock);
+
+ tid->paused--;
+
+ if (tid->paused > 0)
+ goto unlock;
+
+ if (list_empty(&tid->buf_q))
+ goto unlock;
+
+ /*
+ * Add this TID to scheduler and try to send out aggregates
+ */
+ ath_tx_queue_tid(txq, tid);
+ ath_txq_schedule(sc, txq);
+unlock:
+ spin_unlock_bh(&txq->axq_lock);
+}
+
+/* Compute the number of bad frames */
+
+static int ath_tx_num_badfrms(struct ath_softc *sc,
+ struct ath_buf *bf, int txok)
+{
+ struct ath_node *an = bf->bf_node;
+ int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
+ struct ath_buf *bf_last = bf->bf_lastbf;
+ struct ath_desc *ds = bf_last->bf_desc;
+ u16 seq_st = 0;
+ u32 ba[WME_BA_BMP_SIZE >> 5];
+ int ba_index;
+ int nbad = 0;
+ int isaggr = 0;
+
+ if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
+ return 0;
+
+ isaggr = bf->bf_isaggr;
+ if (isaggr) {
+ seq_st = ATH_DS_BA_SEQ(ds);
+ memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
+ }
+
+ while (bf) {
+ ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
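+		/* Offset of this subframe's seqno from the BA starting seqno */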
+ if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
+ nbad++;
+
+ bf = bf->bf_next;
+ }
+
+ return nbad;
+}
+
+static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+
+ bf->bf_isretried = 1;
+ bf->bf_retries++;
+
+ skb = bf->bf_mpdu;
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
+}
+
+/* Update block ack window */
+
+static void ath_tx_update_baw(struct ath_softc *sc,
+ struct ath_atx_tid *tid, int seqno)
+{
+ int index, cindex;
+
+ index = ATH_BA_INDEX(tid->seq_start, seqno);
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+
+ tid->tx_buf[cindex] = NULL;
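+	/*
+	 * Advance the window start past any contiguous completed slots
+	 * so that seq_start tracks the oldest outstanding sub-frame.
+	 */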
+
+ while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
+ INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+ INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+ }
+}
+
+/*
+ * ath_pkt_duration - compute packet duration (NB: not NAV)
+ *
+ * rix - rate index
+ * pktlen - total bytes (delims + data + fcs + pads + pad delims)
+ * width - 0 for 20 MHz, 1 for 40 MHz
+ * half_gi - use a 3.6 us (half GI) symbol time instead of 4 us
+ */
+
+static u32 ath_pkt_duration(struct ath_softc *sc,
+ u8 rix,
+ struct ath_buf *bf,
+ int width,
+ int half_gi,
+ bool shortPreamble)
+{
+ const struct ath9k_rate_table *rt = sc->sc_currates;
+ u32 nbits, nsymbits, duration, nsymbols;
+ u8 rc;
+ int streams, pktlen;
+
+ pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen;
+ rc = rt->info[rix].rateCode;
+
+ /*
+ * for legacy rates, use old function to compute packet duration
+ */
+ if (!IS_HT_RATE(rc))
+ return ath9k_hw_computetxtime(sc->sc_ah,
+ rt,
+ pktlen,
+ rix,
+ shortPreamble);
+ /*
+ * find number of symbols: PLCP + data
+ */
+ nbits = (pktlen << 3) + OFDM_PLCP_BITS;
+ nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
+ nsymbols = (nbits + nsymbits - 1) / nsymbits;
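+	/* i.e. nsymbols = ceil((8 * pktlen + OFDM_PLCP_BITS) / nsymbits) */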
+
+ if (!half_gi)
+ duration = SYMBOL_TIME(nsymbols);
+ else
+ duration = SYMBOL_TIME_HALFGI(nsymbols);
+
+ /*
+	 * add up duration for legacy/ht training and signal fields
+ */
+ streams = HT_RC_2_STREAMS(rc);
+ duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
+ return duration;
+}
+
+/* Rate module function to set rate related fields in tx descriptor */
+
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ const struct ath9k_rate_table *rt;
+ struct ath_desc *ds = bf->bf_desc;
+ struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
+ struct ath9k_11n_rate_series series[4];
+ int i, flags, rtsctsena = 0, dynamic_mimops = 0;
+ u32 ctsduration = 0;
+ u8 rix = 0, cix, ctsrate = 0;
+ u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit;
+ struct ath_node *an = (struct ath_node *) bf->bf_node;
+
+ /*
+ * get the cix for the lowest valid rix.
+ */
+ rt = sc->sc_currates;
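+	/*
+	 * Scan the series from the end; the first populated entry found
+	 * (i.e. the last configured series) supplies rix.
+	 */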
+ for (i = 4; i--;) {
+ if (bf->bf_rcs[i].tries) {
+ rix = bf->bf_rcs[i].rix;
+ break;
+ }
+ }
+ flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
+ cix = rt->info[rix].controlRate;
+
+ /*
+ * If 802.11g protection is enabled, determine whether
+ * to use RTS/CTS or just CTS. Note that this is only
+ * done for OFDM/HT unicast frames.
+ */
+ if (sc->sc_protmode != PROT_M_NONE &&
+ (rt->info[rix].phy == PHY_OFDM ||
+ rt->info[rix].phy == PHY_HT) &&
+ (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
+ if (sc->sc_protmode == PROT_M_RTSCTS)
+ flags = ATH9K_TXDESC_RTSENA;
+ else if (sc->sc_protmode == PROT_M_CTSONLY)
+ flags = ATH9K_TXDESC_CTSENA;
+
+ cix = rt->info[sc->sc_protrix].controlRate;
+ rtsctsena = 1;
+ }
+
+ /* For 11n, the default behavior is to enable RTS for
+ * hw retried frames. We enable the global flag here and
+ * let rate series flags determine which rates will actually
+ * use RTS.
+ */
+ if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) {
+ BUG_ON(!an);
+ /*
+ * 802.11g protection not needed, use our default behavior
+ */
+ if (!rtsctsena)
+ flags = ATH9K_TXDESC_RTSENA;
+ /*
+ * For dynamic MIMO PS, RTS needs to precede the first aggregate
+		 * and the second aggregate should not have any protection at all.
+ */
+ if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
+ if (!bf->bf_aggrburst) {
+ flags = ATH9K_TXDESC_RTSENA;
+ dynamic_mimops = 1;
+ } else {
+ flags = 0;
+ }
+ }
+ }
+
+ /*
+ * Set protection if aggregate protection on
+ */
+ if (sc->sc_config.ath_aggr_prot &&
+ (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) {
+ flags = ATH9K_TXDESC_RTSENA;
+ cix = rt->info[sc->sc_protrix].controlRate;
+ rtsctsena = 1;
+ }
+
+ /*
+ * For AR5416 - RTS cannot be followed by a frame larger than 8K.
+ */
+ if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) {
+ /*
+ * Ensure that in the case of SM Dynamic power save
+ * while we are bursting the second aggregate the
+ * RTS is cleared.
+ */
+ flags &= ~(ATH9K_TXDESC_RTSENA);
+ }
+
+ /*
+ * CTS transmit rate is derived from the transmit rate
+ * by looking in the h/w rate table. We must also factor
+ * in whether or not a short preamble is to be used.
+ */
+ /* NB: cix is set above where RTS/CTS is enabled */
+ BUG_ON(cix == 0xff);
+ ctsrate = rt->info[cix].rateCode |
+ (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0);
+
+ /*
+ * Setup HAL rate series
+ */
+ memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
+
+ for (i = 0; i < 4; i++) {
+ if (!bf->bf_rcs[i].tries)
+ continue;
+
+ rix = bf->bf_rcs[i].rix;
+
+ series[i].Rate = rt->info[rix].rateCode |
+ (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0);
+
+ series[i].Tries = bf->bf_rcs[i].tries;
+
+ series[i].RateFlags = (
+ (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
+ ATH9K_RATESERIES_RTS_CTS : 0) |
+ ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
+ ATH9K_RATESERIES_2040 : 0) |
+ ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
+ ATH9K_RATESERIES_HALFGI : 0);
+
+ series[i].PktDuration = ath_pkt_duration(
+ sc, rix, bf,
+ (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
+ (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
+ bf->bf_shpreamble);
+
+ if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
+ (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
+ /*
+ * When sending to an HT node that has enabled static
+ * SM/MIMO power save, send at single stream rates but
+ * use maximum allowed transmit chains per user,
+ * hardware, regulatory, or country limits for
+ * better range.
+ */
+ series[i].ChSel = sc->sc_tx_chainmask;
+ } else {
+ if (bf->bf_ht)
+ series[i].ChSel =
+ ath_chainmask_sel_logic(sc, an);
+ else
+ series[i].ChSel = sc->sc_tx_chainmask;
+ }
+
+ if (rtsctsena)
+ series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+
+ /*
+ * Set RTS for all rates if node is in dynamic powersave
+ * mode and we are using dual stream rates.
+ */
+ if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
+ series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ }
+
+ /*
+ * For non-HT devices, calculate RTS/CTS duration in software
+ * and disable multi-rate retry.
+ */
+ if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
+ /*
+ * Compute the transmit duration based on the frame
+ * size and the size of an ACK frame. We call into the
+ * HAL to do the computation since it depends on the
+ * characteristics of the actual PHY being used.
+ *
+ * NB: CTS is assumed the same size as an ACK so we can
+ * use the precalculated ACK durations.
+ */
+ if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
+ ctsduration += bf->bf_shpreamble ?
+ rt->info[cix].spAckDuration :
+ rt->info[cix].lpAckDuration;
+ }
+
+ ctsduration += series[0].PktDuration;
+
+ if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
+ ctsduration += bf->bf_shpreamble ?
+ rt->info[rix].spAckDuration :
+ rt->info[rix].lpAckDuration;
+ }
+
+ /*
+ * Disable multi-rate retry when using RTS/CTS by clearing
+ * series 1, 2 and 3.
+ */
+ memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3);
+ }
+
+ /*
+ * set dur_update_en for l-sig computation except for PS-Poll frames
+ */
+ ath9k_hw_set11n_ratescenario(ah, ds, lastds,
+ !bf->bf_ispspoll,
+ ctsrate,
+ ctsduration,
+ series, 4, flags);
+ if (sc->sc_config.ath_aggr_prot && flags)
+ ath9k_hw_set11n_burstduration(ah, ds, 8192);
+}
+
+/*
+ * Function to send a normal HT (non-AMPDU) frame
+ * NB: must be called with txq lock held
+ */
+
+static int ath_tx_send_normal(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct list_head *bf_head)
+{
+ struct ath_buf *bf;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_tx_info_priv *tx_info_priv;
+
+ BUG_ON(list_empty(bf_head));
+
+ bf = list_first_entry(bf_head, struct ath_buf, list);
+ bf->bf_isampdu = 0; /* regular HT frame */
+
+ skb = (struct sk_buff *)bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
+ memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
+
+ /* update starting sequence number for subsequent ADDBA request */
+ INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+
+ /* Queue to h/w without aggregation */
+ bf->bf_nframes = 1;
+ bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
+ ath_buf_set_rate(sc, bf);
+ ath_tx_txqaddbuf(sc, txq, bf_head);
+
+ return 0;
+}
+
+/* flush tid's software queue and send frames as non-ampdu's */
+
+static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+ struct ath_buf *bf;
+ struct list_head bf_head;
+ INIT_LIST_HEAD(&bf_head);
+
+ ASSERT(tid->paused > 0);
+ spin_lock_bh(&txq->axq_lock);
+
+ tid->paused--;
+
+ if (tid->paused > 0) {
+ spin_unlock_bh(&txq->axq_lock);
+ return;
+ }
+
+ while (!list_empty(&tid->buf_q)) {
+ bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+ ASSERT(!bf->bf_isretried);
+ list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
+ ath_tx_send_normal(sc, txq, tid, &bf_head);
+ }
+
+ spin_unlock_bh(&txq->axq_lock);
+}
+
+/* Completion routine of an aggregate */
+
+static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_buf *bf,
+ struct list_head *bf_q,
+ int txok)
+{
+ struct ath_node *an = bf->bf_node;
+ struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ struct ath_buf *bf_last = bf->bf_lastbf;
+ struct ath_desc *ds = bf_last->bf_desc;
+ struct ath_buf *bf_next, *bf_lastq = NULL;
+ struct list_head bf_head, bf_pending;
+ u16 seq_st = 0;
+ u32 ba[WME_BA_BMP_SIZE >> 5];
+ int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
+ int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
+
+ isaggr = bf->bf_isaggr;
+ if (isaggr) {
+ if (txok) {
+ if (ATH_DS_TX_BA(ds)) {
+ /*
+ * extract starting sequence and
+ * block-ack bitmap
+ */
+ seq_st = ATH_DS_BA_SEQ(ds);
+ memcpy(ba,
+ ATH_DS_BA_BITMAP(ds),
+ WME_BA_BMP_SIZE >> 3);
+ } else {
+ memzero(ba, WME_BA_BMP_SIZE >> 3);
+
+ /*
+				 * AR5416 can become deaf/mute when a BA
+				 * issue happens. The chip needs to be reset.
+				 * But AP code may have synchronization issues
+				 * when performing an internal reset in this routine.
+ * Only enable reset in STA mode for now.
+ */
+ if (sc->sc_opmode == ATH9K_M_STA)
+ needreset = 1;
+ }
+ } else {
+ memzero(ba, WME_BA_BMP_SIZE >> 3);
+ }
+ }
+
+ INIT_LIST_HEAD(&bf_pending);
+ INIT_LIST_HEAD(&bf_head);
+
+ while (bf) {
+ txfail = txpending = 0;
+ bf_next = bf->bf_next;
+
+ if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
+ /* transmit completion, subframe is
+ * acked by block ack */
+ } else if (!isaggr && txok) {
+ /* transmit completion */
+ } else {
+
+ if (!tid->cleanup_inprogress && !isnodegone &&
+ ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
+ if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
+ ath_tx_set_retry(sc, bf);
+ txpending = 1;
+ } else {
+ bf->bf_isxretried = 1;
+ txfail = 1;
+ sendbar = 1;
+ }
+ } else {
+ /*
+ * cleanup in progress, just fail
+ * the un-acked sub-frames
+ */
+ txfail = 1;
+ }
+ }
+ /*
+ * Remove ath_buf's of this sub-frame from aggregate queue.
+ */
+ if (bf_next == NULL) { /* last subframe in the aggregate */
+ ASSERT(bf->bf_lastfrm == bf_last);
+
+ /*
+ * The last descriptor of the last sub frame could be
+ * a holding descriptor for h/w. If that's the case,
+ * bf->bf_lastfrm won't be in the bf_q.
+ * Make sure we handle bf_q properly here.
+ */
+
+ if (!list_empty(bf_q)) {
+ bf_lastq = list_entry(bf_q->prev,
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ bf_q, &bf_lastq->list);
+ } else {
+ /*
+				 * XXX: if the last subframe only has one
+				 * descriptor, which is also being used as
+				 * a holding descriptor, then the ath_buf
+				 * is not in the bf_q at all.
+ */
+ INIT_LIST_HEAD(&bf_head);
+ }
+ } else {
+ ASSERT(!list_empty(bf_q));
+ list_cut_position(&bf_head,
+ bf_q, &bf->bf_lastfrm->list);
+ }
+
+ if (!txpending) {
+ /*
+ * complete the acked-ones/xretried ones; update
+ * block-ack window
+ */
+ spin_lock_bh(&txq->axq_lock);
+ ath_tx_update_baw(sc, tid, bf->bf_seqno);
+ spin_unlock_bh(&txq->axq_lock);
+
+ /* complete this sub-frame */
+ ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
+ } else {
+ /*
+ * retry the un-acked ones
+ */
+ /*
+ * XXX: if the last descriptor is holding descriptor,
+ * in order to requeue the frame to software queue, we
+ * need to allocate a new descriptor and
+ * copy the content of holding descriptor to it.
+ */
+ if (bf->bf_next == NULL &&
+ bf_last->bf_status & ATH_BUFSTATUS_STALE) {
+ struct ath_buf *tbf;
+
+ /* allocate new descriptor */
+ spin_lock_bh(&sc->sc_txbuflock);
+ ASSERT(!list_empty((&sc->sc_txbuf)));
+ tbf = list_first_entry(&sc->sc_txbuf,
+ struct ath_buf, list);
+ list_del(&tbf->list);
+ spin_unlock_bh(&sc->sc_txbuflock);
+
+ ATH_TXBUF_RESET(tbf);
+
+ /* copy descriptor content */
+ tbf->bf_mpdu = bf_last->bf_mpdu;
+ tbf->bf_node = bf_last->bf_node;
+ tbf->bf_buf_addr = bf_last->bf_buf_addr;
+ *(tbf->bf_desc) = *(bf_last->bf_desc);
+
+ /* link it to the frame */
+ if (bf_lastq) {
+ bf_lastq->bf_desc->ds_link =
+ tbf->bf_daddr;
+ bf->bf_lastfrm = tbf;
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ bf->bf_lastfrm->bf_desc);
+ } else {
+ tbf->bf_state = bf_last->bf_state;
+ tbf->bf_lastfrm = tbf;
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ tbf->bf_lastfrm->bf_desc);
+
+ /* copy the DMA context */
+ copy_dma_mem_context(
+ get_dma_mem_context(tbf,
+ bf_dmacontext),
+ get_dma_mem_context(bf_last,
+ bf_dmacontext));
+ }
+ list_add_tail(&tbf->list, &bf_head);
+ } else {
+ /*
+ * Clear descriptor status words for
+ * software retry
+ */
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ bf->bf_lastfrm->bf_desc);
+ }
+
+ /*
+ * Put this buffer to the temporary pending
+ * queue to retain ordering
+ */
+ list_splice_tail_init(&bf_head, &bf_pending);
+ }
+
+ bf = bf_next;
+ }
+
+ /*
+	 * The node is already gone and might have been freed;
+	 * any further access to it can result in a panic.
+	 * Note that the tid is part of the node.
+ */
+ if (isnodegone)
+ return;
+
+ if (tid->cleanup_inprogress) {
+ /* check to see if we're done with cleaning the h/w queue */
+ spin_lock_bh(&txq->axq_lock);
+
+ if (tid->baw_head == tid->baw_tail) {
+ tid->addba_exchangecomplete = 0;
+ tid->addba_exchangeattempts = 0;
+ spin_unlock_bh(&txq->axq_lock);
+
+ tid->cleanup_inprogress = false;
+
+ /* send buffered frames as singles */
+ ath_tx_flush_tid(sc, tid);
+ } else
+ spin_unlock_bh(&txq->axq_lock);
+
+ return;
+ }
+
+ /*
+ * prepend un-acked frames to the beginning of the pending frame queue
+ */
+ if (!list_empty(&bf_pending)) {
+ spin_lock_bh(&txq->axq_lock);
+		/* Note: we _prepend_, we _do_not_ add to
+		 * the end of the queue! */
+ list_splice(&bf_pending, &tid->buf_q);
+ ath_tx_queue_tid(txq, tid);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+
+ if (needreset)
+ ath_internal_reset(sc);
+
+ return;
+}
+
+/* Process completed xmit descriptors from the specified queue */
+
+static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_buf *bf, *lastbf, *bf_held = NULL;
+ struct list_head bf_head;
+ struct ath_desc *ds, *tmp_ds;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_tx_info_priv *tx_info_priv;
+ int nacked, txok, nbad = 0, isrifs = 0;
+ int status;
+
+ DPRINTF(sc, ATH_DBG_QUEUE,
+ "%s: tx queue %d (%x), link %p\n", __func__,
+ txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
+ txq->axq_link);
+
+ nacked = 0;
+ for (;;) {
+ spin_lock_bh(&txq->axq_lock);
+ txq->axq_intrcnt = 0; /* reset periodic desc intr count */
+ if (list_empty(&txq->axq_q)) {
+ txq->axq_link = NULL;
+ txq->axq_linkbuf = NULL;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ }
+ bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+
+ /*
+		 * There is a race condition where a BH gets scheduled
+		 * after sw writes TxE and before hw re-loads the last
+		 * descriptor to get the newly chained one.
+ * Software must keep the last DONE descriptor as a
+ * holding descriptor - software does so by marking
+ * it with the STALE flag.
+ */
+ bf_held = NULL;
+ if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+ bf_held = bf;
+ if (list_is_last(&bf_held->list, &txq->axq_q)) {
+ /* FIXME:
+ * The holding descriptor is the last
+ * descriptor in queue. It's safe to remove
+ * the last holding descriptor in BH context.
+ */
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ } else {
+ /* Lets work with the next buffer now */
+ bf = list_entry(bf_held->list.next,
+ struct ath_buf, list);
+ }
+ }
+
+ lastbf = bf->bf_lastbf;
+		ds = lastbf->bf_desc;	/* NB: last descriptor */
+
+ status = ath9k_hw_txprocdesc(ah, ds);
+ if (status == -EINPROGRESS) {
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ }
+ if (bf->bf_desc == txq->axq_lastdsWithCTS)
+ txq->axq_lastdsWithCTS = NULL;
+ if (ds == txq->axq_gatingds)
+ txq->axq_gatingds = NULL;
+
+ /*
+ * Remove ath_buf's of the same transmit unit from txq,
+ * however leave the last descriptor back as the holding
+ * descriptor for hw.
+ */
+ lastbf->bf_status |= ATH_BUFSTATUS_STALE;
+ INIT_LIST_HEAD(&bf_head);
+
+ if (!list_is_singular(&lastbf->list))
+ list_cut_position(&bf_head,
+ &txq->axq_q, lastbf->list.prev);
+
+ txq->axq_depth--;
+
+ if (bf->bf_isaggr)
+ txq->axq_aggr_depth--;
+
+ txok = (ds->ds_txstat.ts_status == 0);
+
+ spin_unlock_bh(&txq->axq_lock);
+
+ if (bf_held) {
+ list_del(&bf_held->list);
+ spin_lock_bh(&sc->sc_txbuflock);
+ list_add_tail(&bf_held->list, &sc->sc_txbuf);
+ spin_unlock_bh(&sc->sc_txbuflock);
+ }
+
+ if (!bf->bf_isampdu) {
+ /*
+ * This frame is sent out as a single frame.
+ * Use hardware retry status for this frame.
+ */
+ bf->bf_retries = ds->ds_txstat.ts_longretry;
+ if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_isxretried = 1;
+ nbad = 0;
+ } else {
+ nbad = ath_tx_num_badfrms(sc, bf, txok);
+ }
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info_priv = (struct ath_tx_info_priv *)
+ tx_info->driver_data[0];
+ if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
+ (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
+ if (ds->ds_txstat.ts_status == 0)
+ nacked++;
+
+ if (bf->bf_isdata) {
+ if (isrifs)
+ tmp_ds = bf->bf_rifslast->bf_desc;
+ else
+ tmp_ds = ds;
+ memcpy(&tx_info_priv->tx,
+ &tmp_ds->ds_txstat,
+ sizeof(tx_info_priv->tx));
+ tx_info_priv->n_frames = bf->bf_nframes;
+ tx_info_priv->n_bad_frames = nbad;
+ }
+ }
+
+ /*
+ * Complete this transmit unit
+ */
+ if (bf->bf_isampdu)
+ ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
+ else
+ ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
+
+ /* Wake up mac80211 queue */
+
+ spin_lock_bh(&txq->axq_lock);
+ if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
+ (ATH_TXBUF - 20)) {
+ int qnum;
+ qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
+ if (qnum != -1) {
+ ieee80211_wake_queue(sc->hw, qnum);
+ txq->stopped = 0;
+ }
+
+ }
+
+ /*
+ * schedule any pending packets if aggregation is enabled
+ */
+ if (sc->sc_txaggr)
+ ath_txq_schedule(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+ return nacked;
+}
+
+static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_hal *ah = sc->sc_ah;
+
+ (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
+ DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
+ __func__, txq->axq_qnum,
+ ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
+}
+
+/* Drain only the data queues */
+
+static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ int i;
+ int npend = 0;
+ enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
+
+ /* XXX return value */
+ if (!sc->sc_invalid) {
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i)) {
+ ath_tx_stopdma(sc, &sc->sc_txq[i]);
+
+ /* The TxDMA may not really be stopped.
+ * Double check the hal tx pending count */
+ npend += ath9k_hw_numtxpending(ah,
+ sc->sc_txq[i].axq_qnum);
+ }
+ }
+ }
+
+ if (npend) {
+ int status;
+
+ /* TxDMA not stopped, reset the hal */
+ DPRINTF(sc, ATH_DBG_XMIT,
+ "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
+
+ spin_lock_bh(&sc->sc_resetlock);
+ if (!ath9k_hw_reset(ah, sc->sc_opmode,
+ &sc->sc_curchan, ht_macmode,
+ sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+ sc->sc_ht_extprotspacing, true, &status)) {
+
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to reset hardware; hal status %u\n",
+ __func__,
+ status);
+ }
+ spin_unlock_bh(&sc->sc_resetlock);
+ }
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
+ }
+}
+
+/* Add a sub-frame to block ack window */
+
+static void ath_tx_addto_baw(struct ath_softc *sc,
+ struct ath_atx_tid *tid,
+ struct ath_buf *bf)
+{
+ int index, cindex;
+
+ if (bf->bf_isretried)
+ return;
+
+ index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
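+	/* cindex is the slot for this seqno in the circular BAW buffer */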
+
+ ASSERT(tid->tx_buf[cindex] == NULL);
+ tid->tx_buf[cindex] = bf;
+
+ if (index >= ((tid->baw_tail - tid->baw_head) &
+ (ATH_TID_MAX_BUFS - 1))) {
+ tid->baw_tail = cindex;
+ INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
+ }
+}
+
+/*
+ * Function to send an A-MPDU
+ * NB: must be called with txq lock held
+ */
+
+static int ath_tx_send_ampdu(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct list_head *bf_head,
+ struct ath_tx_control *txctl)
+{
+ struct ath_buf *bf;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_tx_info_priv *tx_info_priv;
+
+ BUG_ON(list_empty(bf_head));
+
+ bf = list_first_entry(bf_head, struct ath_buf, list);
+ bf->bf_isampdu = 1;
+ bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
+ bf->bf_tidno = txctl->tidno;
+
+ /*
+ * Do not queue to h/w when any of the following conditions is true:
+ * - there are pending frames in software queue
+ * - the TID is currently paused for ADDBA/BAR request
+ * - seqno is not within block-ack window
+ * - h/w queue depth exceeds low water mark
+ */
+ if (!list_empty(&tid->buf_q) || tid->paused ||
+ !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
+ txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
+ /*
+ * Add this frame to software queue for scheduling later
+ * for aggregation.
+ */
+ list_splice_tail_init(bf_head, &tid->buf_q);
+ ath_tx_queue_tid(txq, tid);
+ return 0;
+ }
+
+ skb = (struct sk_buff *)bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
+ memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
+
+ /* Add sub-frame to BAW */
+ ath_tx_addto_baw(sc, tid, bf);
+
+ /* Queue to h/w without aggregation */
+ bf->bf_nframes = 1;
+ bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
+ ath_buf_set_rate(sc, bf);
+ ath_tx_txqaddbuf(sc, txq, bf_head);
+ return 0;
+}
+
+/*
+ * Looks up the rate and returns the aggregation limit
+ * based on the lowest of the rates.
+ */
+
+static u32 ath_lookup_rate(struct ath_softc *sc,
+ struct ath_buf *bf)
+{
+ const struct ath9k_rate_table *rt = sc->sc_currates;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_tx_info_priv *tx_info_priv;
+ u32 max_4ms_framelen, frame_length;
+ u16 aggr_limit, legacy = 0, maxampdu;
+ int i;
+
+
+ skb = (struct sk_buff *)bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info_priv = (struct ath_tx_info_priv *)
+ tx_info->driver_data[0];
+ memcpy(bf->bf_rcs,
+ tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
+
+ /*
+ * Find the lowest frame length among the rate series that will have a
+ * 4ms transmit duration.
+ * TODO - TXOP limit needs to be considered.
+ */
+ max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
+
+ for (i = 0; i < 4; i++) {
+ if (bf->bf_rcs[i].tries) {
+ frame_length = bf->bf_rcs[i].max_4ms_framelen;
+
+ if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
+ legacy = 1;
+ break;
+ }
+
+ max_4ms_framelen = min(max_4ms_framelen, frame_length);
+ }
+ }
+
+ /*
+	 * limit aggregate size by the minimum rate if the rate selected
+	 * is not a probe rate; if the rate selected is a probe rate,
+	 * avoid aggregation of this packet.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
+ return 0;
+
+ aggr_limit = min(max_4ms_framelen,
+ (u32)ATH_AMPDU_LIMIT_DEFAULT);
+
+ /*
+	 * h/w can accept aggregates up to 16-bit lengths (65535).
+	 * The IE, however, can hold up to 65536, which shows up here
+ * as zero. Ignore 65536 since we are constrained by hw.
+ */
+ maxampdu = sc->sc_ht_info.maxampdu;
+ if (maxampdu)
+ aggr_limit = min(aggr_limit, maxampdu);
+
+ return aggr_limit;
+}
+
+/*
+ * returns the number of delimiters to be added to
+ * meet the minimum required mpdudensity.
+ * caller should make sure that the rate is an HT rate.
+ */
+
+static int ath_compute_num_delims(struct ath_softc *sc,
+ struct ath_buf *bf,
+ u16 frmlen)
+{
+ const struct ath9k_rate_table *rt = sc->sc_currates;
+ u32 nsymbits, nsymbols, mpdudensity;
+ u16 minlen;
+ u8 rc, flags, rix;
+ int width, half_gi, ndelim, mindelim;
+
+ /* Select standard number of delimiters based on frame length alone */
+ ndelim = ATH_AGGR_GET_NDELIM(frmlen);
+
+ /*
+	 * If encryption is enabled, the hardware requires some more
+	 * padding between subframes.
+ * TODO - this could be improved to be dependent on the rate.
+ * The hardware can keep up at lower rates, but not higher rates
+ */
+ if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
+ ndelim += ATH_AGGR_ENCRYPTDELIM;
+
+ /*
+	 * Convert desired mpdu density from microseconds to bytes based
+	 * on highest rate in rate series (i.e. first rate) to determine
+	 * required minimum length for subframe. Take into account
+	 * whether high rate is 20 or 40 MHz and half or full GI.
+ */
+ mpdudensity = sc->sc_ht_info.mpdudensity;
+
+ /*
+ * If there is no mpdu density restriction, no further calculation
+ * is needed.
+ */
+ if (mpdudensity == 0)
+ return ndelim;
+
+ rix = bf->bf_rcs[0].rix;
+ flags = bf->bf_rcs[0].flags;
+ rc = rt->info[rix].rateCode;
+ width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
+ half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
+
+ if (half_gi)
+ nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
+ else
+ nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
+
+ if (nsymbols == 0)
+ nsymbols = 1;
+
+ nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
+ minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
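+	/* minlen is the frame length that spans the required MPDU
+	 * density interval at the first-rate modulation. */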
+
+ /* Is frame shorter than required minimum length? */
+ if (frmlen < minlen) {
+ /* Get the minimum number of delimiters required. */
+ mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
+ ndelim = max(mindelim, ndelim);
+ }
+
+ return ndelim;
+}
+
+/*
+ * Form an aggregate from the software buffer queue.
+ * NB: must be called with txq lock held
+ */
+
+static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+ struct ath_atx_tid *tid,
+ struct list_head *bf_q,
+ struct ath_buf **bf_last,
+ struct aggr_rifs_param *param,
+ int *prev_frames)
+{
+#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
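+/* PADBYTES() gives the bytes needed to pad _len up to a 4-byte boundary */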
+ struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
+ struct list_head bf_head;
+ int rl = 0, nframes = 0, ndelim;
+ u16 aggr_limit = 0, al = 0, bpad = 0,
+ al_delta, h_baw = tid->baw_size / 2;
+ enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+ int prev_al = 0, is_ds_rate = 0;
+ INIT_LIST_HEAD(&bf_head);
+
+ BUG_ON(list_empty(&tid->buf_q));
+
+ bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
+
+ do {
+ bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+
+ /*
+ * do not step over block-ack window
+ */
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
+ status = ATH_AGGR_BAW_CLOSED;
+ break;
+ }
+
+ if (!rl) {
+ aggr_limit = ath_lookup_rate(sc, bf);
+ rl = 1;
+ /*
+ * Is rate dual stream
+ */
+ is_ds_rate =
+ (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
+ }
+
+ /*
+ * do not exceed aggregation limit
+ */
+ al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
+
+ if (nframes && (aggr_limit <
+ (al + bpad + al_delta + prev_al))) {
+ status = ATH_AGGR_LIMITED;
+ break;
+ }
+
+ /*
+ * do not exceed subframe limit
+ */
+ if ((nframes + *prev_frames) >=
+ min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
+ status = ATH_AGGR_LIMITED;
+ break;
+ }
+
+ /*
+ * add padding for previous frame to aggregation length
+ */
+ al += bpad + al_delta;
+
+ /*
+ * Get the delimiters needed to meet the MPDU
+ * density for this node.
+ */
+ ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen);
+
+ bpad = PADBYTES(al_delta) + (ndelim << 2);
+
+ bf->bf_next = NULL;
+ bf->bf_lastfrm->bf_desc->ds_link = 0;
+
+ /*
+ * this packet is part of an aggregate
+ * - remove all descriptors belonging to this frame from
+ * software queue
+ * - add it to block ack window
+ * - set up descriptors for aggregation
+ */
+ list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
+ ath_tx_addto_baw(sc, tid, bf);
+
+ list_for_each_entry(tbf, &bf_head, list) {
+ ath9k_hw_set11n_aggr_middle(sc->sc_ah,
+ tbf->bf_desc, ndelim);
+ }
+
+ /*
+ * link buffers of this frame to the aggregate
+ */
+ list_splice_tail_init(&bf_head, bf_q);
+ nframes++;
+
+ if (bf_prev) {
+ bf_prev->bf_next = bf;
+ bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
+ }
+ bf_prev = bf;
+
+#ifdef AGGR_NOSHORT
+ /*
+ * terminate aggregation on a small packet boundary
+ */
+ if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
+ status = ATH_AGGR_SHORTPKT;
+ break;
+ }
+#endif
+ } while (!list_empty(&tid->buf_q));
+
+ bf_first->bf_al = al;
+ bf_first->bf_nframes = nframes;
+ *bf_last = bf_prev;
+ return status;
+#undef PADBYTES
+}
+
+/*
+ * process pending frames possibly doing a-mpdu aggregation
+ * NB: must be called with txq lock held
+ */
+
+static void ath_tx_sched_aggr(struct ath_softc *sc,
+ struct ath_txq *txq, struct ath_atx_tid *tid)
+{
+ struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
+ enum ATH_AGGR_STATUS status;
+ struct list_head bf_q;
+ struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
+ int prev_frames = 0;
+
+ do {
+ if (list_empty(&tid->buf_q))
+ return;
+
+ INIT_LIST_HEAD(&bf_q);
+
+ status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
+ &prev_frames);
+
+ /*
+ * no frames picked up to be aggregated; block-ack
+ * window is not open
+ */
+ if (list_empty(&bf_q))
+ break;
+
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
+ bf_last = list_entry(bf_q.prev, struct ath_buf, list);
+ bf->bf_lastbf = bf_last;
+
+ /*
+ * if only one frame, send as non-aggregate
+ */
+ if (bf->bf_nframes == 1) {
+ ASSERT(bf->bf_lastfrm == bf_last);
+
+ bf->bf_isaggr = 0;
+ /*
+ * clear aggr bits for every descriptor
+ * XXX TODO: is there a way to optimize it?
+ */
+ list_for_each_entry(tbf, &bf_q, list) {
+ ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
+ }
+
+ ath_buf_set_rate(sc, bf);
+ ath_tx_txqaddbuf(sc, txq, &bf_q);
+ continue;
+ }
+
+ /*
+ * setup first desc with rate and aggr info
+ */
+ bf->bf_isaggr = 1;
+ ath_buf_set_rate(sc, bf);
+ ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
+
+ /*
+ * anchor last frame of aggregate correctly
+ */
+ ASSERT(bf_lastaggr);
+ ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
+ tbf = bf_lastaggr;
+ ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
+
+ /* XXX: We don't enter into this loop, consider removing this */
+ while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
+ tbf = list_entry(tbf->list.next, struct ath_buf, list);
+ ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
+ }
+
+ txq->axq_aggr_depth++;
+
+ /*
+ * Normal aggregate, queue to hardware
+ */
+ ath_tx_txqaddbuf(sc, txq, &bf_q);
+
+ } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
+ status != ATH_AGGR_BAW_CLOSED);
+}
+
+/* Called with txq lock held */
+
+static void ath_tid_drain(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ bool bh_flag)
+{
+ struct ath_buf *bf;
+ struct list_head bf_head;
+ INIT_LIST_HEAD(&bf_head);
+
+ for (;;) {
+ if (list_empty(&tid->buf_q))
+ break;
+ bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+
+ list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
+
+ /* update baw for software retried frame */
+ if (bf->bf_isretried)
+ ath_tx_update_baw(sc, tid, bf->bf_seqno);
+
+ /*
+ * do not indicate packets while holding txq spinlock.
+ * unlock is intentional here
+ */
+ if (likely(bh_flag))
+ spin_unlock_bh(&txq->axq_lock);
+ else
+ spin_unlock(&txq->axq_lock);
+
+ /* complete this sub-frame */
+ ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+
+ if (likely(bh_flag))
+ spin_lock_bh(&txq->axq_lock);
+ else
+ spin_lock(&txq->axq_lock);
+ }
+
+ /*
+ * TODO: For frame(s) that are in the retry state, we will reuse the
+ * sequence number(s) without setting the retry bit. The
+ * alternative is to give up on these and BAR the receiver's window
+ * forward.
+ */
+ tid->seq_next = tid->seq_start;
+ tid->baw_tail = tid->baw_head;
+}
+
+/*
+ * Drain all pending buffers
+ * NB: must be called with txq lock held
+ */
+
+static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
+ struct ath_txq *txq,
+ bool bh_flag)
+{
+ struct ath_atx_ac *ac, *ac_tmp;
+ struct ath_atx_tid *tid, *tid_tmp;
+
+ list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+ list_del(&ac->list);
+ ac->sched = false;
+ list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
+ list_del(&tid->list);
+ tid->sched = false;
+ ath_tid_drain(sc, txq, tid, bh_flag);
+ }
+ }
+}
+
+static int ath_tx_start_dma(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct scatterlist *sg,
+ u32 n_sg,
+ struct ath_tx_control *txctl)
+{
+ struct ath_node *an = txctl->an;
+ struct ath_buf *bf = NULL;
+ struct list_head bf_head;
+ struct ath_desc *ds;
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
+ struct ath_tx_info_priv *tx_info_priv;
+ struct ath_rc_series *rcs;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ __le16 fc = hdr->frame_control;
+
+ /* For each sglist entry, allocate an ath_buf for DMA */
+ INIT_LIST_HEAD(&bf_head);
+ spin_lock_bh(&sc->sc_txbuflock);
+ if (unlikely(list_empty(&sc->sc_txbuf))) {
+ spin_unlock_bh(&sc->sc_txbuflock);
+ return -ENOMEM;
+ }
+
+ bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
+ list_del(&bf->list);
+ spin_unlock_bh(&sc->sc_txbuflock);
+
+ list_add_tail(&bf->list, &bf_head);
+
+ /* set up this buffer */
+ ATH_TXBUF_RESET(bf);
+ bf->bf_frmlen = txctl->frmlen;
+ bf->bf_isdata = ieee80211_is_data(fc);
+ bf->bf_isbar = ieee80211_is_back_req(fc);
+ bf->bf_ispspoll = ieee80211_is_pspoll(fc);
+ bf->bf_flags = txctl->flags;
+ bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
+ bf->bf_keytype = txctl->keytype;
+ tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
+ rcs = tx_info_priv->rcs;
+ bf->bf_rcs[0] = rcs[0];
+ bf->bf_rcs[1] = rcs[1];
+ bf->bf_rcs[2] = rcs[2];
+ bf->bf_rcs[3] = rcs[3];
+ bf->bf_node = an;
+ bf->bf_mpdu = skb;
+ bf->bf_buf_addr = sg_dma_address(sg);
+
+ /* setup descriptor */
+ ds = bf->bf_desc;
+ ds->ds_link = 0;
+ ds->ds_data = bf->bf_buf_addr;
+
+ /*
+ * Save the DMA context in the first ath_buf
+ */
+ copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
+ get_dma_mem_context(txctl, dmacontext));
+
+ /*
+ * Formulate first tx descriptor with tx controls.
+ */
+ ath9k_hw_set11n_txdesc(ah,
+ ds,
+ bf->bf_frmlen, /* frame length */
+ txctl->atype, /* Atheros packet type */
+ min(txctl->txpower, (u16)60), /* txpower */
+ txctl->keyix, /* key cache index */
+ txctl->keytype, /* key type */
+ txctl->flags); /* flags */
+ ath9k_hw_filltxdesc(ah,
+ ds,
+ sg_dma_len(sg), /* segment length */
+ true, /* first segment */
+ (n_sg == 1) ? true : false, /* last segment */
+ ds); /* first descriptor */
+
+ bf->bf_lastfrm = bf;
+ bf->bf_ht = txctl->ht;
+
+ spin_lock_bh(&txq->axq_lock);
+
+ if (txctl->ht && sc->sc_txaggr) {
+ struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
+ if (ath_aggr_query(sc, an, txctl->tidno)) {
+ /*
+ * Try aggregation if it's a unicast data frame
+ * and the destination is HT capable.
+ */
+ ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
+ } else {
+ /*
+ * Send this frame as regular when ADDBA exchange
+ * is neither complete nor pending.
+ */
+ ath_tx_send_normal(sc, txq, tid, &bf_head);
+ }
+ } else {
+ bf->bf_lastbf = bf;
+ bf->bf_nframes = 1;
+ ath_buf_set_rate(sc, bf);
+
+ if (ieee80211_is_back_req(fc)) {
+ /* This is required for resuming tid
+ * during BAR completion */
+ bf->bf_tidno = txctl->tidno;
+ }
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
+
+ /*
+			 * When servicing one or more stations in power-save
+			 * mode, or if there is some mcast data waiting on
+			 * the mcast queue (to prevent out-of-order delivery
+			 * of mcast/bcast packets), multicast frames must be
+			 * buffered until after the beacon. We use the private
+ * mcast queue for that.
+ */
+ /* XXX? more bit in 802.11 frame header */
+ spin_lock_bh(&avp->av_mcastq.axq_lock);
+ if (txctl->ps || avp->av_mcastq.axq_depth)
+ ath_tx_mcastqaddbuf(sc,
+ &avp->av_mcastq, &bf_head);
+ else
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ spin_unlock_bh(&avp->av_mcastq.axq_lock);
+ } else
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ }
+ spin_unlock_bh(&txq->axq_lock);
+ return 0;
+}
+
+static void xmit_map_sg(struct ath_softc *sc,
+ struct sk_buff *skb,
+ dma_addr_t *pa,
+ struct ath_tx_control *txctl)
+{
+ struct ath_xmit_status tx_status;
+ struct ath_atx_tid *tid;
+ struct scatterlist sg;
+
+ *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+ /* setup S/G list */
+ memset(&sg, 0, sizeof(struct scatterlist));
+ sg_dma_address(&sg) = *pa;
+ sg_dma_len(&sg) = skb->len;
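+	/* The skb data is contiguous, so a single scatterlist
+	 * entry covers the whole frame. */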
+
+ if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
+ /*
+		 * We have to drop the frame here.
+ */
+ pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
+
+ tx_status.retries = 0;
+ tx_status.flags = ATH_TX_ERROR;
+
+ if (txctl->ht && sc->sc_txaggr) {
+ /* Reclaim the seqno. */
+ tid = ATH_AN_2_TID((struct ath_node *)
+ txctl->an, txctl->tidno);
+ DECR(tid->seq_next, IEEE80211_SEQ_MAX);
+ }
+ ath_tx_complete(sc, skb, &tx_status, txctl->an);
+ }
+}
+
+/* Initialize TX queue and h/w */
+
+int ath_tx_init(struct ath_softc *sc, int nbufs)
+{
+ int error = 0;
+
+ do {
+ spin_lock_init(&sc->sc_txbuflock);
+
+ /* Setup tx descriptors */
+ error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+ "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC);
+ if (error != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: failed to allocate tx descriptors: %d\n",
+ __func__, error);
+ break;
+ }
+
+ /* XXX allocate beacon state together with vap */
+ error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
+ "beacon", ATH_BCBUF, 1);
+ if (error != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: failed to allocate "
+				"beacon descriptors: %d\n",
+ __func__, error);
+ break;
+ }
+
+ } while (0);
+
+ if (error != 0)
+ ath_tx_cleanup(sc);
+
+ return error;
+}
+
+/* Reclaim all tx queue resources */
+
+int ath_tx_cleanup(struct ath_softc *sc)
+{
+ /* cleanup beacon descriptors */
+ if (sc->sc_bdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
+
+ /* cleanup tx descriptors */
+ if (sc->sc_txdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+
+ return 0;
+}
+
+/* Setup a h/w transmit queue */
+
+struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ struct ath9k_tx_queue_info qi;
+ int qnum;
+
+ memzero(&qi, sizeof(qi));
+ qi.tqi_subtype = subtype;
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_physCompBuf = 0;
+
+ /*
+ * Enable interrupts only for EOL and DESC conditions.
+ * We mark tx descriptors to receive a DESC interrupt
+	 * when a tx queue gets deep; otherwise we wait for the
+	 * EOL to reap descriptors. Note that this is done to
+ * reduce interrupt load and this only defers reaping
+ * descriptors, never transmitting frames. Aside from
+ * reducing interrupts this also permits more concurrency.
+	 * The only potential downside is that if the tx queue
+	 * backs up, the top half of the kernel may also back up
+	 * due to a lack of tx descriptors.
+ *
+ * The UAPSD queue is an exception, since we take a desc-
+ * based intr on the EOSP frames.
+ */
+ if (qtype == ATH9K_TX_QUEUE_UAPSD)
+ qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
+ else
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
+ TXQ_FLAG_TXDESCINT_ENABLE;
+ qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+ if (qnum == -1) {
+ /*
+ * NB: don't print a message, this happens
+ * normally on parts with too few tx queues
+ */
+ return NULL;
+ }
+ if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: hal qnum %u out of range, max %u!\n",
+ __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
+ ath9k_hw_releasetxqueue(ah, qnum);
+ return NULL;
+ }
+ if (!ATH_TXQ_SETUP(sc, qnum)) {
+ struct ath_txq *txq = &sc->sc_txq[qnum];
+
+ txq->axq_qnum = qnum;
+ txq->axq_link = NULL;
+ INIT_LIST_HEAD(&txq->axq_q);
+ INIT_LIST_HEAD(&txq->axq_acq);
+ spin_lock_init(&txq->axq_lock);
+ txq->axq_depth = 0;
+ txq->axq_aggr_depth = 0;
+ txq->axq_totalqueued = 0;
+ txq->axq_intrcnt = 0;
+ txq->axq_linkbuf = NULL;
+ sc->sc_txqsetup |= 1<<qnum;
+ }
+ return &sc->sc_txq[qnum];
+}
+
+/* Reclaim resources for a setup queue */
+
+void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
+{
+ ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
+ sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
+}
+
+/*
+ * Setup a hardware data transmit queue for the specified
+ * access category. The hal may not support all requested
+ * queues in which case it will return a reference to a
+ * previously setup queue. We record the mapping from ac's
+ * to h/w queues for use by ath_tx_start and also track
+ * the set of h/w queues being used to optimize work in the
+ * transmit interrupt handler and related routines.
+ */
+
+int ath_tx_setup(struct ath_softc *sc, int haltype)
+{
+ struct ath_txq *txq;
+
+ if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: HAL AC %u out of range, max %zu!\n",
+ __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
+ return 0;
+ }
+ txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
+ if (txq != NULL) {
+ sc->sc_haltype2q[haltype] = txq->axq_qnum;
+ return 1;
+ } else
+ return 0;
+}
+
+int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
+{
+ int qnum;
+
+ switch (qtype) {
+ case ATH9K_TX_QUEUE_DATA:
+ if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: HAL AC %u out of range, max %zu!\n",
+ __func__,
+ haltype, ARRAY_SIZE(sc->sc_haltype2q));
+ return -1;
+ }
+ qnum = sc->sc_haltype2q[haltype];
+ break;
+ case ATH9K_TX_QUEUE_BEACON:
+ qnum = sc->sc_bhalq;
+ break;
+ case ATH9K_TX_QUEUE_CAB:
+ qnum = sc->sc_cabq->axq_qnum;
+ break;
+ default:
+ qnum = -1;
+ }
+ return qnum;
+}
+
+/* Update parameters for a transmit queue */
+
+int ath_txq_update(struct ath_softc *sc, int qnum,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hal *ah = sc->sc_ah;
+ int error = 0;
+ struct ath9k_tx_queue_info qi;
+
+ if (qnum == sc->sc_bhalq) {
+ /*
+ * XXX: for beacon queue, we just save the parameter.
+ * It will be picked up by ath_beaconq_config when
+ * it's necessary.
+ */
+ sc->sc_beacon_qi = *qinfo;
+ return 0;
+ }
+
+ ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi);
+ qi.tqi_aifs = qinfo->tqi_aifs;
+ qi.tqi_cwmin = qinfo->tqi_cwmin;
+ qi.tqi_cwmax = qinfo->tqi_cwmax;
+ qi.tqi_burstTime = qinfo->tqi_burstTime;
+ qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+ if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "%s: unable to update hardware queue %u!\n",
+ __func__, qnum);
+ error = -EIO;
+ } else {
+ ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
+ }
+
+ return error;
+}
+
+int ath_cabq_update(struct ath_softc *sc)
+{
+ struct ath9k_tx_queue_info qi;
+ int qnum = sc->sc_cabq->axq_qnum;
+ struct ath_beacon_config conf;
+
+ ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
+ /*
+ * Ensure the readytime % is within the bounds.
+ */
+ if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
+ sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
+ else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
+ sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
+
+ ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
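+	/* The CAB queue ready time is a percentage of the beacon interval */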
+ qi.tqi_readyTime =
+ (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
+ ath_txq_update(sc, qnum, &qi);
+
+ return 0;
+}
+
+int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
+{
+ struct ath_tx_control txctl;
+ int error = 0;
+
+ error = ath_tx_prepare(sc, skb, &txctl);
+ if (error == 0)
+ /*
+ * Start DMA mapping.
+ * ath_tx_start_dma() will be called either synchronously
+		 * or asynchronously once DMA is complete.
+ */
+ xmit_map_sg(sc, skb,
+ get_dma_mem_context(&txctl, dmacontext),
+ &txctl);
+ else
+ ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
+
+ /* failed packets will be dropped by the caller */
+ return error;
+}
+
+/* Deferred processing of transmit interrupt */
+
+void ath_tx_tasklet(struct ath_softc *sc)
+{
+ u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ int i, nacked = 0;
+ u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
+
+ ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
+
+ /*
+ * Process each active queue.
+ */
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
+ nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
+ }
+ if (nacked)
+ sc->sc_lastrx = tsf;
+}
+
+void ath_tx_draintxq(struct ath_softc *sc,
+ struct ath_txq *txq, bool retry_tx)
+{
+ struct ath_buf *bf, *lastbf;
+ struct list_head bf_head;
+
+ INIT_LIST_HEAD(&bf_head);
+
+ /*
+ * NB: this assumes output has been stopped and
+ * we do not need to block ath_tx_tasklet
+ */
+ for (;;) {
+ spin_lock_bh(&txq->axq_lock);
+
+ if (list_empty(&txq->axq_q)) {
+ txq->axq_link = NULL;
+ txq->axq_linkbuf = NULL;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ }
+
+ bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+
+ if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+ list_del(&bf->list);
+ spin_unlock_bh(&txq->axq_lock);
+
+ spin_lock_bh(&sc->sc_txbuflock);
+ list_add_tail(&bf->list, &sc->sc_txbuf);
+ spin_unlock_bh(&sc->sc_txbuflock);
+ continue;
+ }
+
+ lastbf = bf->bf_lastbf;
+ if (!retry_tx)
+ lastbf->bf_desc->ds_txstat.ts_flags =
+ ATH9K_TX_SW_ABORTED;
+
+ /* remove ath_buf's of the same mpdu from txq */
+ list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
+ txq->axq_depth--;
+
+ spin_unlock_bh(&txq->axq_lock);
+
+ if (bf->bf_isampdu)
+ ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
+ else
+ ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+ }
+
+ /* flush any pending frames if aggregation is enabled */
+ if (sc->sc_txaggr) {
+ if (!retry_tx) {
+ spin_lock_bh(&txq->axq_lock);
+ ath_txq_drain_pending_buffers(sc, txq,
+ ATH9K_BH_STATUS_CHANGE);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+ }
+}
+
+/* Drain the transmit queues and reclaim resources */
+
+void ath_draintxq(struct ath_softc *sc, bool retry_tx)
+{
+ /* stop beacon queue. The beacon will be freed when
+ * we go to INIT state */
+ if (!sc->sc_invalid) {
+ (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+ DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
+ ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
+ }
+
+ ath_drain_txdataq(sc, retry_tx);
+}
+
+u32 ath_txq_depth(struct ath_softc *sc, int qnum)
+{
+ return sc->sc_txq[qnum].axq_depth;
+}
+
+u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
+{
+ return sc->sc_txq[qnum].axq_aggr_depth;
+}
+
+/* Check if an ADDBA is required. A valid node must be passed. */
+enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
+ struct ath_node *an,
+ u8 tidno)
+{
+ struct ath_atx_tid *txtid;
+ DECLARE_MAC_BUF(mac);
+
+ if (!sc->sc_txaggr)
+ return AGGR_NOT_REQUIRED;
+
+ /* ADDBA exchange must be completed before sending aggregates */
+ txtid = ATH_AN_2_TID(an, tidno);
+
+ if (txtid->addba_exchangecomplete)
+ return AGGR_EXCHANGE_DONE;
+
+ if (txtid->cleanup_inprogress)
+ return AGGR_CLEANUP_PROGRESS;
+
+ if (txtid->addba_exchangeinprogress)
+ return AGGR_EXCHANGE_PROGRESS;
+
+ if (!txtid->addba_exchangecomplete) {
+ if (!txtid->addba_exchangeinprogress &&
+ (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
+ txtid->addba_exchangeattempts++;
+ return AGGR_REQUIRED;
+ }
+ }
+
+ return AGGR_NOT_REQUIRED;
+}
+
+/* Start TX aggregation */
+
+int ath_tx_aggr_start(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid,
+ u16 *ssn)
+{
+ struct ath_atx_tid *txtid;
+ struct ath_node *an;
+
+ spin_lock_bh(&sc->node_lock);
+ an = ath_node_find(sc, (u8 *) addr);
+ spin_unlock_bh(&sc->node_lock);
+
+ if (!an) {
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: Node not found to initialize "
+ "TX aggregation\n", __func__);
+ return -1;
+ }
+
+ if (sc->sc_txaggr) {
+ txtid = ATH_AN_2_TID(an, tid);
+ txtid->addba_exchangeinprogress = 1;
+ ath_tx_pause_tid(sc, txtid);
+ }
+
+ return 0;
+}
+
+/* Stop tx aggregation */
+
+int ath_tx_aggr_stop(struct ath_softc *sc,
+ const u8 *addr,
+ u16 tid)
+{
+ struct ath_node *an;
+
+ spin_lock_bh(&sc->node_lock);
+ an = ath_node_find(sc, (u8 *) addr);
+ spin_unlock_bh(&sc->node_lock);
+
+ if (!an) {
+ DPRINTF(sc, ATH_DBG_AGGR,
+ "%s: TX aggr stop for non-existent node\n", __func__);
+ return -1;
+ }
+
+ ath_tx_aggr_teardown(sc, an, tid);
+ return 0;
+}
+
+/*
+ * Performs transmit side cleanup when TID changes from aggregated to
+ * unaggregated.
+ * - Pause the TID and mark cleanup in progress
+ * - Discard all retry frames from the s/w queue.
+ */
+
+void ath_tx_aggr_teardown(struct ath_softc *sc,
+ struct ath_node *an, u8 tid)
+{
+ struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
+ struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
+ struct ath_buf *bf;
+ struct list_head bf_head;
+ INIT_LIST_HEAD(&bf_head);
+
+ DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
+
+ if (txtid->cleanup_inprogress) /* cleanup is in progress */
+ return;
+
+ if (!txtid->addba_exchangecomplete) {
+ txtid->addba_exchangeattempts = 0;
+ return;
+ }
+
+ /* TID must be paused first */
+ ath_tx_pause_tid(sc, txtid);
+
+ /* drop all software retried frames and mark this TID */
+ spin_lock_bh(&txq->axq_lock);
+ while (!list_empty(&txtid->buf_q)) {
+ bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
+ if (!bf->bf_isretried) {
+ /*
+			 * NB: this is based on the assumption that a
+			 * software retried frame will always stay at
+			 * the head of the software queue.
+ */
+ break;
+ }
+ list_cut_position(&bf_head,
+ &txtid->buf_q, &bf->bf_lastfrm->list);
+ ath_tx_update_baw(sc, txtid, bf->bf_seqno);
+
+ /* complete this sub-frame */
+ ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+ }
+
+ if (txtid->baw_head != txtid->baw_tail) {
+ spin_unlock_bh(&txq->axq_lock);
+ txtid->cleanup_inprogress = true;
+ } else {
+ txtid->addba_exchangecomplete = 0;
+ txtid->addba_exchangeattempts = 0;
+ spin_unlock_bh(&txq->axq_lock);
+ ath_tx_flush_tid(sc, txtid);
+ }
+}
+
+/*
+ * Tx scheduling logic
+ * NB: must be called with txq lock held
+ */
+
+void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_atx_ac *ac;
+ struct ath_atx_tid *tid;
+
+ /* nothing to schedule */
+ if (list_empty(&txq->axq_acq))
+ return;
+ /*
+ * get the first node/ac pair on the queue
+ */
+ ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+ list_del(&ac->list);
+ ac->sched = false;
+
+ /*
+ * process a single tid per destination
+ */
+ do {
+ /* nothing to schedule */
+ if (list_empty(&ac->tid_q))
+ return;
+
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
+ list_del(&tid->list);
+ tid->sched = false;
+
+ if (tid->paused) /* check next tid to keep h/w busy */
+ continue;
+
+ if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
+ ((txq->axq_depth % 2) == 0)) {
+ ath_tx_sched_aggr(sc, txq, tid);
+ }
+
+ /*
+ * add tid to round-robin queue if more frames
+ * are pending for the tid
+ */
+ if (!list_empty(&tid->buf_q))
+ ath_tx_queue_tid(txq, tid);
+
+ /* only schedule one TID at a time */
+ break;
+ } while (!list_empty(&ac->tid_q));
+
+ /*
+ * schedule AC if more TIDs need processing
+ */
+ if (!list_empty(&ac->tid_q)) {
+ /*
+ * add dest ac to txq if not already added
+ */
+ if (!ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
+ }
+ }
+}
+
+/* Initialize per-node transmit state */
+
+void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
+{
+ if (sc->sc_txaggr) {
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ int tidno, acno;
+
+ sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
+
+ /*
+ * Init per tid tx state
+ */
+ for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
+ tidno < WME_NUM_TID;
+ tidno++, tid++) {
+ tid->an = an;
+ tid->tidno = tidno;
+ tid->seq_start = tid->seq_next = 0;
+ tid->baw_size = WME_MAX_BA;
+ tid->baw_head = tid->baw_tail = 0;
+ tid->sched = false;
+ tid->paused = false;
+ tid->cleanup_inprogress = false;
+ INIT_LIST_HEAD(&tid->buf_q);
+
+ acno = TID_TO_WME_AC(tidno);
+ tid->ac = &an->an_aggr.tx.ac[acno];
+
+ /* ADDBA state */
+ tid->addba_exchangecomplete = 0;
+ tid->addba_exchangeinprogress = 0;
+ tid->addba_exchangeattempts = 0;
+ }
+
+ /*
+ * Init per ac tx state
+ */
+ for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
+ acno < WME_NUM_AC; acno++, ac++) {
+ ac->sched = false;
+ INIT_LIST_HEAD(&ac->tid_q);
+
+ switch (acno) {
+ case WME_AC_BE:
+ ac->qnum = ath_tx_get_qnum(sc,
+ ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ break;
+ case WME_AC_BK:
+ ac->qnum = ath_tx_get_qnum(sc,
+ ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
+ break;
+ case WME_AC_VI:
+ ac->qnum = ath_tx_get_qnum(sc,
+ ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
+ break;
+ case WME_AC_VO:
+ ac->qnum = ath_tx_get_qnum(sc,
+ ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
+ break;
+ }
+ }
+ }
+}
+
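Each TID is attached to one of the four WME access categories through TID_TO_WME_AC(), whose body is not part of this patch. A standalone sketch of the usual 802.1D user-priority to access-category mapping, which this is assumed to follow:

#include <stdio.h>

/* Standalone sketch of the assumed 802.1D TID -> AC mapping; the real macro
 * lives outside this patch and the enum values here are local to the example. */
enum { AC_BK, AC_BE, AC_VI, AC_VO };

static int tid_to_ac(int tid)
{
        switch (tid & 7) {
        case 1: case 2: return AC_BK;   /* background  */
        case 0: case 3: return AC_BE;   /* best effort */
        case 4: case 5: return AC_VI;   /* video       */
        default:        return AC_VO;   /* voice (6, 7) */
        }
}

int main(void)
{
        int tid;

        for (tid = 0; tid < 8; tid++)
                printf("TID %d -> AC %d\n", tid, tid_to_ac(tid));
        return 0;
}

With every TID landing on exactly one AC, the switch above only has to pick the right hardware qnum once per category.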
+/* Cleanup the pending buffers for the node. */
+
+void ath_tx_node_cleanup(struct ath_softc *sc,
+ struct ath_node *an, bool bh_flag)
+{
+ int i;
+ struct ath_atx_ac *ac, *ac_tmp;
+ struct ath_atx_tid *tid, *tid_tmp;
+ struct ath_txq *txq;
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i)) {
+ txq = &sc->sc_txq[i];
+
+ if (likely(bh_flag))
+ spin_lock_bh(&txq->axq_lock);
+ else
+ spin_lock(&txq->axq_lock);
+
+ list_for_each_entry_safe(ac,
+ ac_tmp, &txq->axq_acq, list) {
+ tid = list_first_entry(&ac->tid_q,
+ struct ath_atx_tid, list);
+ if (tid && tid->an != an)
+ continue;
+ list_del(&ac->list);
+ ac->sched = false;
+
+ list_for_each_entry_safe(tid,
+ tid_tmp, &ac->tid_q, list) {
+ list_del(&tid->list);
+ tid->sched = false;
+ ath_tid_drain(sc, txq, tid, bh_flag);
+ tid->addba_exchangecomplete = 0;
+ tid->addba_exchangeattempts = 0;
+ tid->cleanup_inprogress = false;
+ }
+ }
+
+ if (likely(bh_flag))
+ spin_unlock_bh(&txq->axq_lock);
+ else
+ spin_unlock(&txq->axq_lock);
+ }
+ }
+}
+
+/* Cleanup per node transmit state */
+
+void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
+{
+ if (sc->sc_txaggr) {
+ struct ath_atx_tid *tid;
+ int tidno, i;
+
+ /* Verify per-TID tx state: all buffers should have been freed */
+ for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
+ tidno < WME_NUM_TID;
+ tidno++, tid++) {
+
+ for (i = 0; i < ATH_TID_MAX_BUFS; i++)
+ ASSERT(tid->tx_buf[i] == NULL);
+ }
+ }
+}
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 1acfbcd3703..36e8d2f6e7b 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -305,9 +305,10 @@ static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
-#define ipw_write8(ipw, ofs, val) \
+#define ipw_write8(ipw, ofs, val) do { \
IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
- _ipw_write8(ipw, ofs, val)
+ _ipw_write8(ipw, ofs, val); \
+ } while (0)
/* 16-bit direct write (low 4K) */
#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
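The ipw_write8() hunk above wraps a two-statement macro in do { ... } while (0). A standalone illustration in plain C (hypothetical macro names, not driver code) of the hazard that form avoids:

#include <stdio.h>

/* Standalone illustration of why multi-statement macros are wrapped in
 * do { ... } while (0); macro names here are made up for the example. */
#define LOG_AND_SET_BAD(var, val) \
        printf("setting to %d\n", (val)); \
        (var) = (val)

#define LOG_AND_SET_OK(var, val) do { \
        printf("setting to %d\n", (val)); \
        (var) = (val); \
} while (0)

int main(void)
{
        int x = 0, y = 0;

        if (0)
                LOG_AND_SET_BAD(x, 1);  /* only the printf is guarded; x = 1 runs anyway */

        if (0)
                LOG_AND_SET_OK(y, 1);   /* the whole body is guarded */

        printf("x=%d y=%d\n", x, y);    /* prints x=1 y=0 */
        return 0;
}

Without the wrapper only the first statement is controlled by the surrounding if, an if/else around the macro no longer compiles, and the do/while form also forces the usual trailing semicolon.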
@@ -11946,7 +11947,7 @@ module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
module_param(led, int, 0444);
-MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
+MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 82b66a3d3a5..b0ac0ce3fb9 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -14,18 +14,49 @@ config IWLWIFI_LEDS
default n
config IWLWIFI_RFKILL
- boolean "IWLWIFI RF kill support"
+ boolean "Iwlwifi RF kill support"
depends on IWLCORE
-config IWL4965
- tristate "Intel Wireless WiFi 4965AGN"
+config IWLWIFI_DEBUG
+ bool "Enable full debugging output in iwlagn driver"
+ depends on IWLCORE
+ ---help---
+ This option will enable debug tracing output for the iwlwifi drivers
+
+ This will result in the kernel module being ~100k larger. You can
+ control which debug output is sent to the kernel log by setting the
+ value in
+
+ /sys/class/net/wlan0/device/debug_level
+
+ This entry will only exist if this option is enabled.
+
+ To set a value, simply echo an 8-digit hex value to the same file:
+
+ % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+ You can find the list of debug mask values in:
+ drivers/net/wireless/iwlwifi/iwl-debug.h
+
+ If this is your first time using this driver, you should say Y here
+ as the debug information can assist others in helping you resolve
+ any problems you may encounter.
+
+config IWLWIFI_DEBUGFS
+ bool "Iwlwifi debugfs support"
+ depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
+ ---help---
+ Enable creation of debugfs files for the iwlwifi drivers.
+
+config IWLAGN
+ tristate "Intel Wireless WiFi Next Gen AGN"
depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
select FW_LOADER
select IWLCORE
---help---
Select to build the driver supporting the:
- Intel Wireless WiFi Link 4965AGN
+ Intel Wireless WiFi Link Next-Gen AGN
This driver uses the kernel's mac80211 subsystem.
@@ -42,60 +73,33 @@ config IWL4965
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwl4965.ko.
-
-config IWL4965_LEDS
- bool "Enable LEDS features in iwl4965 driver"
- depends on IWL4965
- select IWLWIFI_LEDS
- ---help---
- This option enables LEDS for the iwlwifi drivers
+ module will be called iwlagn.ko.
-
-config IWL4965_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwl4965 driver"
- depends on IWL4965
+config IWLAGN_SPECTRUM_MEASUREMENT
+ bool "Enable Spectrum Measurement in iwlagn driver"
+ depends on IWLAGN
---help---
- This option will enable spectrum measurement for the iwl4965 driver.
+ This option will enable spectrum measurement for the iwlagn driver.
-config IWLWIFI_DEBUG
- bool "Enable full debugging output in iwl4965 driver"
- depends on IWL4965
+config IWLAGN_LEDS
+ bool "Enable LEDS features in iwlagn driver"
+ depends on IWLAGN
+ select IWLWIFI_LEDS
---help---
- This option will enable debug tracing output for the iwl4965
- driver.
-
- This will result in the kernel module being ~100k larger. You can
- control which debug output is sent to the kernel log by setting the
- value in
-
- /sys/class/net/wlan0/device/debug_level
-
- This entry will only exist if this option is enabled.
-
- To set a value, simply echo an 8-byte hex value to the same file:
-
- % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+ This option enables LEDs for the iwlagn driver.
- You can find the list of debug mask values in:
- drivers/net/wireless/iwlwifi/iwl-4965-debug.h
- If this is your first time using this driver, you should say Y here
- as the debug information can assist others in helping you resolve
- any problems you may encounter.
+config IWL4965
+ bool "Intel Wireless WiFi 4965AGN"
+ depends on IWLAGN
+ ---help---
+ This option enables support for Intel Wireless WiFi Link 4965AGN
config IWL5000
bool "Intel Wireless WiFi 5000AGN"
- depends on IWL4965
+ depends on IWLAGN
---help---
This option enables support for Intel Wireless WiFi Link 5000AGN Family
- Dependency on 4965 is temporary
-
-config IWLWIFI_DEBUGFS
- bool "Iwlwifi debugfs support"
- depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
- ---help---
- Enable creation of debugfs files for the iwlwifi drivers.
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1f52b92f08b..47aa28f6a51 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -6,15 +6,14 @@ iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
+obj-$(CONFIG_IWLAGN) += iwlagn.o
+iwlagn-objs := iwl-agn.o iwl-agn-rs.o
+
+iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
+iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
+
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o
-obj-$(CONFIG_IWL4965) += iwl4965.o
-iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o
-
-ifeq ($(CONFIG_IWL5000),y)
- iwl4965-objs += iwl-5000.o
-endif
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 6be1fe13fa5..d3336966b6b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -206,12 +206,12 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
static int iwl3945_led_register_led(struct iwl3945_priv *priv,
struct iwl3945_led *led,
enum led_type type, u8 set_led,
- const char *name, char *trigger)
+ char *trigger)
{
struct device *device = wiphy_dev(priv->hw->wiphy);
int ret;
- led->led_dev.name = name;
+ led->led_dev.name = led->name;
led->led_dev.brightness_set = iwl3945_led_brightness_set;
led->led_dev.default_trigger = trigger;
@@ -308,7 +308,6 @@ void iwl3945_led_background(struct iwl3945_priv *priv)
int iwl3945_led_register(struct iwl3945_priv *priv)
{
char *trigger;
- char name[32];
int ret;
priv->last_blink_rate = 0;
@@ -318,7 +317,8 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
priv->allow_blinking = 0;
trigger = ieee80211_get_radio_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:radio",
+ snprintf(priv->led[IWL_LED_TRG_RADIO].name,
+ sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
wiphy_name(priv->hw->wiphy));
priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
@@ -327,19 +327,20 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
ret = iwl3945_led_register_led(priv,
&priv->led[IWL_LED_TRG_RADIO],
- IWL_LED_TRG_RADIO, 1,
- name, trigger);
+ IWL_LED_TRG_RADIO, 1, trigger);
+
if (ret)
goto exit_fail;
trigger = ieee80211_get_assoc_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:assoc",
+ snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
+ sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
wiphy_name(priv->hw->wiphy));
ret = iwl3945_led_register_led(priv,
&priv->led[IWL_LED_TRG_ASSOC],
- IWL_LED_TRG_ASSOC, 0,
- name, trigger);
+ IWL_LED_TRG_ASSOC, 0, trigger);
+
/* for assoc always turn led on */
priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on;
priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on;
@@ -349,14 +350,13 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
goto exit_fail;
trigger = ieee80211_get_rx_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:RX",
+ snprintf(priv->led[IWL_LED_TRG_RX].name,
+ sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
wiphy_name(priv->hw->wiphy));
-
ret = iwl3945_led_register_led(priv,
&priv->led[IWL_LED_TRG_RX],
- IWL_LED_TRG_RX, 0,
- name, trigger);
+ IWL_LED_TRG_RX, 0, trigger);
priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
@@ -366,13 +366,14 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
goto exit_fail;
trigger = ieee80211_get_tx_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:TX",
+ snprintf(priv->led[IWL_LED_TRG_TX].name,
+ sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
wiphy_name(priv->hw->wiphy));
ret = iwl3945_led_register_led(priv,
&priv->led[IWL_LED_TRG_TX],
- IWL_LED_TRG_TX, 0,
- name, trigger);
+ IWL_LED_TRG_TX, 0, trigger);
+
priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 47b7e0bac80..2fbd126c134 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -50,6 +50,7 @@ enum led_type {
struct iwl3945_led {
struct iwl3945_priv *priv;
struct led_classdev led_dev;
+ char name[32];
int (*led_on) (struct iwl3945_priv *priv, int led_id);
int (*led_off) (struct iwl3945_priv *priv, int led_id);
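The new name[32] member gives each LED a buffer that lives as long as the iwl3945_led structure itself, instead of a pointer into iwl3945_led_register()'s stack frame. A standalone illustration in plain C (hypothetical names, not driver code) of that lifetime issue:

#include <stdio.h>

/* Standalone illustration of the lifetime bug: a registered object must not
 * keep a pointer into a caller's stack frame. Names are made up. */
struct led_like {
        const char *name;       /* pointer kept and used long after registration */
        char name_buf[32];      /* embedded storage, lives as long as the struct */
};

static void register_bad(struct led_like *led)
{
        char name[32];                          /* dies when this function returns */

        snprintf(name, sizeof(name), "iwl-%s:radio", "phy0");
        led->name = name;                       /* dangling pointer after return */
}

static void register_ok(struct led_like *led)
{
        snprintf(led->name_buf, sizeof(led->name_buf), "iwl-%s:radio", "phy0");
        led->name = led->name_buf;              /* points into the long-lived struct */
}

int main(void)
{
        struct led_like led;

        register_ok(&led);
        printf("%s\n", led.name);               /* safe: "iwl-phy0:radio" */

        register_bad(&led);
        /* led.name now points at a dead stack frame; any later use would be
         * undefined behaviour, which is what the embedded buffer avoids */
        return 0;
}

Keeping the buffer inside struct iwl3945_led means its lifetime matches the led_classdev that references it.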
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a51e0eaa133..b3931f6135a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -710,10 +710,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
return;
}
- if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
- iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
- return;
- }
+
/* Convert 3945's rssi indicator to dBm */
rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET;
@@ -775,6 +772,11 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
priv->last_rx_noise = rx_status.noise;
}
+ if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
+ iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
+ return;
+ }
+
switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
case IEEE80211_FTYPE_MGMT:
switch (le16_to_cpu(header->frame_control) &
@@ -793,8 +795,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
struct ieee80211_mgmt *mgmt =
(struct ieee80211_mgmt *)header;
__le32 *pos;
- pos =
- (__le32 *) & mgmt->u.beacon.
+ pos = (__le32 *)&mgmt->u.beacon.
timestamp;
priv->timestamp0 = le32_to_cpu(pos[0]);
priv->timestamp1 = le32_to_cpu(pos[1]);
@@ -1507,7 +1508,7 @@ static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
*/
static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
{
- return (((temperature < -260) || (temperature > 25)) ? 1 : 0);
+ return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
}
int iwl3945_hw_get_temperature(struct iwl3945_priv *priv)
@@ -2628,7 +2629,7 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
tx_beacon_cmd->tx.supp_rates[1] =
(IWL_CCK_BASIC_RATES_MASK & 0xF);
- return (sizeof(struct iwl3945_tx_beacon_cmd) + frame_size);
+ return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
}
void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9afecb81371..22bb26985c2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -341,39 +341,6 @@ err:
return -EINVAL;
}
-int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
- if (src == IWL_PWR_SRC_VAUX) {
- u32 val;
- ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
- &val);
-
- if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- }
- } else {
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- }
-
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return ret;
-}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
@@ -875,18 +842,6 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
return 0;
}
-/* set card power command */
-static int iwl4965_set_power(struct iwl_priv *priv,
- void *cmd)
-{
- int ret = 0;
-
- ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
- sizeof(struct iwl4965_powertable_cmd),
- cmd, NULL);
- return ret;
-}
-
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
s32 sign = 1;
@@ -1560,11 +1515,11 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
c, atten_value, power_index,
tx_power.s.radio_tx_gain[c],
tx_power.s.dsp_predis_atten[c]);
- }/* for each chain */
+ } /* for each chain */
tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
- }/* for each rate */
+ } /* for each rate */
return 0;
}
@@ -1701,38 +1656,6 @@ static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
return le32_to_cpu(s->rb_closed) & 0xFFF;
}
-unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl_frame *frame, u8 rate)
-{
- struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
- unsigned int frame_size;
-
- tx_beacon_cmd = &frame->u.beacon;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- frame_size = iwl4965_fill_beacon_frame(priv,
- tx_beacon_cmd->frame,
- iwl_bcast_addr,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
- BUG_ON(frame_size > MAX_MPDU_SIZE);
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
- if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
- tx_beacon_cmd->tx.rate_n_flags =
- iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
- else
- tx_beacon_cmd->tx.rate_n_flags =
- iwl_hw_set_rate_n_flags(rate, 0);
-
- tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
- return (sizeof(*tx_beacon_cmd) + frame_size);
-}
-
static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
{
priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
@@ -2079,39 +2002,6 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
return 0;
}
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- enum ieee80211_ampdu_mlme_action action,
- const u8 *addr, u16 tid, u16 *ssn)
-{
- struct iwl_priv *priv = hw->priv;
- DECLARE_MAC_BUF(mac);
-
- IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
- print_mac(mac, addr), tid);
-
- if (!(priv->cfg->sku & IWL_SKU_N))
- return -EACCES;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT("start Rx\n");
- return iwl_rx_agg_start(priv, addr, tid, *ssn);
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT("stop Rx\n");
- return iwl_rx_agg_stop(priv, addr, tid);
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT("start Tx\n");
- return iwl_tx_agg_start(priv, addr, tid, ssn);
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT("stop Tx\n");
- return iwl_tx_agg_stop(priv, addr, tid);
- default:
- IWL_DEBUG_HT("unknown\n");
- return -EINVAL;
- break;
- }
- return 0;
-}
static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
{
@@ -2240,9 +2130,9 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
bitmap = bitmap << sh;
sh = 0;
}
- bitmap |= (1 << sh);
- IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
- start, (u32)(bitmap & 0xFFFFFFFF));
+ bitmap |= 1ULL << sh;
+ IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
+ start, (unsigned long long)bitmap);
}
agg->bitmap = bitmap;
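The aggregation bitmap here is a 64-bit block-ack window, so the shift must be done in a 64-bit type; (1 << sh) is evaluated as a 32-bit int and is undefined for sh >= 32. A standalone check in plain C (not driver code):

#include <stdio.h>
#include <stdint.h>

/* Standalone illustration of the (1 << sh) vs (1ULL << sh) fix above. */
int main(void)
{
        uint64_t bitmap = 0;
        int sh = 40;                    /* frame 40 positions into the BA window */

        /* old form: the shift happens in 32-bit int, undefined for sh >= 32
         * and in practice the high bits are lost:
         *      bitmap |= (1 << sh);
         */

        /* fixed form: force a 64-bit shift */
        bitmap |= 1ULL << sh;

        printf("bitmap = 0x%llx\n", (unsigned long long)bitmap);
        return 0;
}

The same fix, together with the widened %llx debug format, appears again in iwl5000_tx_status_reply_tx further down.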
@@ -2368,6 +2258,40 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
}
+static int iwl4965_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host. */
+ struct iwl4965_rx_non_cfg_phy *ncphy =
+ (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
+ >> IWL49_AGC_DB_POS;
+
+ u32 valid_antennae =
+ (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+ >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+ u8 max_rssi = 0;
+ u32 i;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info. */
+ for (i = 0; i < 3; i++)
+ if (valid_antennae & (1 << i))
+ max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+ IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+ max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IWL_RSSI_OFFSET;
+}
+
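The dBm conversion above is max_rssi minus the AGC gain minus a constant, so a larger AGC reading reports a weaker signal. A standalone arithmetic check in plain C (the offset value of 44 is an assumption about IWL_RSSI_OFFSET, which is defined elsewhere in the driver):

#include <stdio.h>

/* Standalone arithmetic check of the dBm formula; not driver code.
 * The offset of 44 is an assumption about IWL_RSSI_OFFSET. */
int main(void)
{
        int max_rssi = 50;      /* best of the per-antenna DSP readings */
        int rssi_offset = 44;   /* assumed IWL_RSSI_OFFSET */

        printf("agc=40 -> %d dBm\n", max_rssi - 40 - rssi_offset);  /* -34 dBm */
        printf("agc=80 -> %d dBm\n", max_rssi - 80 - rssi_offset);  /* -74 dBm */
        return 0;
}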
/* Set up 4965-specific Rx frame reply handlers */
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
@@ -2399,6 +2323,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.chain_noise_reset = iwl4965_chain_noise_reset,
.gain_computation = iwl4965_gain_computation,
.rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
+ .calc_rssi = iwl4965_calc_rssi,
};
static struct iwl_lib_ops iwl4965_lib = {
@@ -2440,7 +2365,6 @@ static struct iwl_lib_ops iwl4965_lib = {
.check_version = iwl4965_eeprom_check_version,
.query_addr = iwlcore_eeprom_query_addr,
},
- .set_power = iwl4965_set_power,
.send_tx_power = iwl4965_send_tx_power,
.update_chain_flags = iwl4965_update_chain_flags,
.temperature = iwl4965_temperature_calib,
@@ -2469,7 +2393,7 @@ MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])\n");
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
module_param_named(
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 878d6193b23..f3d139b663e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,6 +93,13 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /* enable HAP INTA to move device L1a -> L0s */
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
/* set "initialization complete" bit to move adapter
@@ -230,6 +237,16 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+ /* W/A : NIC is stuck in a reset state after Early PCIe power off
+ * (PCIe power is lost before PERST# is asserted),
+ * causing the ME FW to lose ownership and be unable to regain it.
+ */
+ iwl_grab_nic_access(priv);
+ iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+ iwl_release_nic_access(priv);
+
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -924,8 +941,8 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
if (txq_id != IWL_CMD_QUEUE_NUM) {
- sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl;
+ sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+ sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
switch (sec_ctl & TX_CMD_SEC_MSK) {
case TX_CMD_SEC_CCM:
@@ -964,7 +981,7 @@ static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
u8 sta = 0;
if (txq_id != IWL_CMD_QUEUE_NUM)
- sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id;
+ sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id;
shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
val = cpu_to_le16(1 | (sta << 12));
@@ -1131,7 +1148,7 @@ static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
{
- return le32_to_cpup((__le32*)&tx_resp->status +
+ return le32_to_cpup((__le32 *)&tx_resp->status +
tx_resp->frame_count) & MAX_SN;
}
@@ -1228,9 +1245,9 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
bitmap = bitmap << sh;
sh = 0;
}
- bitmap |= (1 << sh);
- IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
- start, (u32)(bitmap & 0xFFFFFFFF));
+ bitmap |= 1ULL << sh;
+ IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
+ start, (unsigned long long)bitmap);
}
agg->bitmap = bitmap;
@@ -1444,6 +1461,44 @@ static void iwl5000_temperature(struct iwl_priv *priv)
priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
}
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwl5000_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host
+ */
+ struct iwl5000_non_cfg_phy *ncphy =
+ (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+ u8 agc;
+
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
+ agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
+ rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
+ rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
+ rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IWL_RSSI_OFFSET;
+}
+
static struct iwl_hcmd_ops iwl5000_hcmd = {
.rxon_assoc = iwl5000_send_rxon_assoc,
};
@@ -1454,6 +1509,7 @@ static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
.gain_computation = iwl5000_gain_computation,
.chain_noise_reset = iwl5000_chain_noise_reset,
.rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
+ .calc_rssi = iwl5000_calc_rssi,
};
static struct iwl_lib_ops iwl5000_lib = {
@@ -1474,6 +1530,7 @@ static struct iwl_lib_ops iwl5000_lib = {
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
.temperature = iwl5000_temperature,
+ .update_chain_flags = iwl4965_update_chain_flags,
.apm_ops = {
.init = iwl5000_apm_init,
.reset = iwl5000_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 3ccb84aa5db..754fef5b592 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -42,7 +42,7 @@
#include "iwl-core.h"
#include "iwl-helpers.h"
-#define RS_NAME "iwl-4965-rs"
+#define RS_NAME "iwl-agn-rs"
#define NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_NUMBER_TRY 1
@@ -77,9 +77,9 @@ static const u8 ant_toggle_lookup[] = {
};
/**
- * struct iwl4965_rate_scale_data -- tx success history for one rate
+ * struct iwl_rate_scale_data -- tx success history for one rate
*/
-struct iwl4965_rate_scale_data {
+struct iwl_rate_scale_data {
u64 data; /* bitmap of successful frames */
s32 success_counter; /* number of frames successful */
s32 success_ratio; /* per-cent * 128 */
@@ -89,12 +89,12 @@ struct iwl4965_rate_scale_data {
};
/**
- * struct iwl4965_scale_tbl_info -- tx params and success history for all rates
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
*
- * There are two of these in struct iwl4965_lq_sta,
+ * There are two of these in struct iwl_lq_sta,
* one for "active", and one for "search".
*/
-struct iwl4965_scale_tbl_info {
+struct iwl_scale_tbl_info {
enum iwl_table_type lq_type;
u8 ant_type;
u8 is_SGI; /* 1 = short guard interval */
@@ -103,10 +103,10 @@ struct iwl4965_scale_tbl_info {
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
- struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+ struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
-struct iwl4965_traffic_load {
+struct iwl_traffic_load {
unsigned long time_stamp; /* age of the oldest statistics */
u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
* slice */
@@ -118,11 +118,11 @@ struct iwl4965_traffic_load {
};
/**
- * struct iwl4965_lq_sta -- driver's rate scaling private structure
+ * struct iwl_lq_sta -- driver's rate scaling private structure
*
* Pointer to this gets passed back and forth between driver and mac80211.
*/
-struct iwl4965_lq_sta {
+struct iwl_lq_sta {
u8 active_tbl; /* index of active table, range 0-1 */
u8 enable_counter; /* indicates HT mode */
u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
@@ -153,8 +153,8 @@ struct iwl4965_lq_sta {
u16 active_rate_basic;
struct iwl_link_quality_cmd lq;
- struct iwl4965_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl4965_traffic_load load[TID_MAX_LOAD_COUNT];
+ struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
@@ -170,16 +170,15 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
struct sta_info *sta);
static void rs_fill_link_cmd(const struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
- u32 rate_n_flags);
+ struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
#ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
- u32 *rate_n_flags, int index);
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index);
#else
-static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
{}
#endif
@@ -234,7 +233,7 @@ static inline u8 rs_extract_rate(u32 rate_n_flags)
return (u8)(rate_n_flags & 0xFF);
}
-static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
+static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
{
window->data = 0;
window->success_counter = 0;
@@ -246,14 +245,14 @@ static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
{
- return ((ant_type & valid_antenna) == ant_type);
+ return (ant_type & valid_antenna) == ant_type;
}
/*
* removes the old data from the statistics. All data that is older than
* TID_MAX_TIME_DIFF, will be deleted.
*/
-static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
+static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
/* The oldest age we want to keep */
u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
@@ -274,13 +273,13 @@ static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
* increment traffic load value for tid and also remove
* any old values if passed the certain time period
*/
-static u8 rs_tl_add_packet(struct iwl4965_lq_sta *lq_data,
+static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
struct ieee80211_hdr *hdr)
{
u32 curr_time = jiffies_to_msecs(jiffies);
u32 time_diff;
s32 index;
- struct iwl4965_traffic_load *tl = NULL;
+ struct iwl_traffic_load *tl = NULL;
__le16 fc = hdr->frame_control;
u8 tid;
@@ -325,12 +324,12 @@ static u8 rs_tl_add_packet(struct iwl4965_lq_sta *lq_data,
/*
get the traffic load value for tid
*/
-static u32 rs_tl_get_load(struct iwl4965_lq_sta *lq_data, u8 tid)
+static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
u32 curr_time = jiffies_to_msecs(jiffies);
u32 time_diff;
s32 index;
- struct iwl4965_traffic_load *tl = NULL;
+ struct iwl_traffic_load *tl = NULL;
if (tid >= TID_MAX_LOAD_COUNT)
return 0;
@@ -354,8 +353,8 @@ static u32 rs_tl_get_load(struct iwl4965_lq_sta *lq_data, u8 tid)
}
static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_data, u8 tid,
- struct sta_info *sta)
+ struct iwl_lq_sta *lq_data, u8 tid,
+ struct sta_info *sta)
{
unsigned long state;
DECLARE_MAC_BUF(mac);
@@ -373,8 +372,8 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
}
static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
- struct iwl4965_lq_sta *lq_data,
- struct sta_info *sta)
+ struct iwl_lq_sta *lq_data,
+ struct sta_info *sta)
{
if ((tid < TID_MAX_LOAD_COUNT))
rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
@@ -385,9 +384,9 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
{
- return (!!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_C_MSK));
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
}
/**
@@ -397,11 +396,11 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
* at this rate. window->data contains the bitmask of successful
* packets.
*/
-static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
+static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
int scale_index, s32 tpt, int retries,
int successes)
{
- struct iwl4965_rate_scale_data *window = NULL;
+ struct iwl_rate_scale_data *window = NULL;
static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
s32 fail_count;
@@ -473,7 +472,7 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
* Fill uCode API rate_n_flags field, based on "search" or "active" table.
*/
/* FIXME:RS:remove this function and put the flags statically in the table */
-static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl,
+static u32 rate_n_flags_from_tbl(struct iwl_scale_tbl_info *tbl,
int index, u8 use_green)
{
u32 rate_n_flags = 0;
@@ -530,7 +529,7 @@ static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl,
*/
static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
enum ieee80211_band band,
- struct iwl4965_scale_tbl_info *tbl,
+ struct iwl_scale_tbl_info *tbl,
int *rate_idx)
{
u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -591,7 +590,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl4965_scale_tbl_info *tbl)
+ struct iwl_scale_tbl_info *tbl)
{
u8 new_ant_type;
@@ -621,9 +620,9 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
#if 0
static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
{
- return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
+ return (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
priv->current_ht_config.is_green_field &&
- !priv->current_ht_config.non_GF_STA_present);
+ !priv->current_ht_config.non_GF_STA_present;
}
#endif
static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
@@ -638,9 +637,9 @@ static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf
* basic available rates.
*
*/
-static u16 rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum iwl_table_type rate_type)
{
if (hdr && is_multicast_ether_addr(hdr->addr1) &&
lq_sta->active_rate_basic)
@@ -714,9 +713,9 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
return (high << 8) | low;
}
-static u32 rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
- struct iwl4965_scale_tbl_info *tbl, u8 scale_index,
- u8 ht_possible)
+static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ u8 scale_index, u8 ht_possible)
{
s32 low;
u16 rate_mask;
@@ -780,7 +779,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
int status;
u8 retries;
int rs_index, index = 0;
- struct iwl4965_lq_sta *lq_sta;
+ struct iwl_lq_sta *lq_sta;
struct iwl_link_quality_cmd *table;
struct sta_info *sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -788,11 +787,11 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_hw *hw = local_to_hw(local);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl4965_rate_scale_data *window = NULL;
- struct iwl4965_rate_scale_data *search_win = NULL;
+ struct iwl_rate_scale_data *window = NULL;
+ struct iwl_rate_scale_data *search_win = NULL;
u32 tx_rate;
- struct iwl4965_scale_tbl_info tbl_type;
- struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl;
+ struct iwl_scale_tbl_info tbl_type;
+ struct iwl_scale_tbl_info *curr_tbl, *search_tbl;
u8 active_index = 0;
__le16 fc = hdr->frame_control;
s32 tpt = 0;
@@ -820,7 +819,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
goto out;
- lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
+ lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
!lq_sta->ibss_sta_added)
@@ -831,10 +830,8 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
curr_tbl = &(lq_sta->lq_info[active_index]);
search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
- window = (struct iwl4965_rate_scale_data *)
- &(curr_tbl->win[0]);
- search_win = (struct iwl4965_rate_scale_data *)
- &(search_tbl->win[0]);
+ window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
+ search_win = (struct iwl_rate_scale_data *)&(search_tbl->win[0]);
/*
* Ignore this Tx frame response if its initial rate doesn't match
@@ -983,7 +980,7 @@ out:
* searching for a new mode.
*/
static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
- struct iwl4965_lq_sta *lq_sta)
+ struct iwl_lq_sta *lq_sta)
{
IWL_DEBUG_RATE("we are staying in the same table\n");
lq_sta->stay_in_tbl = 1; /* only place this gets set */
@@ -1004,8 +1001,8 @@ static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
/*
* Find correct throughput table for given mode of modulation
*/
-static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
- struct iwl4965_scale_tbl_info *tbl)
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
{
if (is_legacy(tbl->lq_type)) {
if (!is_a_band(tbl->lq_type))
@@ -1050,12 +1047,12 @@ static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
* bit rate will typically need to increase, but not if performance was bad.
*/
static s32 rs_get_best_rate(struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
- struct iwl4965_scale_tbl_info *tbl, /* "search" */
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl, /* "search" */
u16 rate_mask, s8 index)
{
/* "active" values */
- struct iwl4965_scale_tbl_info *active_tbl =
+ struct iwl_scale_tbl_info *active_tbl =
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
@@ -1143,10 +1140,10 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
* Set up search table for MIMO
*/
static int rs_switch_to_mimo2(struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
+ struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct sta_info *sta,
- struct iwl4965_scale_tbl_info *tbl, int index)
+ struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
s32 rate;
@@ -1210,10 +1207,10 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
* Set up search table for SISO
*/
static int rs_switch_to_siso(struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
+ struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct sta_info *sta,
- struct iwl4965_scale_tbl_info *tbl, int index)
+ struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
u8 is_green = lq_sta->is_green;
@@ -1270,18 +1267,17 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
* Try to switch to new modulation mode from legacy
*/
static int rs_move_legacy_other(struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
+ struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct sta_info *sta,
int index)
{
- struct iwl4965_scale_tbl_info *tbl =
- &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl4965_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl4965_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
- (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action = tbl->action;
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
int ret = 0;
@@ -1360,19 +1356,17 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
* Try to switch to new modulation mode from SISO
*/
static int rs_move_siso_to_other(struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
+ struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
- struct sta_info *sta,
- int index)
+ struct sta_info *sta, int index)
{
u8 is_green = lq_sta->is_green;
- struct iwl4965_scale_tbl_info *tbl =
- &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl4965_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl4965_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
- (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action = tbl->action;
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
int ret;
@@ -1455,18 +1449,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
* Try to switch to new modulation mode from MIMO
*/
static int rs_move_mimo_to_other(struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
+ struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
- struct sta_info *sta,
- int index)
+ struct sta_info *sta, int index)
{
s8 is_green = lq_sta->is_green;
- struct iwl4965_scale_tbl_info *tbl =
- &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl4965_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
- (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action = tbl->action;
/*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/
int ret;
@@ -1552,9 +1544,9 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
* 2) # times calling this function
* 3) elapsed time in this mode (not used, for now)
*/
-static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
{
- struct iwl4965_scale_tbl_info *tbl;
+ struct iwl_scale_tbl_info *tbl;
int i;
int active_tbl;
int flush_interval_passed = 0;
@@ -1642,7 +1634,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
int high = IWL_RATE_INVALID;
int index;
int i;
- struct iwl4965_rate_scale_data *window = NULL;
+ struct iwl_rate_scale_data *window = NULL;
int current_tpt = IWL_INVALID_VALUE;
int low_tpt = IWL_INVALID_VALUE;
int high_tpt = IWL_INVALID_VALUE;
@@ -1651,8 +1643,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
__le16 fc;
u16 rate_mask;
u8 update_lq = 0;
- struct iwl4965_lq_sta *lq_sta;
- struct iwl4965_scale_tbl_info *tbl, *tbl1;
+ struct iwl_lq_sta *lq_sta;
+ struct iwl_scale_tbl_info *tbl, *tbl1;
u16 rate_scale_index_msk = 0;
u32 rate;
u8 is_green = 0;
@@ -1675,7 +1667,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
if (!sta || !sta->rate_ctrl_priv)
return;
- lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
+ lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
tid = rs_tl_add_packet(lq_sta, hdr);
@@ -2030,8 +2022,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
struct ieee80211_conf *conf,
struct sta_info *sta)
{
- struct iwl4965_lq_sta *lq_sta;
- struct iwl4965_scale_tbl_info *tbl;
+ struct iwl_lq_sta *lq_sta;
+ struct iwl_scale_tbl_info *tbl;
int rate_idx;
int i;
u32 rate;
@@ -2042,7 +2034,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
if (!sta || !sta->rate_ctrl_priv)
goto out;
- lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
+ lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
i = sta->last_txrate_idx;
if ((lq_sta->lq.sta_id == 0xff) &&
@@ -2096,7 +2088,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
struct sta_info *sta;
__le16 fc;
struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
- struct iwl4965_lq_sta *lq_sta;
+ struct iwl_lq_sta *lq_sta;
IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
@@ -2113,7 +2105,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
goto out;
}
- lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
+ lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
i = sta->last_txrate_idx;
if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
@@ -2149,14 +2141,14 @@ out:
static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
{
- struct iwl4965_lq_sta *lq_sta;
+ struct iwl_lq_sta *lq_sta;
struct iwl_priv *priv;
int i, j;
priv = (struct iwl_priv *)priv_rate;
IWL_DEBUG_RATE("create station rate scale window\n");
- lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp);
+ lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp);
if (lq_sta == NULL)
return NULL;
@@ -2165,7 +2157,7 @@ static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i]));
+ rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
return lq_sta;
}
@@ -2178,7 +2170,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
struct ieee80211_conf *conf = &local->hw.conf;
struct ieee80211_supported_band *sband;
struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
- struct iwl4965_lq_sta *lq_sta = priv_sta;
+ struct iwl_lq_sta *lq_sta = priv_sta;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -2187,7 +2179,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
sta->txrate_idx = 3;
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i]));
+ rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n");
/* TODO: what is a good starting rate for STA? About middle? Maybe not
@@ -2271,10 +2263,9 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
}
static void rs_fill_link_cmd(const struct iwl_priv *priv,
- struct iwl4965_lq_sta *lq_sta,
- u32 new_rate)
+ struct iwl_lq_sta *lq_sta, u32 new_rate)
{
- struct iwl4965_scale_tbl_info tbl_type;
+ struct iwl_scale_tbl_info tbl_type;
int index = 0;
int rate_idx;
int repeat_rate = 0;
@@ -2402,6 +2393,7 @@ static void rs_free(void *priv_rate)
static void rs_clear(void *priv_rate)
{
+#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_priv *priv = (struct iwl_priv *) priv_rate;
IWL_DEBUG_RATE("enter\n");
@@ -2409,11 +2401,12 @@ static void rs_clear(void *priv_rate)
/* TODO - add rate scale state reset */
IWL_DEBUG_RATE("leave\n");
+#endif /* CONFIG_IWLWIFI_DEBUG */
}
static void rs_free_sta(void *priv_rate, void *priv_sta)
{
- struct iwl4965_lq_sta *lq_sta = priv_sta;
+ struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_priv *priv;
priv = (struct iwl_priv *)priv_rate;
@@ -2429,8 +2422,8 @@ static int open_file_generic(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
return 0;
}
-static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
{
struct iwl_priv *priv;
@@ -2453,7 +2446,7 @@ static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
- struct iwl4965_lq_sta *lq_sta = file->private_data;
+ struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_priv *priv;
char buf[64];
int buf_size;
@@ -2493,7 +2486,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
int desc = 0;
int i = 0;
- struct iwl4965_lq_sta *lq_sta = file->private_data;
+ struct iwl_lq_sta *lq_sta = file->private_data;
desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
@@ -2541,7 +2534,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
int desc = 0;
int i, j;
- struct iwl4965_lq_sta *lq_sta = file->private_data;
+ struct iwl_lq_sta *lq_sta = file->private_data;
for (i = 0; i < LQ_SIZE; i++) {
desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n"
"rate=0x%X\n",
@@ -2570,7 +2563,7 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
static void rs_add_debugfs(void *priv, void *priv_sta,
struct dentry *dir)
{
- struct iwl4965_lq_sta *lq_sta = priv_sta;
+ struct iwl_lq_sta *lq_sta = priv_sta;
lq_sta->rs_sta_dbgfs_scale_table_file =
debugfs_create_file("rate_scale_table", 0600, dir,
lq_sta, &rs_sta_dbgfs_scale_table_ops);
@@ -2585,7 +2578,7 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
static void rs_remove_debugfs(void *priv, void *priv_sta)
{
- struct iwl4965_lq_sta *lq_sta = priv_sta;
+ struct iwl_lq_sta *lq_sta = priv_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
@@ -2609,104 +2602,12 @@ static struct rate_control_ops rs_ops = {
#endif
};
-int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
-{
- struct ieee80211_local *local = hw_to_local(hw);
- struct iwl_priv *priv = hw->priv;
- struct iwl4965_lq_sta *lq_sta;
- struct sta_info *sta;
- int cnt = 0, i;
- u32 samples = 0, success = 0, good = 0;
- unsigned long now = jiffies;
- u32 max_time = 0;
- u8 lq_type, antenna;
-
- rcu_read_lock();
-
- sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
- if (!sta || !sta->rate_ctrl_priv) {
- if (sta)
- IWL_DEBUG_RATE("leave - no private rate data!\n");
- else
- IWL_DEBUG_RATE("leave - no station!\n");
- rcu_read_unlock();
- return sprintf(buf, "station %d not found\n", sta_id);
- }
-
- lq_sta = (void *)sta->rate_ctrl_priv;
-
- lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type;
- antenna = lq_sta->lq_info[lq_sta->active_tbl].ant_type;
-
- if (is_legacy(lq_type))
- i = IWL_RATE_54M_INDEX;
- else
- i = IWL_RATE_60M_INDEX;
- while (1) {
- u64 mask;
- int j;
- int active = lq_sta->active_tbl;
-
- cnt +=
- sprintf(&buf[cnt], " %2dMbs: ", iwl_rates[i].ieee / 2);
-
- mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
- for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
- buf[cnt++] =
- (lq_sta->lq_info[active].win[i].data & mask)
- ? '1' : '0';
-
- samples += lq_sta->lq_info[active].win[i].counter;
- good += lq_sta->lq_info[active].win[i].success_counter;
- success += lq_sta->lq_info[active].win[i].success_counter *
- iwl_rates[i].ieee;
-
- if (lq_sta->lq_info[active].win[i].stamp) {
- int delta =
- jiffies_to_msecs(now -
- lq_sta->lq_info[active].win[i].stamp);
-
- if (delta > max_time)
- max_time = delta;
-
- cnt += sprintf(&buf[cnt], "%5dms\n", delta);
- } else
- buf[cnt++] = '\n';
-
- j = iwl4965_get_prev_ieee_rate(i);
- if (j == i)
- break;
- i = j;
- }
-
- /*
- * Display the average rate of all samples taken.
- * NOTE: We multiply # of samples by 2 since the IEEE measurement
- * added from iwl_rates is actually 2X the rate.
- */
- if (samples)
- cnt += sprintf(&buf[cnt],
- "\nAverage rate is %3d.%02dMbs over last %4dms\n"
- "%3d%% success (%d good packets over %d tries)\n",
- success / (2 * samples), (success * 5 / samples) % 10,
- max_time, good * 100 / samples, good, samples);
- else
- cnt += sprintf(&buf[cnt], "\nAverage rate: 0Mbs\n");
-
- cnt += sprintf(&buf[cnt], "\nrate scale type %d antenna %d "
- "active_search %d rate index %d\n", lq_type, antenna,
- lq_sta->search_better_tbl, sta->last_txrate_idx);
-
- rcu_read_unlock();
- return cnt;
-}
-
-int iwl4965_rate_control_register(void)
+int iwlagn_rate_control_register(void)
{
return ieee80211_rate_control_register(&rs_ops);
}
-void iwl4965_rate_control_unregister(void)
+void iwlagn_rate_control_unregister(void)
{
ieee80211_rate_control_unregister(&rs_ops);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9b9972885aa..84d4d1e3375 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -24,8 +24,8 @@
*
*****************************************************************************/
-#ifndef __iwl_4965_rs_h__
-#define __iwl_4965_rs_h__
+#ifndef __iwl_agn_rs_h__
+#define __iwl_agn_rs_h__
#include "iwl-dev.h"
@@ -88,7 +88,7 @@ enum {
#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-/* 4965 uCode API values for legacy bit rates, both OFDM and CCK */
+/* uCode API values for legacy bit rates, both OFDM and CCK */
enum {
IWL_RATE_6M_PLCP = 13,
IWL_RATE_9M_PLCP = 15,
@@ -107,7 +107,7 @@ enum {
/*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
};
-/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */
+/* uCode API values for OFDM high-throughput (HT) bit rates */
enum {
IWL_RATE_SISO_6M_PLCP = 0,
IWL_RATE_SISO_12M_PLCP = 1,
@@ -287,15 +287,6 @@ static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
}
/**
- * iwl4965_fill_rs_info - Fill an output text buffer with the rate representation
- *
- * NOTE: This is provided as a quick mechanism for a user to visualize
- * the performance of the rate control algorithm and is not meant to be
- * parsed software.
- */
-extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
-
-/**
* iwl4965_rate_control_register - Register the rate control algorithm callbacks
*
* Since the rate control algorithm is hardware specific, there is no need
@@ -305,7 +296,7 @@ extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
* ieee80211_register_hw
*
*/
-extern int iwl4965_rate_control_register(void);
+extern int iwlagn_rate_control_register(void);
/**
* iwl4965_rate_control_unregister - Unregister the rate control callbacks
@@ -313,6 +304,6 @@ extern int iwl4965_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void iwl4965_rate_control_unregister(void);
+extern void iwlagn_rate_control_unregister(void);
-#endif
+#endif /* __iwl_agn_rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 71f5da3fe5c..ed09e48b1b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -65,7 +65,7 @@
* NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
*/
-#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
#ifdef CONFIG_IWLWIFI_DEBUG
#define VD "d"
@@ -73,7 +73,7 @@
#define VD
#endif
-#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
+#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
#define VS "s"
#else
#define VS
@@ -86,6 +86,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
/*************** STATION TABLE MANAGEMENT ****
* mac80211 should be examined to determine if sta_info is duplicating
@@ -444,11 +445,10 @@ static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
list_add(&frame->list, &priv->free_frames);
}
-unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- const u8 *dest, int left)
+static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ const u8 *dest, int left)
{
-
if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
(priv->iw_mode != IEEE80211_IF_TYPE_AP)))
@@ -487,6 +487,38 @@ static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
return IWL_RATE_6M_PLCP;
}
+unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
+ struct iwl_frame *frame, u8 rate)
+{
+ struct iwl_tx_beacon_cmd *tx_beacon_cmd;
+ unsigned int frame_size;
+
+ tx_beacon_cmd = &frame->u.beacon;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
+ iwl_bcast_addr,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+ BUG_ON(frame_size > MAX_MPDU_SIZE);
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+
+ if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
+ tx_beacon_cmd->tx.rate_n_flags =
+ iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
+ else
+ tx_beacon_cmd->tx.rate_n_flags =
+ iwl_hw_set_rate_n_flags(rate, 0);
+
+ tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
+ TX_CMD_FLG_TSF_MSK |
+ TX_CMD_FLG_STA_RATE_MSK;
+
+ return sizeof(*tx_beacon_cmd) + frame_size;
+}
static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
{
struct iwl_frame *frame;
@@ -608,7 +640,6 @@ static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
}
#define MAX_UCODE_BEACON_INTERVAL 4096
-#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
{
@@ -638,7 +669,7 @@ static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
priv->rxon_timing.timestamp.dw[0] =
cpu_to_le32(priv->timestamp & 0xFFFFFFFF);
- priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
+ priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
tsf = priv->timestamp;
@@ -853,7 +884,7 @@ static void iwl4965_set_rate(struct iwl_priv *priv)
(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
-#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
+#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
#include "iwl-spectrum.h"
@@ -1057,7 +1088,7 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
-#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
+#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
@@ -1231,6 +1262,37 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
wake_up_interruptible(&priv->wait_command_queue);
}
+int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ret = iwl_grab_nic_access(priv);
+ if (ret)
+ goto err;
+
+ if (src == IWL_PWR_SRC_VAUX) {
+ u32 val;
+ ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
+ &val);
+
+ if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
+ iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ } else {
+ iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ }
+
+ iwl_release_nic_access(priv);
+err:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
/**
* iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
*
@@ -2170,17 +2232,16 @@ static int __iwl4965_up(struct iwl_priv *priv)
}
/* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
else
set_bit(STATUS_RF_KILL_HW, &priv->status);
- if (!test_bit(STATUS_IN_SUSPEND, &priv->status) &&
- iwl_is_rfkill(priv)) {
+ if (iwl_is_rfkill(priv)) {
+ iwl4965_enable_interrupts(priv);
IWL_WARNING("Radio disabled by %s RF Kill switch\n",
test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
- return -ENODEV;
+ return 0;
}
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
@@ -2216,11 +2277,6 @@ static int __iwl4965_up(struct iwl_priv *priv)
memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
priv->ucode_data.len);
- /* We return success when we resume from suspend and rf_kill is on. */
- if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
- test_bit(STATUS_RF_KILL_SW, &priv->status))
- return 0;
-
for (i = 0; i < MAX_HW_RESTARTS; i++) {
iwl_clear_stations_table(priv);
@@ -2415,7 +2471,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
unsigned long flags;
if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
- IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
+ IWL_ERROR("%s Should not be called in AP mode\n", __func__);
return;
}
@@ -2491,7 +2547,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
default:
IWL_ERROR("%s Should not be called in %d mode\n",
- __FUNCTION__, priv->iw_mode);
+ __func__, priv->iw_mode);
break;
}
@@ -2589,6 +2645,9 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
if (ret)
goto out_release_irq;
+ if (iwl_is_rfkill(priv))
+ goto out;
+
IWL_DEBUG_INFO("Start UP work done.\n");
if (test_bit(STATUS_IN_SUSPEND, &priv->status))
@@ -2608,6 +2667,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
}
}
+out:
priv->is_open = 1;
IWL_DEBUG_MAC80211("leave\n");
return 0;
@@ -2659,7 +2719,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = hw->priv;
- IWL_DEBUG_MAC80211("enter\n");
+ IWL_DEBUG_MACDUMP("enter\n");
if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
IWL_DEBUG_MAC80211("leave - monitor\n");
@@ -2673,7 +2733,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (iwl_tx_skb(priv, skb))
dev_kfree_skb_any(skb);
- IWL_DEBUG_MAC80211("leave\n");
+ IWL_DEBUG_MACDUMP("leave\n");
return 0;
}
@@ -2773,6 +2833,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
spin_lock_irqsave(&priv->lock, flags);
+
/* if we are switching from ht to 2.4 clear flags
* from any ht related info since 2.4 does not
* support ht */
@@ -3102,6 +3163,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
if (bss_conf->assoc) {
priv->assoc_id = bss_conf->aid;
priv->beacon_int = bss_conf->beacon_int;
+ priv->power_data.dtim_period = bss_conf->dtim_period;
priv->timestamp = bss_conf->timestamp;
priv->assoc_capability = bss_conf->assoc_capability;
priv->next_scan_jiffies = jiffies +
@@ -3345,6 +3407,39 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
return 0;
}
+static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+ enum ieee80211_ampdu_mlme_action action,
+ const u8 *addr, u16 tid, u16 *ssn)
+{
+ struct iwl_priv *priv = hw->priv;
+ DECLARE_MAC_BUF(mac);
+
+ IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
+ print_mac(mac, addr), tid);
+
+ if (!(priv->cfg->sku & IWL_SKU_N))
+ return -EACCES;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ IWL_DEBUG_HT("start Rx\n");
+ return iwl_rx_agg_start(priv, addr, tid, *ssn);
+ case IEEE80211_AMPDU_RX_STOP:
+ IWL_DEBUG_HT("stop Rx\n");
+ return iwl_rx_agg_stop(priv, addr, tid);
+ case IEEE80211_AMPDU_TX_START:
+ IWL_DEBUG_HT("start Tx\n");
+ return iwl_tx_agg_start(priv, addr, tid, ssn);
+ case IEEE80211_AMPDU_TX_STOP:
+ IWL_DEBUG_HT("stop Tx\n");
+ return iwl_tx_agg_stop(priv, addr, tid);
+ default:
+ IWL_DEBUG_HT("unknown\n");
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
struct ieee80211_tx_queue_stats *stats)
{
@@ -3592,15 +3687,6 @@ static ssize_t show_temperature(struct device *d,
static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
-static ssize_t show_rs_window(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct iwl_priv *priv = d->driver_data;
- return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
-}
-static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
-
static ssize_t show_tx_power(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3699,7 +3785,7 @@ static ssize_t store_filter_flags(struct device *d,
static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
store_filter_flags);
-#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
+#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
static ssize_t show_measurement(struct device *d,
struct device_attribute *attr, char *buf)
@@ -3707,7 +3793,7 @@ static ssize_t show_measurement(struct device *d,
struct iwl_priv *priv = dev_get_drvdata(d);
struct iwl4965_spectrum_notification measure_report;
u32 size = sizeof(measure_report), len = 0, ofs = 0;
- u8 *data = (u8 *) & measure_report;
+ u8 *data = (u8 *)&measure_report;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
@@ -3770,7 +3856,7 @@ static ssize_t store_measurement(struct device *d,
static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
show_measurement, store_measurement);
-#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
+#endif /* CONFIG_IWLAGN_SPECTRUM_MEASUREMENT */
static ssize_t store_retry_rate(struct device *d,
struct device_attribute *attr,
@@ -3800,77 +3886,54 @@ static ssize_t store_power_level(struct device *d,
const char *buf, size_t count)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- int rc;
+ int ret;
int mode;
mode = simple_strtoul(buf, NULL, 0);
mutex_lock(&priv->mutex);
if (!iwl_is_ready(priv)) {
- rc = -EAGAIN;
+ ret = -EAGAIN;
goto out;
}
- rc = iwl_power_set_user_mode(priv, mode);
- if (rc) {
+ ret = iwl_power_set_user_mode(priv, mode);
+ if (ret) {
IWL_DEBUG_MAC80211("failed setting power mode.\n");
goto out;
}
- rc = count;
+ ret = count;
out:
mutex_unlock(&priv->mutex);
- return rc;
+ return ret;
}
-#define MAX_WX_STRING 80
-
-/* Values are in microsecond */
-static const s32 timeout_duration[] = {
- 350000,
- 250000,
- 75000,
- 37000,
- 25000,
-};
-static const s32 period_duration[] = {
- 400000,
- 700000,
- 1000000,
- 1000000,
- 1000000
-};
-
static ssize_t show_power_level(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
+ int mode = priv->power_data.user_power_setting;
+ int system = priv->power_data.system_power_setting;
int level = priv->power_data.power_mode;
char *p = buf;
- p += sprintf(p, "%d ", level);
- switch (level) {
- case IWL_POWER_MODE_CAM:
- case IWL_POWER_AC:
- p += sprintf(p, "(AC)");
+ switch (system) {
+ case IWL_POWER_SYS_AUTO:
+ p += sprintf(p, "SYSTEM:auto");
break;
- case IWL_POWER_BATTERY:
- p += sprintf(p, "(BATTERY)");
+ case IWL_POWER_SYS_AC:
+ p += sprintf(p, "SYSTEM:ac");
+ break;
+ case IWL_POWER_SYS_BATTERY:
+ p += sprintf(p, "SYSTEM:battery");
break;
- default:
- p += sprintf(p,
- "(Timeout %dms, Period %dms)",
- timeout_duration[level - 1] / 1000,
- period_duration[level - 1] / 1000);
}
-/*
- if (!(priv->power_mode & IWL_POWER_ENABLED))
- p += sprintf(p, " OFF\n");
- else
- p += sprintf(p, " \n");
-*/
- p += sprintf(p, " \n");
- return (p - buf + 1);
+
+ p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO) ? "fixed" : "auto");
+ p += sprintf(p, "\tINDEX:%d", level);
+ p += sprintf(p, "\n");
+ return p - buf + 1;
}
static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
@@ -3945,7 +4008,7 @@ static ssize_t show_statistics(struct device *d,
struct iwl_priv *priv = dev_get_drvdata(d);
u32 size = sizeof(struct iwl_notif_statistics);
u32 len = 0, ofs = 0;
- u8 *data = (u8 *) & priv->statistics;
+ u8 *data = (u8 *)&priv->statistics;
int rc = 0;
if (!iwl_is_alive(priv))
@@ -4041,12 +4104,11 @@ static struct attribute *iwl4965_sysfs_entries[] = {
&dev_attr_channels.attr,
&dev_attr_flags.attr,
&dev_attr_filter_flags.attr,
-#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
+#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
&dev_attr_measurement.attr,
#endif
&dev_attr_power_level.attr,
&dev_attr_retry_rate.attr,
- &dev_attr_rs_window.attr,
&dev_attr_statistics.attr,
&dev_attr_status.attr,
&dev_attr_temperature.attr,
@@ -4394,8 +4456,10 @@ static int iwl4965_pci_resume(struct pci_dev *pdev)
/* Hardware specific file defines the PCI IDs table for that hardware module */
static struct pci_device_id iwl_hw_card_ids[] = {
+#ifdef CONFIG_IWL4965
{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
+#endif /* CONFIG_IWL4965 */
#ifdef CONFIG_IWL5000
{IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)},
{IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)},
@@ -4431,7 +4495,7 @@ static int __init iwl4965_init(void)
printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
- ret = iwl4965_rate_control_register();
+ ret = iwlagn_rate_control_register();
if (ret) {
IWL_ERROR("Unable to register rate control algorithm: %d\n", ret);
return ret;
@@ -4446,14 +4510,14 @@ static int __init iwl4965_init(void)
return ret;
error_register:
- iwl4965_rate_control_unregister();
+ iwlagn_rate_control_unregister();
return ret;
}
static void __exit iwl4965_exit(void)
{
pci_unregister_driver(&iwl_driver);
- iwl4965_rate_control_unregister();
+ iwlagn_rate_control_unregister();
}
module_exit(iwl4965_exit);
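The new iwl4965_hw_get_beacon_cmd() above appends a variable-length beacon frame after a fixed command header, caps the copy at the space left in the frame union, and returns header-plus-frame as the command length. A minimal user-space sketch of that sizing pattern follows; the struct layout, MAX_PAYLOAD, and the function names are invented for illustration and are not the driver's real definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_PAYLOAD 256                 /* stand-in for sizeof(frame->u) */

    struct cmd_hdr {                        /* stand-in for the fixed tx-command part */
            uint16_t len;
            uint16_t flags;
    };

    struct cmd_buf {
            struct cmd_hdr hdr;
            uint8_t frame[MAX_PAYLOAD - sizeof(struct cmd_hdr)];
    };

    /* Copy at most 'left' bytes of the beacon, return how many were used. */
    static size_t fill_frame(uint8_t *dst, const uint8_t *src, size_t len, size_t left)
    {
            size_t n = len < left ? len : left;

            memcpy(dst, src, n);
            return n;
    }

    /* Total command size = fixed header + however much frame actually fit. */
    static size_t build_beacon_cmd(struct cmd_buf *cmd, const uint8_t *beacon, size_t len)
    {
            size_t frame_size;

            memset(cmd, 0, sizeof(*cmd));
            frame_size = fill_frame(cmd->frame, beacon, len, sizeof(cmd->frame));
            cmd->hdr.len = (uint16_t)frame_size;
            return sizeof(cmd->hdr) + frame_size;
    }

    int main(void)
    {
            uint8_t beacon[300] = { 0 };
            struct cmd_buf cmd;

            printf("command size = %zu\n", build_beacon_cmd(&cmd, beacon, sizeof(beacon)));
            return 0;
    }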
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9bb1de0ce3..28b5b09996e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -666,8 +666,7 @@ struct iwl4965_rxon_assoc_cmd {
__le16 reserved;
} __attribute__ ((packed));
-
-
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
/*
* REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
@@ -1076,10 +1075,12 @@ struct iwl4965_rx_frame {
} __attribute__ ((packed));
/* Fixed (non-configurable) rx data from phy */
-#define RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
-#define RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
-#define IWL_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
-#define IWL_AGC_DB_POS (7)
+
+#define IWL49_RX_RES_PHY_CNT 14
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
+#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
+#define IWL49_AGC_DB_POS (7)
struct iwl4965_rx_non_cfg_phy {
__le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
@@ -1087,12 +1088,30 @@ struct iwl4965_rx_non_cfg_phy {
u8 pad[0];
} __attribute__ ((packed));
+
+#define IWL50_RX_RES_PHY_CNT 8
+#define IWL50_RX_RES_AGC_IDX 1
+#define IWL50_RX_RES_RSSI_AB_IDX 2
+#define IWL50_RX_RES_RSSI_C_IDX 3
+#define IWL50_OFDM_AGC_MSK 0xfe00
+#define IWL50_OFDM_AGC_BIT_POS 9
+#define IWL50_OFDM_RSSI_A_MSK 0x00ff
+#define IWL50_OFDM_RSSI_A_BIT_POS 0
+#define IWL50_OFDM_RSSI_B_MSK 0xff0000
+#define IWL50_OFDM_RSSI_B_BIT_POS 16
+#define IWL50_OFDM_RSSI_C_MSK 0x00ff
+#define IWL50_OFDM_RSSI_C_BIT_POS 0
+
+struct iwl5000_non_cfg_phy {
+ __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */
+} __attribute__ ((packed));
+
+
/*
* REPLY_RX = 0xc3 (response only, not a command)
* Used only for legacy (non 11n) frames.
*/
-#define RX_RES_PHY_CNT 14
-struct iwl4965_rx_phy_res {
+struct iwl_rx_phy_res {
u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
u8 stat_id; /* configurable DSP phy data set ID */
@@ -1101,8 +1120,7 @@ struct iwl4965_rx_phy_res {
__le32 beacon_time_stamp; /* beacon at on-air rise */
__le16 phy_flags; /* general phy flags: band, modulation, ... */
__le16 channel; /* channel number */
- __le16 non_cfg_phy[RX_RES_PHY_CNT]; /* upto 14 phy entries */
- __le32 reserved2;
+ u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
__le32 rate_n_flags; /* RATE_MCS_* */
__le16 byte_count; /* frame's byte-count */
__le16 reserved3;
@@ -1993,7 +2011,7 @@ struct iwl4965_spectrum_notification {
*****************************************************************************/
/**
- * struct iwl4965_powertable_cmd - Power Table Command
+ * struct iwl_powertable_cmd - Power Table Command
* @flags: See below:
*
* POWER_TABLE_CMD = 0x77 (command, has simple generic response)
@@ -2027,7 +2045,7 @@ struct iwl4965_spectrum_notification {
#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3)
#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4)
-struct iwl4965_powertable_cmd {
+struct iwl_powertable_cmd {
__le16 flags;
u8 keep_alive_seconds;
u8 debug_flags;
@@ -2324,7 +2342,7 @@ struct iwl4965_beacon_notif {
/*
* REPLY_TX_BEACON = 0x91 (command, has simple generic response)
*/
-struct iwl4965_tx_beacon_cmd {
+struct iwl_tx_beacon_cmd {
struct iwl_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
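The IWL50_* masks introduced above describe how 5000-series hardware packs the AGC index and per-chain RSSI values into 32-bit words of the generic non_cfg_phy_buf. A standalone sketch of the mask-and-shift extraction, with made-up example word values:

    #include <stdint.h>
    #include <stdio.h>

    #define OFDM_AGC_MSK      0xfe00u       /* mirrors IWL50_OFDM_AGC_MSK */
    #define OFDM_AGC_BIT_POS  9
    #define OFDM_RSSI_A_MSK   0x00ffu       /* mirrors IWL50_OFDM_RSSI_A_MSK */
    #define OFDM_RSSI_A_POS   0
    #define OFDM_RSSI_B_MSK   0xff0000u     /* mirrors IWL50_OFDM_RSSI_B_MSK */
    #define OFDM_RSSI_B_POS   16

    int main(void)
    {
            uint32_t agc_word = 0x00003a00; /* example AGC word, not real data */
            uint32_t rssi_ab  = 0x004d0052; /* example: chain B = 0x4d, chain A = 0x52 */

            unsigned agc    = (agc_word & OFDM_AGC_MSK) >> OFDM_AGC_BIT_POS;
            unsigned rssi_a = (rssi_ab & OFDM_RSSI_A_MSK) >> OFDM_RSSI_A_POS;
            unsigned rssi_b = (rssi_ab & OFDM_RSSI_B_MSK) >> OFDM_RSSI_B_POS;

            printf("agc=%u rssi_a=%u rssi_b=%u\n", agc, rssi_a, rssi_b);
            return 0;
    }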
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index e3427c205cc..9bd61809129 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -383,8 +383,8 @@ void iwl_reset_qos(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_reset_qos);
-#define MAX_BIT_RATE_40_MHZ 0x96; /* 150 Mbps */
-#define MAX_BIT_RATE_20_MHZ 0x48; /* 72 Mbps */
+#define MAX_BIT_RATE_40_MHZ 0x96 /* 150 Mbps */
+#define MAX_BIT_RATE_20_MHZ 0x48 /* 72 Mbps */
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
struct ieee80211_ht_info *ht_info,
enum ieee80211_band band)
@@ -815,7 +815,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
- hw->rate_control_algorithm = "iwl-4965-rs";
+ hw->rate_control_algorithm = "iwl-agn-rs";
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
@@ -827,6 +827,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
hw->conf.beacon_int = 100;
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
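The MAX_BIT_RATE_* hunk above removes a stray trailing semicolon from the two object-like macros. Such a semicolon is expanded verbatim and can silently cut an expression short at the point of use rather than fail to compile. A contrived, runnable example of the hazard (macro names shortened, values kept):

    #include <stdio.h>

    #define BAD_MAX   0x48;         /* trailing ';' -- the bug being removed */
    #define GOOD_MAX  0x48

    int main(void)
    {
            int a = BAD_MAX + 8;    /* expands to "int a = 0x48; + 8;"  -> a == 0x48 */
            int b = GOOD_MAX + 8;   /* b == 0x50, as intended */

            printf("a=0x%x b=0x%x\n", a, b);
            return 0;
    }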
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index db66114f1e5..64f139e9744 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -95,6 +95,8 @@ struct iwl_hcmd_utils_ops {
void (*chain_noise_reset)(struct iwl_priv *priv);
void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
__le32 *tx_flags);
+ int (*calc_rssi)(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp);
};
struct iwl_lib_ops {
@@ -139,7 +141,6 @@ struct iwl_lib_ops {
int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
} apm_ops;
/* power */
- int (*set_power)(struct iwl_priv *priv, void *cmd);
int (*send_tx_power) (struct iwl_priv *priv);
void (*update_chain_flags)(struct iwl_priv *priv);
void (*temperature) (struct iwl_priv *priv);
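calc_rssi joins the per-hardware utility ops (and set_power leaves the lib ops) so each device family supplies its own PHY-data math while the core only calls through the table. A minimal sketch of that function-pointer dispatch; the struct fields, the constant offset, and the sample values are placeholders rather than the driver's actual numbers:

    #include <stdio.h>

    struct rx_phy_res { int rssi_a, rssi_b, agc; };

    struct hw_ops {
            /* per-hardware hook: turn raw PHY data into a dBm signal level */
            int (*calc_rssi)(const struct rx_phy_res *res);
    };

    static int calc_rssi_4965(const struct rx_phy_res *res)
    {
            int max = res->rssi_a > res->rssi_b ? res->rssi_a : res->rssi_b;

            return max - res->agc - 44;     /* 44: placeholder constant offset */
    }

    static const struct hw_ops ops_4965 = { .calc_rssi = calc_rssi_4965 };

    int main(void)
    {
            struct rx_phy_res res = { .rssi_a = 60, .rssi_b = 54, .agc = 70 };
            const struct hw_ops *ops = &ops_4965;   /* picked per device at probe time */

            printf("signal = %d dBm\n", ops->calc_rssi(&res));
            return 0;
    }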
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 545ed692d88..52629fbd835 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -104,6 +104,7 @@
* 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
*/
#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
+#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
/* Bits for CSR_HW_IF_CONFIG_REG */
#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
@@ -118,7 +119,12 @@
#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
-#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_PCI_OWN_SEM (0x00400000)
+#define CSR_HW_IF_CONFIG_REG_BIT_ME_OWN (0x02000000)
+#define CSR_HW_IF_CONFIG_REG_BIT_WAKE_ME (0x08000000)
+
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
* acknowledged (reset) by host writing "1" to flagged bits. */
@@ -236,6 +242,8 @@
#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
+/* HPET MEM debug */
+#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
/*=== HBUS (Host-side Bus) ===*/
#define HBUS_BASE (0x400)
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d6d729e86bd..d2daa174df2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -33,12 +33,12 @@
#define IWL_DEBUG(level, fmt, args...) \
do { if (priv->debug_level & (level)) \
dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+ in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
#define IWL_DEBUG_LIMIT(level, fmt, args...) \
do { if ((priv->debug_level & (level)) && net_ratelimit()) \
dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+ in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_debugfs {
@@ -114,7 +114,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DL_MAC80211 (1 << 1)
#define IWL_DL_HOST_COMMAND (1 << 2)
#define IWL_DL_STATE (1 << 3)
-
+#define IWL_DL_MACDUMP (1 << 4)
#define IWL_DL_RADIO (1 << 7)
#define IWL_DL_POWER (1 << 8)
#define IWL_DL_TEMP (1 << 9)
@@ -154,6 +154,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_MACDUMP(f, a...) IWL_DEBUG(IWL_DL_MACDUMP, f, ## a)
#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a)
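IWL_DL_MACDUMP gives the very chatty per-frame enter/leave prints in the TX path their own debug bit, so they can be enabled separately from the rest of IWL_DL_MAC80211. A user-space sketch of the same bitmask-gated debug macro pattern (names and bit values illustrative only; the ## __VA_ARGS__ form is the GNU extension the kernel macros also rely on):

    #include <stdio.h>

    /* Debug categories as individual bits, mirroring the IWL_DL_* scheme. */
    #define DL_MAC80211  (1u << 1)
    #define DL_MACDUMP   (1u << 4)          /* new: very chatty per-frame prints */

    static unsigned int debug_level = DL_MAC80211;  /* MACDUMP off by default */

    #define DEBUG(level, fmt, ...) \
            do { \
                    if (debug_level & (level)) \
                            fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__); \
            } while (0)

    static void tx_one_frame(int len)
    {
            DEBUG(DL_MACDUMP, "enter, len=%d\n", len);  /* silent unless MACDUMP set */
            /* ... build and queue the frame ... */
            DEBUG(DL_MACDUMP, "leave\n");
    }

    int main(void)
    {
            tx_one_frame(42);               /* prints nothing */
            debug_level |= DL_MACDUMP;      /* enable the noisy category */
            tx_one_frame(43);               /* now logs enter/leave */
            return 0;
    }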
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ed948dc59b3..20db0eb636a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -231,7 +231,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
DECLARE_MAC_BUF(mac);
buf = kmalloc(bufsz, GFP_KERNEL);
- if(!buf)
+ if (!buf)
return -ENOMEM;
pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
@@ -364,16 +364,19 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
struct iwl_debugfs *dbgfs;
struct dentry *phyd = priv->hw->wiphy->debugfsdir;
+ int ret = 0;
dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
if (!dbgfs) {
+ ret = -ENOMEM;
goto err;
}
priv->dbgfs = dbgfs;
dbgfs->name = name;
dbgfs->dir_drv = debugfs_create_dir(name, phyd);
- if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){
+ if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) {
+ ret = -ENOENT;
goto err;
}
@@ -394,7 +397,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
err:
IWL_ERROR("Can't open the debugfs directory\n");
iwl_dbgfs_unregister(priv);
- return -ENOENT;
+ return ret;
}
EXPORT_SYMBOL(iwl_dbgfs_register);
@@ -404,7 +407,7 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
*/
void iwl_dbgfs_unregister(struct iwl_priv *priv)
{
- if (!(priv->dbgfs))
+ if (!priv->dbgfs)
return;
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 4d789e353e3..c19db438306 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -36,7 +36,7 @@
#include <linux/kernel.h>
#include <net/ieee80211_radiotap.h>
-#define DRV_NAME "iwl4965"
+#define DRV_NAME "iwlagn"
#include "iwl-rfkill.h"
#include "iwl-eeprom.h"
#include "iwl-4965-hw.h"
@@ -45,6 +45,7 @@
#include "iwl-debug.h"
#include "iwl-led.h"
#include "iwl-power.h"
+#include "iwl-agn-rs.h"
/* configuration for the iwl4965 */
extern struct iwl_cfg iwl4965_agn_cfg;
@@ -134,8 +135,7 @@ struct iwl_tx_info {
struct iwl_tx_queue {
struct iwl_queue q;
struct iwl_tfd_frame *bd;
- struct iwl_cmd *cmd;
- dma_addr_t dma_addr_cmd;
+ struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
struct iwl_tx_info *txb;
int need_update;
int sched_retry;
@@ -191,7 +191,6 @@ struct iwl4965_clip_group {
const s8 clip_powers[IWL_MAX_RATES];
};
-#include "iwl-4965-rs.h"
#define IWL_TX_FIFO_AC0 0
#define IWL_TX_FIFO_AC1 1
@@ -219,7 +218,7 @@ enum iwl_pwr_src {
struct iwl_frame {
union {
struct ieee80211_hdr frame;
- struct iwl4965_tx_beacon_cmd beacon;
+ struct iwl_tx_beacon_cmd beacon;
u8 raw[IEEE80211_FRAME_LEN];
u8 cmd[360];
} u;
@@ -283,10 +282,9 @@ struct iwl_cmd {
u32 val32;
struct iwl4965_bt_cmd bt;
struct iwl4965_rxon_time_cmd rxon_time;
- struct iwl4965_powertable_cmd powertable;
+ struct iwl_powertable_cmd powertable;
struct iwl_qosparam_cmd qosparam;
struct iwl_tx_cmd tx;
- struct iwl4965_tx_beacon_cmd tx_beacon;
struct iwl4965_rxon_assoc_cmd rxon_assoc;
struct iwl_rem_sta_cmd rm_sta;
u8 *indirect;
@@ -590,6 +588,7 @@ extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
const u8 *dest, int left);
extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
+extern int iwl4965_set_power(struct iwl_priv *priv, void *cmd);
extern const u8 iwl_bcast_addr[ETH_ALEN];
@@ -642,10 +641,6 @@ struct iwl_priv;
* Forward declare iwl-4965.c functions for iwl-base.c
*/
extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
-
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- enum ieee80211_ampdu_mlme_action action,
- const u8 *addr, u16 tid, u16 *ssn);
int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
u8 tid, int txq_id);
@@ -812,14 +807,11 @@ struct iwl_chain_noise_data {
#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
-
enum {
MEASUREMENT_READY = (1 << 0),
MEASUREMENT_ACTIVE = (1 << 1),
};
-#endif
#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
@@ -844,7 +836,7 @@ struct iwl_priv {
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
+#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
/* spectrum measurement report caching */
struct iwl4965_spectrum_notification measure_report;
u8 measurement_status;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 4a08a1b5097..bce53830b30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -273,8 +273,7 @@ EXPORT_SYMBOL(iwl_eeprom_init);
void iwl_eeprom_free(struct iwl_priv *priv)
{
- if(priv->eeprom)
- kfree(priv->eeprom);
+ kfree(priv->eeprom);
priv->eeprom = NULL;
}
EXPORT_SYMBOL(iwl_eeprom_free);
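The eeprom hunk relies on kfree(NULL) being a defined no-op, so the NULL check before it is redundant. The same guarantee holds for free() in user space, as in this tiny sketch (struct and field names invented):

    #include <stdlib.h>

    struct dev { void *eeprom; };

    static void eeprom_free(struct dev *d)
    {
            /* free(NULL), like kfree(NULL), is defined to do nothing,
             * so no NULL check is needed before the call. */
            free(d->eeprom);
            d->eeprom = NULL;       /* avoid a dangling pointer on later calls */
    }

    int main(void)
    {
            struct dev d = { .eeprom = malloc(16) };

            eeprom_free(&d);
            eeprom_free(&d);        /* second call frees NULL: still fine */
            return 0;
    }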
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 8fa991b7202..6512834bb91 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -228,7 +228,7 @@ cancel:
* TX cmd queue. Otherwise in case the cmd comes
* in later, it will possibly set an invalid
* address (cmd->meta.source). */
- qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
+ qcmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
qcmd->meta.flags &= ~CMD_WANT_SKB;
}
fail:
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 61250e6a7d1..cb11c4a4d69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -161,12 +161,32 @@ int iwl4965_led_off(struct iwl_priv *priv, int led_id)
/* Set led register off */
static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id)
{
- IWL_DEBUG_LED("radio off\n");
+ IWL_DEBUG_LED("LED Reg off\n");
iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
return 0;
}
/*
+ * Set led register in case of disassociation according to rfkill state
+ */
+static int iwl_led_associate(struct iwl_priv *priv, int led_id)
+{
+ IWL_DEBUG_LED("Associated\n");
+ priv->allow_blinking = 1;
+ return iwl4965_led_on_reg(priv, led_id);
+}
+static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
+{
+ priv->allow_blinking = 0;
+ if (iwl_is_rfkill(priv))
+ iwl4965_led_off_reg(priv, led_id);
+ else
+ iwl4965_led_on_reg(priv, led_id);
+
+ return 0;
+}
+
+/*
* brightness call back function for Tx/Rx LED
*/
static int iwl_led_associated(struct iwl_priv *priv, int led_id)
@@ -199,16 +219,10 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
led_type_str[led->type], brightness);
switch (brightness) {
case LED_FULL:
- if (led->type == IWL_LED_TRG_ASSOC)
- priv->allow_blinking = 1;
-
if (led->led_on)
led->led_on(priv, IWL_LED_LINK);
break;
case LED_OFF:
- if (led->type == IWL_LED_TRG_ASSOC)
- priv->allow_blinking = 0;
-
if (led->led_off)
led->led_off(priv, IWL_LED_LINK);
break;
@@ -228,12 +242,12 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
*/
static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
enum led_type type, u8 set_led,
- const char *name, char *trigger)
+ char *trigger)
{
struct device *device = wiphy_dev(priv->hw->wiphy);
int ret;
- led->led_dev.name = name;
+ led->led_dev.name = led->name;
led->led_dev.brightness_set = iwl_led_brightness_set;
led->led_dev.default_trigger = trigger;
@@ -284,12 +298,6 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
return i;
}
-static inline int is_rf_kill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
- test_bit(STATUS_RF_KILL_SW, &priv->status);
-}
-
/*
* this function called from handler. Since setting Led command can
* happen very frequent we postpone led command to be called from
@@ -303,7 +311,7 @@ void iwl_leds_background(struct iwl_priv *priv)
priv->last_blink_time = 0;
return;
}
- if (is_rf_kill(priv)) {
+ if (iwl_is_rfkill(priv)) {
priv->last_blink_time = 0;
return;
}
@@ -337,7 +345,6 @@ EXPORT_SYMBOL(iwl_leds_background);
int iwl_leds_register(struct iwl_priv *priv)
{
char *trigger;
- char name[32];
int ret;
priv->last_blink_rate = 0;
@@ -346,7 +353,8 @@ int iwl_leds_register(struct iwl_priv *priv)
priv->allow_blinking = 0;
trigger = ieee80211_get_radio_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:radio",
+ snprintf(priv->led[IWL_LED_TRG_RADIO].name,
+ sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
wiphy_name(priv->hw->wiphy));
priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg;
@@ -354,31 +362,33 @@ int iwl_leds_register(struct iwl_priv *priv)
priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO],
- IWL_LED_TRG_RADIO, 1, name, trigger);
+ IWL_LED_TRG_RADIO, 1, trigger);
if (ret)
goto exit_fail;
trigger = ieee80211_get_assoc_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:assoc",
+ snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
+ sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
wiphy_name(priv->hw->wiphy));
ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
- IWL_LED_TRG_ASSOC, 0, name, trigger);
+ IWL_LED_TRG_ASSOC, 0, trigger);
/* for assoc always turn led on */
- priv->led[IWL_LED_TRG_ASSOC].led_on = iwl4965_led_on_reg;
- priv->led[IWL_LED_TRG_ASSOC].led_off = iwl4965_led_on_reg;
+ priv->led[IWL_LED_TRG_ASSOC].led_on = iwl_led_associate;
+ priv->led[IWL_LED_TRG_ASSOC].led_off = iwl_led_disassociate;
priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
if (ret)
goto exit_fail;
trigger = ieee80211_get_rx_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:RX", wiphy_name(priv->hw->wiphy));
-
+ snprintf(priv->led[IWL_LED_TRG_RX].name,
+ sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
+ wiphy_name(priv->hw->wiphy));
ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
- IWL_LED_TRG_RX, 0, name, trigger);
+ IWL_LED_TRG_RX, 0, trigger);
priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated;
priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated;
@@ -388,9 +398,12 @@ int iwl_leds_register(struct iwl_priv *priv)
goto exit_fail;
trigger = ieee80211_get_tx_led_name(priv->hw);
- snprintf(name, sizeof(name), "iwl-%s:TX", wiphy_name(priv->hw->wiphy));
+ snprintf(priv->led[IWL_LED_TRG_TX].name,
+ sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
+ wiphy_name(priv->hw->wiphy));
+
ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
- IWL_LED_TRG_TX, 0, name, trigger);
+ IWL_LED_TRG_TX, 0, trigger);
priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated;
priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated;
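The LED changes move the sysfs name out of a stack buffer in iwl_leds_register() and into a name[32] field of struct iwl_led, because the LED class keeps only a pointer to the string. A small user-space sketch of the lifetime problem being fixed; the types here stand in for iwl_led/led_classdev and are not the real definitions:

    #include <stdio.h>

    struct led {                    /* stands in for struct iwl_led + led_classdev */
            char name[32];          /* the fix: storage lives as long as the LED */
            const char *dev_name;   /* what the LED class actually keeps */
    };

    static void register_led(struct led *led, const char *wiphy)
    {
            /* Before the fix, the name was built in a local char name[32] and
             * only the pointer was stored -- dangling once this returned.
             * Writing into led->name keeps the string valid. */
            snprintf(led->name, sizeof(led->name), "iwl-%s:radio", wiphy);
            led->dev_name = led->name;
    }

    int main(void)
    {
            struct led radio;

            register_led(&radio, "phy0");
            printf("%s\n", radio.dev_name); /* still valid: "iwl-phy0:radio" */
            return 0;
    }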
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 1980ae5a7e8..588c9ad20e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -52,6 +52,7 @@ enum led_type {
struct iwl_led {
struct iwl_priv *priv;
struct led_classdev led_dev;
+ char name[32];
int (*led_on) (struct iwl_priv *priv, int led_id);
int (*led_off) (struct iwl_priv *priv, int led_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 2e71803e09b..028e3053c0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -82,7 +82,7 @@
/* default power management (not Tx power) table values */
/* for tim 0-10 */
-static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
+static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
{{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
@@ -93,7 +93,7 @@ static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
/* for tim = 3-10 */
-static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
+static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
{{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
@@ -103,7 +103,7 @@ static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
};
/* for tim > 11 */
-static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = {
+static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = {
{{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -112,12 +112,19 @@ static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = {
{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};
+/* set card power command */
+static int iwl_set_power(struct iwl_priv *priv, void *cmd)
+{
+ return iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
+ sizeof(struct iwl_powertable_cmd),
+ cmd, NULL);
+}
/* decide the right power level according to association status
* and battery status
*/
static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
{
- u16 mode = priv->power_data.user_power_setting;
+ u16 mode;
switch (priv->power_data.user_power_setting) {
case IWL_POWER_AUTO:
@@ -129,12 +136,16 @@ static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
else
mode = IWL_POWER_ON_AC_DISASSOC;
break;
+ /* FIXME: remove battery and ac from here */
case IWL_POWER_BATTERY:
mode = IWL_POWER_INDEX_3;
break;
case IWL_POWER_AC:
mode = IWL_POWER_MODE_CAM;
break;
+ default:
+ mode = priv->power_data.user_power_setting;
+ break;
}
return mode;
}
@@ -144,7 +155,7 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
{
int ret = 0, i;
struct iwl_power_mgr *pow_data;
- int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
+ int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
u16 pci_pm;
IWL_DEBUG_POWER("Initialize power \n");
@@ -162,11 +173,11 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
if (ret != 0)
return 0;
else {
- struct iwl4965_powertable_cmd *cmd;
+ struct iwl_powertable_cmd *cmd;
IWL_DEBUG_POWER("adjust power command flags\n");
- for (i = 0; i < IWL_POWER_AC; i++) {
+ for (i = 0; i < IWL_POWER_MAX; i++) {
cmd = &pow_data->pwr_range_0[i].cmd;
if (pci_pm & 0x1)
@@ -180,7 +191,7 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
/* adjust power command according to dtim period and power level*/
static int iwl_update_power_command(struct iwl_priv *priv,
- struct iwl4965_powertable_cmd *cmd,
+ struct iwl_powertable_cmd *cmd,
u16 mode)
{
int ret = 0, i;
@@ -204,7 +215,7 @@ static int iwl_update_power_command(struct iwl_priv *priv,
range = &pow_data->pwr_range_2[0];
period = pow_data->dtim_period;
- memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
+ memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
if (period == 0) {
period = 1;
@@ -258,17 +269,18 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
* else user level */
switch (setting->system_power_setting) {
- case IWL_POWER_AUTO:
+ case IWL_POWER_SYS_AUTO:
final_mode = iwl_get_auto_power_mode(priv);
break;
- case IWL_POWER_BATTERY:
+ case IWL_POWER_SYS_BATTERY:
final_mode = IWL_POWER_INDEX_3;
break;
- case IWL_POWER_AC:
+ case IWL_POWER_SYS_AC:
final_mode = IWL_POWER_MODE_CAM;
break;
default:
- final_mode = setting->system_power_setting;
+ final_mode = IWL_POWER_INDEX_3;
+ WARN_ON(1);
}
if (setting->critical_power_setting > final_mode)
@@ -280,7 +292,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
((setting->power_mode != final_mode) || refresh)) {
- struct iwl4965_powertable_cmd cmd;
+ struct iwl_powertable_cmd cmd;
if (final_mode != IWL_POWER_MODE_CAM)
set_bit(STATUS_POWER_PMI, &priv->status);
@@ -291,8 +303,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
if (final_mode == IWL_POWER_INDEX_5)
cmd.flags |= IWL_POWER_FAST_PD;
- if (priv->cfg->ops->lib->set_power)
- ret = priv->cfg->ops->lib->set_power(priv, &cmd);
+ ret = iwl_set_power(priv, &cmd);
if (final_mode == IWL_POWER_MODE_CAM)
clear_bit(STATUS_POWER_PMI, &priv->status);
@@ -388,7 +399,7 @@ void iwl_power_initialize(struct iwl_priv *priv)
iwl_power_init_handle(priv);
priv->power_data.user_power_setting = IWL_POWER_AUTO;
priv->power_data.power_disabled = 0;
- priv->power_data.system_power_setting = IWL_POWER_AUTO;
+ priv->power_data.system_power_setting = IWL_POWER_SYS_AUTO;
priv->power_data.is_battery_active = 0;
priv->power_data.power_disabled = 0;
priv->power_data.critical_power_setting = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index b066724a1c2..abcbbf96a84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -33,12 +33,25 @@
struct iwl_priv;
-#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
-#define IWL_POWER_INDEX_3 0x03
-#define IWL_POWER_INDEX_5 0x05
-#define IWL_POWER_AC 0x06
-#define IWL_POWER_BATTERY 0x07
-#define IWL_POWER_AUTO 0x08
+enum {
+ IWL_POWER_MODE_CAM, /* Continuously Aware Mode, always on */
+ IWL_POWER_INDEX_1,
+ IWL_POWER_INDEX_2,
+ IWL_POWER_INDEX_3,
+ IWL_POWER_INDEX_4,
+ IWL_POWER_INDEX_5,
+ IWL_POWER_AUTO,
+ IWL_POWER_MAX = IWL_POWER_AUTO,
+ IWL_POWER_AC,
+ IWL_POWER_BATTERY,
+};
+
+enum {
+ IWL_POWER_SYS_AUTO,
+ IWL_POWER_SYS_AC,
+ IWL_POWER_SYS_BATTERY,
+};
+
#define IWL_POWER_LIMIT 0x08
#define IWL_POWER_MASK 0x0F
#define IWL_POWER_ENABLED 0x10
@@ -46,15 +59,15 @@ struct iwl_priv;
/* Power management (not Tx power) structures */
struct iwl_power_vec_entry {
- struct iwl4965_powertable_cmd cmd;
+ struct iwl_powertable_cmd cmd;
u8 no_dtim;
};
struct iwl_power_mgr {
spinlock_t lock;
- struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC];
- struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC];
- struct iwl_power_vec_entry pwr_range_2[IWL_POWER_AC];
+ struct iwl_power_vec_entry pwr_range_0[IWL_POWER_MAX];
+ struct iwl_power_vec_entry pwr_range_1[IWL_POWER_MAX];
+ struct iwl_power_vec_entry pwr_range_2[IWL_POWER_MAX];
u32 dtim_period;
/* final power level that used to calculate final power command */
u8 power_mode;
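Replacing the IWL_POWER_* defines with enums lets IWL_POWER_MAX (equal to IWL_POWER_AUTO) size the three power-vector tables, while the new IWL_POWER_SYS_* enum keeps the system policy values out of the table index space. A sketch of the enum-sized-table idea; the level names mirror the header but the millisecond values are invented:

    #include <stdio.h>

    enum power_level {              /* mirrors the shape of the IWL_POWER_* enum */
            POWER_MODE_CAM,
            POWER_INDEX_1,
            POWER_INDEX_2,
            POWER_INDEX_3,
            POWER_INDEX_4,
            POWER_INDEX_5,
            POWER_AUTO,
            POWER_MAX = POWER_AUTO, /* tables hold the fixed entries only */
    };

    /* One entry per fixed level; sized by the enum so it cannot drift. */
    static const int sleep_interval_ms[POWER_MAX] = {
            [POWER_MODE_CAM] = 0,
            [POWER_INDEX_1]  = 200,
            [POWER_INDEX_2]  = 300,
            [POWER_INDEX_3]  = 500,
            [POWER_INDEX_4]  = 700,
            [POWER_INDEX_5]  = 1000,
    };

    int main(void)
    {
            enum power_level lvl = POWER_INDEX_3;

            printf("level %d -> %d ms\n", (int)lvl, sleep_interval_ms[lvl]);
            return 0;
    }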
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 70d9c7568b9..ee5afd48d3a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -84,14 +84,16 @@
#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
-#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
-#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
+#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
+#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
-#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x01000000)
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
/**
* BSM (Bootstrap State Machine)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e2d9afba38a..f3f6ea49fdd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -791,7 +791,7 @@ static inline void iwl_dbg_report_frame(struct iwl_priv *priv,
static void iwl_add_radiotap(struct iwl_priv *priv,
struct sk_buff *skb,
- struct iwl4965_rx_phy_res *rx_start,
+ struct iwl_rx_phy_res *rx_start,
struct ieee80211_rx_status *stats,
u32 ampdu_status)
{
@@ -1010,8 +1010,8 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_rx_status *stats)
{
struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
- struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
- (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
+ struct iwl_rx_phy_res *rx_start = (include_phy) ?
+ (struct iwl_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
struct ieee80211_hdr *hdr;
u16 len;
__le32 *rx_end;
@@ -1020,7 +1020,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
u32 ampdu_status_legacy;
if (!include_phy && priv->last_phy_res[0])
- rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
+ rx_start = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
if (!rx_start) {
IWL_ERROR("MPDU frame without a PHY data\n");
@@ -1032,8 +1032,8 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
len = le16_to_cpu(rx_start->byte_count);
- rx_end = (__le32 *) ((u8 *) &pkt->u.raw[0] +
- sizeof(struct iwl4965_rx_phy_res) +
+ rx_end = (__le32 *)((u8 *) &pkt->u.raw[0] +
+ sizeof(struct iwl_rx_phy_res) +
rx_start->cfg_phy_cnt + len);
} else {
@@ -1084,40 +1084,13 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
}
/* Calc max signal level (dBm) among 3 possible receivers */
-static int iwl_calc_rssi(struct iwl_priv *priv,
- struct iwl4965_rx_phy_res *rx_resp)
+static inline int iwl_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host. */
- struct iwl4965_rx_non_cfg_phy *ncphy =
- (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
- u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
- >> IWL_AGC_DB_POS;
-
- u32 valid_antennae =
- (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
- >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
- u8 max_rssi = 0;
- u32 i;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info. */
- for (i = 0; i < 3; i++)
- if (valid_antennae & (1 << i))
- max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
- IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
- max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL_RSSI_OFFSET;
+ return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
+
static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
unsigned long flags;
@@ -1180,9 +1153,9 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
* this rx packet for legacy frames,
* or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
int include_phy = (pkt->hdr.cmd == REPLY_RX);
- struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
- (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
- (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
+ struct iwl_rx_phy_res *rx_start = (include_phy) ?
+ (struct iwl_rx_phy_res *)&(pkt->u.raw[0]) :
+ (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
__le32 *rx_end;
unsigned int len = 0;
u16 fc;
@@ -1210,7 +1183,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
if (!include_phy) {
if (priv->last_phy_res[0])
- rx_start = (struct iwl4965_rx_phy_res *)
+ rx_start = (struct iwl_rx_phy_res *)
&priv->last_phy_res[1];
else
rx_start = NULL;
@@ -1227,7 +1200,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
len = le16_to_cpu(rx_start->byte_count);
rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
- sizeof(struct iwl4965_rx_phy_res) + len);
+ sizeof(struct iwl_rx_phy_res) + len);
} else {
struct iwl4965_rx_mpdu_res_start *amsdu =
(struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
@@ -1316,6 +1289,6 @@ void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
priv->last_phy_res[0] = 1;
memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
- sizeof(struct iwl4965_rx_phy_res));
+ sizeof(struct iwl_rx_phy_res));
}
EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
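For HT frames the PHY data arrives in a separate REPLY_RX_PHY_CMD notification and is cached in priv->last_phy_res, then paired with the following MPDU that carries no PHY block of its own. A simplified user-space sketch of that cache-and-pair flow, with invented types and field names:

    #include <stdbool.h>
    #include <stdio.h>

    struct phy_res { int channel; int rssi; };

    static struct phy_res last_phy;         /* mirrors priv->last_phy_res[] */
    static bool last_phy_valid;

    /* PHY data arrives on its own notification: cache it. */
    static void on_phy_notification(const struct phy_res *res)
    {
            last_phy = *res;
            last_phy_valid = true;
    }

    /* Data notification: use the embedded PHY block if present, otherwise
     * fall back to the cached one (the HT case). */
    static void on_frame(const struct phy_res *embedded)
    {
            const struct phy_res *phy = embedded ? embedded :
                                        last_phy_valid ? &last_phy : NULL;

            if (!phy) {
                    fprintf(stderr, "MPDU frame without a PHY data\n");
                    return;
            }
            printf("frame on channel %d, rssi %d\n", phy->channel, phy->rssi);
    }

    int main(void)
    {
            struct phy_res res = { .channel = 36, .rssi = -52 };

            on_phy_notification(&res);
            on_frame(NULL);         /* HT path: no embedded PHY, uses the cache */
            return 0;
    }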
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 5a00ac23e2d..9bb6adb28b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -202,6 +202,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
clear_bit(STATUS_SCAN_HW, &priv->status);
}
+ priv->alloc_rxb_skb--;
dev_kfree_skb_any(cmd.meta.u.skb);
return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 6d1467d0bd9..60a6e010603 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -823,7 +823,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
if (lq->sta_id == 0xFF)
lq->sta_id = IWL_AP_ID;
- iwl_dump_lq_cmd(priv,lq);
+ iwl_dump_lq_cmd(priv, lq);
if (iwl_is_associated(priv) && priv->assoc_station_added)
return iwl_send_cmd(priv, &cmd);
@@ -839,7 +839,7 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
* for automatic fallback during transmission.
*
* NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
* rc80211_simple.
*
* NOTE: Run REPLY_ADD_STA command to set up station table entry, before
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index f72cd0bf6aa..4108c7c8f00 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -208,11 +208,12 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
* Free all buffers.
* 0-fill, but do not free "txq" descriptor structure.
*/
-static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
struct pci_dev *dev = priv->pci_dev;
- int len;
+ int i, slots_num, len;
if (q->n_bd == 0)
return;
@@ -227,7 +228,12 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
len += IWL_MAX_SCAN_SIZE;
/* De-alloc array of command/tx buffers */
- pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+ slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->cmd[i]);
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ kfree(txq->cmd[slots_num]);
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
@@ -400,8 +406,7 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
int slots_num, u32 txq_id)
{
- struct pci_dev *dev = priv->pci_dev;
- int len;
+ int i, len;
int rc = 0;
/*
@@ -412,17 +417,25 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
* For normal Tx queues (all other queues), no super-size command
* space is needed.
*/
- len = sizeof(struct iwl_cmd) * slots_num;
- if (txq_id == IWL_CMD_QUEUE_NUM)
- len += IWL_MAX_SCAN_SIZE;
- txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
- if (!txq->cmd)
- return -ENOMEM;
+ len = sizeof(struct iwl_cmd);
+ for (i = 0; i <= slots_num; i++) {
+ if (i == slots_num) {
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ len += IWL_MAX_SCAN_SIZE;
+ else
+ continue;
+ }
+
+ txq->cmd[i] = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!txq->cmd[i])
+ return -ENOMEM;
+ }
/* Alloc driver data array and TFD circular buffer */
rc = iwl_tx_queue_alloc(priv, txq, txq_id);
if (rc) {
- pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->cmd[i]);
return -ENOMEM;
}
@@ -451,7 +464,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
/* Tx queues */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- iwl_tx_queue_free(priv, &priv->txq[txq_id]);
+ iwl_tx_queue_free(priv, txq_id);
/* Keep-warm buffer */
iwl_kw_free(priv);
@@ -751,20 +764,19 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_tfd_frame *tfd;
- u32 *control_flags;
- int txq_id = skb_get_queue_mapping(skb);
- struct iwl_tx_queue *txq = NULL;
- struct iwl_queue *q = NULL;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_cmd *out_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+ int swq_id, txq_id;
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
- struct iwl_cmd *out_cmd = NULL;
- struct iwl_tx_cmd *tx_cmd;
u16 len, idx, len_org;
u16 seq_number = 0;
- u8 id, hdr_len, unicast;
- u8 sta_id;
__le16 fc;
+ u8 hdr_len, unicast;
+ u8 sta_id;
u8 wait_write_ptr = 0;
u8 tid = 0;
u8 *qc = NULL;
@@ -789,7 +801,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
}
unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
fc = hdr->frame_control;
@@ -827,14 +838,16 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX("station Id %d\n", sta_id);
+ swq_id = skb_get_queue_mapping(skb);
+ txq_id = swq_id;
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
- seq_number = priv->stations[sta_id].tid[tid].seq_number &
- IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = cpu_to_le16(seq_number) |
- (hdr->seq_ctrl &
- __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
+ seq_number = priv->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ __constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
@@ -851,7 +864,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up first empty TFD within this queue's circular TFD buffer */
tfd = &txq->bd[q->write_ptr];
memset(tfd, 0, sizeof(*tfd));
- control_flags = (u32 *) tfd;
idx = get_cmd_index(q, q->write_ptr, 0);
/* Set up driver data for this TFD */
@@ -859,7 +871,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq->txb[q->write_ptr].skb[0] = skb;
/* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = &txq->cmd[idx];
+ out_cmd = txq->cmd[idx];
tx_cmd = &out_cmd->cmd.tx;
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -899,8 +911,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
- txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
- offsetof(struct iwl_cmd, hdr);
+ txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
+ sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
+ txcmd_phys += offsetof(struct iwl_cmd, hdr);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
@@ -962,16 +975,15 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (ret)
return ret;
- if ((iwl_queue_space(q) < q->high_mark)
- && priv->mac80211_registered) {
+ if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
if (wait_write_ptr) {
spin_lock_irqsave(&priv->lock, flags);
txq->need_update = 1;
iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ ieee80211_stop_queue(priv->hw, swq_id);
}
-
- ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
}
return 0;
@@ -999,13 +1011,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
struct iwl_tfd_frame *tfd;
- u32 *control_flags;
struct iwl_cmd *out_cmd;
- u32 idx;
- u16 fix_size;
dma_addr_t phys_addr;
- int ret;
unsigned long flags;
+ int len, ret;
+ u32 idx;
+ u16 fix_size;
cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
@@ -1031,10 +1042,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
tfd = &txq->bd[q->write_ptr];
memset(tfd, 0, sizeof(*tfd));
- control_flags = (u32 *) tfd;
idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
- out_cmd = &txq->cmd[idx];
+ out_cmd = txq->cmd[idx];
out_cmd->hdr.cmd = cmd->id;
memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
@@ -1048,9 +1058,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
INDEX_TO_SEQ(q->write_ptr));
if (out_cmd->meta.flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
-
- phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
- offsetof(struct iwl_cmd, hdr);
+ len = (idx == TFD_CMD_SLOTS) ?
+ IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
+ phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
+ PCI_DMA_TODEVICE);
+ phys_addr += offsetof(struct iwl_cmd, hdr);
iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
@@ -1115,6 +1127,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
+ struct iwl_tfd_frame *bd = &txq->bd[index];
+ dma_addr_t dma_addr;
+ int is_odd, buf_len;
int nfreed = 0;
if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
@@ -1132,6 +1147,19 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
q->write_ptr, q->read_ptr);
queue_work(priv->workqueue, &priv->restart);
}
+ is_odd = (index/2) & 0x1;
+ if (is_odd) {
+ dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
+ (IWL_GET_BITS(bd->pa[index],
+ tb2_addr_hi20) << 16);
+ buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
+ } else {
+ dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
+ buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
+ }
+
+ pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
+ PCI_DMA_TODEVICE);
nfreed++;
}
}
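The hunks above stop deriving the command's bus address from one pre-mapped block (dma_addr_cmd plus an index) and instead pci_map_single() each command buffer at enqueue time; that is why the reclaim path gains a matching pci_unmap_single(). A rough sketch of the pairing, using the same pci_map_single()/pci_unmap_single() wrappers as the diff; struct demo_cmd and both function names are placeholders rather than iwlwifi code, and mapping-error handling is omitted just as it is in the patch.

    #include <linux/pci.h>

    /* Hypothetical command buffer, standing in for the driver's command struct. */
    struct demo_cmd {
        u8 payload[128];
    };

    /* Enqueue side: map the buffer for device reads and hand the returned
     * bus address to the hardware descriptor. */
    static dma_addr_t demo_map_cmd(struct pci_dev *pdev, struct demo_cmd *cmd)
    {
        return pci_map_single(pdev, cmd, sizeof(*cmd), PCI_DMA_TODEVICE);
    }

    /* Reclaim side: once the device has consumed the buffer, unmap it with
     * the same size and direction that were used for the mapping. */
    static void demo_unmap_cmd(struct pci_dev *pdev, dma_addr_t phys)
    {
        pci_unmap_single(pdev, phys, sizeof(struct demo_cmd), PCI_DMA_TODEVICE);
    }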
@@ -1163,7 +1191,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
- cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+ cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
/* Input error checking is done when commands are added to queue. */
if (cmd->meta.flags & CMD_WANT_SKB) {
@@ -1391,7 +1419,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
/* For each frame attempted in aggregation,
* update driver's record of tx frame's status. */
for (i = 0; i < agg->frame_count ; i++) {
- ack = bitmap & (1 << i);
+ ack = bitmap & (1ULL << i);
successes += !!ack;
IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
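The last hunk is a plain bit-width fix: the block-ack bitmap covers up to 64 frames, but 1 << i shifts an int, so frames 32 and above could never be seen as acknowledged. A self-contained illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bitmap = 1ULL << 40;   /* frame 40 was acked */
        int i = 40;

        /* Wrong: the constant 1 is an int, so the shift happens in 32 bits
         * and the result is undefined for i >= 32:
         *     uint64_t ack = bitmap & (1 << i);
         * Right: force a 64-bit shift so bits 32..63 can be tested. */
        uint64_t ack = bitmap & (1ULL << i);

        printf("acked: %s\n", ack ? "yes" : "no");   /* prints "acked: yes" */
        return 0;
    }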
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 7c82ecfa30a..444847ab1b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -275,10 +275,8 @@ static int iwl3945_tx_queue_alloc(struct iwl3945_priv *priv,
return 0;
error:
- if (txq->txb) {
- kfree(txq->txb);
- txq->txb = NULL;
- }
+ kfree(txq->txb);
+ txq->txb = NULL;
return -ENOMEM;
}
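The simplification above leans on kfree() being a no-op for a NULL pointer, so the surrounding if is redundant; clearing the pointer afterwards keeps the error path safe to run twice. The same idiom in standard C, with a toy struct standing in for the queue:

    #include <stdlib.h>

    struct txq {
        void *txb;
    };

    static void txq_free(struct txq *q)
    {
        /* free(NULL), like kfree(NULL), is defined to do nothing. */
        free(q->txb);
        q->txb = NULL;   /* no dangling pointer if this runs again */
    }

    int main(void)
    {
        struct txq q = { .txb = malloc(32) };

        txq_free(&q);   /* frees the buffer */
        txq_free(&q);   /* harmless: txb is already NULL */
        return 0;
    }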
@@ -365,10 +363,8 @@ void iwl3945_tx_queue_free(struct iwl3945_priv *priv, struct iwl3945_tx_queue *t
txq->q.n_bd, txq->bd, txq->q.dma_addr);
/* De-alloc array of per-TFD driver data */
- if (txq->txb) {
- kfree(txq->txb);
- txq->txb = NULL;
- }
+ kfree(txq->txb);
+ txq->txb = NULL;
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
@@ -2703,9 +2699,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
- if (qc) {
+ if (qc)
priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
} else {
wait_write_ptr = 1;
txq->need_update = 0;
@@ -3813,7 +3808,7 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
/* 100:1 or higher, divide by 10 and use table,
* add 20 dB to make up for divide by 10 */
if (sig_ratio >= 100)
- return (20 + (int)ratio2dB[sig_ratio/10]);
+ return 20 + (int)ratio2dB[sig_ratio/10];
/* We shouldn't see this */
if (sig_ratio < 1)
@@ -5088,7 +5083,7 @@ static void iwl3945_dealloc_ucode_pci(struct iwl3945_priv *priv)
* iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
* looking at all data.
*/
-static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 * image, u32 len)
+static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 *image, u32 len)
{
u32 val;
u32 save_len = len;
@@ -5237,7 +5232,7 @@ static int iwl3945_verify_bsm(struct iwl3945_priv *priv)
val = iwl3945_read_prph(priv, BSM_WR_DWCOUNT_REG);
for (reg = BSM_SRAM_LOWER_BOUND;
reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image ++) {
+ reg += sizeof(u32), image++) {
val = iwl3945_read_prph(priv, reg);
if (val != le32_to_cpu(*image)) {
IWL_ERROR("BSM uCode verification failed at "
@@ -6336,7 +6331,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
DECLARE_MAC_BUF(mac);
if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
- IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
+ IWL_ERROR("%s Should not be called in AP mode\n", __func__);
return;
}
@@ -6417,7 +6412,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
default:
IWL_ERROR("%s Should not be called in %d mode\n",
- __FUNCTION__, priv->iw_mode);
+ __func__, priv->iw_mode);
break;
}
@@ -6594,12 +6589,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
IWL_DEBUG_MAC80211("enter\n");
- if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
- IWL_DEBUG_MAC80211("leave - monitor\n");
- dev_kfree_skb_any(skb);
- return 0;
- }
-
IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
@@ -7456,7 +7445,7 @@ static ssize_t show_measurement(struct device *d,
struct iwl3945_priv *priv = dev_get_drvdata(d);
struct iwl3945_spectrum_notification measure_report;
u32 size = sizeof(measure_report), len = 0, ofs = 0;
- u8 *data = (u8 *) & measure_report;
+ u8 *data = (u8 *)&measure_report;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
@@ -7627,7 +7616,7 @@ static ssize_t show_power_level(struct device *d,
else
p += sprintf(p, " \n");
- return (p - buf + 1);
+ return p - buf + 1;
}
@@ -7649,7 +7638,7 @@ static ssize_t show_statistics(struct device *d,
struct iwl3945_priv *priv = dev_get_drvdata(d);
u32 size = sizeof(struct iwl3945_notif_statistics);
u32 len = 0, ofs = 0;
- u8 *data = (u8 *) & priv->statistics;
+ u8 *data = (u8 *)&priv->statistics;
int rc = 0;
if (!iwl3945_is_alive(priv))
@@ -8003,16 +7992,16 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
/* nic init */
iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
- iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- err = iwl3945_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
- if (err < 0) {
- IWL_DEBUG_INFO("Failed to init the card\n");
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ err = iwl3945_poll_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (err < 0) {
+ IWL_DEBUG_INFO("Failed to init the card\n");
goto out_remove_sysfs;
- }
+ }
/* Read the EEPROM */
err = iwl3945_eeprom_init(priv);
if (err) {
@@ -8114,9 +8103,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl3945_unset_hw_setting(priv);
iwl3945_clear_stations_table(priv);
- if (priv->mac80211_registered) {
+ if (priv->mac80211_registered)
ieee80211_unregister_hw(priv->hw);
- }
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
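Several hunks above replace the GCC-specific __FUNCTION__ with the C99 predefined identifier __func__; both expand to the enclosing function's name, but only the latter is standard. A trivial standalone example:

    #include <stdio.h>

    static void report_mode(int mode)
    {
        /* __func__ is a predefined identifier (C99), not a preprocessor macro. */
        fprintf(stderr, "%s: should not be called in mode %d\n", __func__, mode);
    }

    int main(void)
    {
        report_mode(3);   /* "report_mode: should not be called in mode 3" */
        return 0;
    }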
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 14d5d61cec4..bd32ac0b4e0 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -297,9 +297,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
lbs_add_rtap(priv);
}
priv->monitormode = monitor_mode;
- }
-
- else {
+ } else {
if (!priv->monitormode)
return strlen(buf);
priv->monitormode = 0;
@@ -1242,8 +1240,6 @@ int lbs_start_card(struct lbs_private *priv)
lbs_pr_err("cannot register ethX device\n");
goto done;
}
- if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
- lbs_pr_err("cannot register lbs_rtap attribute\n");
lbs_update_channel(priv);
@@ -1275,6 +1271,13 @@ int lbs_start_card(struct lbs_private *priv)
if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
lbs_pr_err("cannot register lbs_mesh attribute\n");
+
+ /* While rtap isn't related to mesh, only mesh-enabled
+ * firmware implements the rtap functionality via
+ * CMD_802_11_MONITOR_MODE.
+ */
+ if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
+ lbs_pr_err("cannot register lbs_rtap attribute\n");
}
}
@@ -1306,9 +1309,9 @@ void lbs_stop_card(struct lbs_private *priv)
netif_carrier_off(priv->dev);
lbs_debugfs_remove_one(priv);
- device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
if (priv->mesh_tlv) {
device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
+ device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
}
/* Flush pending command nodes */
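The libertas change registers the lbs_rtap attribute only when mesh firmware is present and removes it under the same condition, so every successful device_create_file() is paired with exactly one device_remove_file(). A hedged sketch of that pairing with a made-up attribute; the show routine is a stub, not the libertas one.

    #include <linux/device.h>
    #include <linux/kernel.h>

    /* Hypothetical attribute, standing in for dev_attr_lbs_rtap. */
    static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
    {
        return sprintf(buf, "0\n");
    }

    static DEVICE_ATTR(demo_feature, 0444, demo_show, NULL);

    /* Only firmware builds with the feature get the attribute... */
    static void demo_start(struct device *dev, bool feature_present)
    {
        if (feature_present && device_create_file(dev, &dev_attr_demo_feature))
            dev_err(dev, "cannot register demo_feature attribute\n");
    }

    /* ...and teardown mirrors the same condition. */
    static void demo_stop(struct device *dev, bool feature_present)
    {
        if (feature_present)
            device_remove_file(dev, &dev_attr_demo_feature);
    }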
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index b047306bf38..1ebcafe7ca5 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -1998,13 +1998,6 @@ __orinoco_set_multicast_list(struct net_device *dev)
else
priv->mc_count = mc_count;
}
-
- /* Since we can set the promiscuous flag when it wasn't asked
- for, make sure the net_device knows about it. */
- if (priv->promiscuous)
- dev->flags |= IFF_PROMISC;
- else
- dev->flags &= ~IFF_PROMISC;
}
/* This must be called from user context, without locks held - use
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index c6f27b9022f..4801a363507 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -52,6 +52,8 @@ struct p54_common {
int (*open)(struct ieee80211_hw *dev);
void (*stop)(struct ieee80211_hw *dev);
int mode;
+ u16 seqno;
+ struct mutex conf_mutex;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
struct pda_iq_autocal_entry *iq_autocal;
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index ffaf7a6b681..83cd85e1f84 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -553,6 +553,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
struct ieee80211_tx_queue_stats *current_queue;
struct p54_common *priv = dev->priv;
struct p54_control_hdr *hdr;
+ struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
struct p54_tx_control_allocdata *txhdr;
size_t padding, len;
u8 rate;
@@ -605,6 +606,19 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
if (padding)
txhdr->align[0] = padding;
+ /* FIXME: The sequence that follows is needed for this driver to
+ * work with mac80211 since "mac80211: fix TX sequence numbers".
+ * As with the temporary code in rt2x00, changes will be needed
+ * to get proper sequence numbers on beacons. In addition, this
+ * patch places the sequence number in the hardware state, which
+ * limits us to a single virtual state.
+ */
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ priv->seqno += 0x10;
+ ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
+ }
/* modifies skb->cb and with it info, so must be last! */
p54_assign_address(dev, skb, hdr, skb->len);
@@ -803,8 +817,8 @@ static void p54_set_vdcf(struct ieee80211_hw *dev)
if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
vdcf->slottime = 9;
- vdcf->magic1 = 0x00;
- vdcf->magic2 = 0x10;
+ vdcf->magic1 = 0x10;
+ vdcf->magic2 = 0x00;
} else {
vdcf->slottime = 20;
vdcf->magic1 = 0x0a;
@@ -886,9 +900,12 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
{
int ret;
+ struct p54_common *priv = dev->priv;
+ mutex_lock(&priv->conf_mutex);
ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq));
p54_set_vdcf(dev);
+ mutex_unlock(&priv->conf_mutex);
return ret;
}
@@ -898,10 +915,12 @@ static int p54_config_interface(struct ieee80211_hw *dev,
{
struct p54_common *priv = dev->priv;
+ mutex_lock(&priv->conf_mutex);
p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642);
p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0);
p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
+ mutex_unlock(&priv->conf_mutex);
return 0;
}
@@ -1009,6 +1028,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
}
p54_init_vdcf(dev);
+ mutex_init(&priv->conf_mutex);
return dev;
}
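p54 gains a conf_mutex so the mac80211 config and config_interface callbacks cannot interleave their multi-step firmware commands; the mutex is created once in p54_init_common and held for the whole callback. A small sketch of that shape with placeholder names, not the real p54 state:

    #include <linux/mutex.h>

    /* Hypothetical per-device state, standing in for struct p54_common. */
    struct demo_priv {
        struct mutex conf_mutex;
        int channel;
    };

    static void demo_init(struct demo_priv *priv)
    {
        mutex_init(&priv->conf_mutex);   /* once, before any callback can run */
    }

    static int demo_config(struct demo_priv *priv, int new_channel)
    {
        mutex_lock(&priv->conf_mutex);
        /* Several dependent device commands would be issued here; the lock
         * keeps a concurrent callback from interleaving its own sequence. */
        priv->channel = new_channel;
        mutex_unlock(&priv->conf_mutex);

        return 0;
    }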
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 97fa14e0a47..3d75a7137d3 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2518,7 +2518,7 @@ enum {
#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
-((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
+ offsetof(struct prism2_hostapd_param, u.generic_elem.data)
/* Maximum length for algorithm names (-1 for nul termination)
* used in ioctl() */
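The prism54 hunk swaps the old null-pointer-cast trick for offsetof(), which yields the same byte offset without relying on questionable pointer arithmetic. A standalone comparison using simplified stand-in structures rather than the real prism2_hostapd_param layout:

    #include <stddef.h>
    #include <stdio.h>

    struct generic_elem {
        unsigned char len;
        unsigned char data[64];
    };

    struct hostapd_param {
        unsigned int cmd;
        union {
            struct generic_elem generic_elem;
        } u;
    };

    int main(void)
    {
        /* Old idiom, undefined behaviour in principle:
         *     (int)(&((struct hostapd_param *)0)->u.generic_elem.data)
         * Portable replacement: */
        size_t hdr_len = offsetof(struct hostapd_param, u.generic_elem.data);

        printf("generic element header length: %zu\n", hdr_len);
        return 0;
    }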
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index aa6dfb811c7..181a146b476 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1220,6 +1220,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
rt2x00_desc_write(txd, 0, word);
}
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 3078417b326..cd5af656932 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -633,6 +633,16 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev)
rt2x00dev->link.vgc_level = value;
}
+/*
+ * NOTE: This function is directly ported from legacy driver, but
+ * despite it being declared it was never called. Although link tuning
+ * sounds like a good idea, and usually works well for the other drivers,
+ * it does _not_ work with rt2500usb. Enabling this function will result
+ * in TX capabilities only until association kicks in. Immediately
+ * after the successful association all TX frames will be kept in the
+ * hardware queue and never transmitted.
+ */
+#if 0
static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
{
int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
@@ -752,6 +762,9 @@ dynamic_cca_tune:
rt2x00dev->link.vgc_level = r17;
}
}
+#else
+#define rt2500usb_link_tuner NULL
+#endif
/*
* Initialization functions.
@@ -1376,6 +1389,9 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word);
+ } else {
+ rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word);
@@ -1384,9 +1400,6 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word);
EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
- } else {
- rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word);
@@ -1737,6 +1750,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
+ __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags);
/*
* Set the rssi offset.
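rt2500usb keeps the ported link-tuner source in the file but compiles it out, points the ops entry at NULL through the #define, and sets CONFIG_DISABLE_LINK_TUNING so the core never tries to call it. The generic optional-callback pattern that makes the NULL define work looks roughly like this (all names are invented for the example):

    #include <stdio.h>

    struct demo_ops {
        void (*link_tuner)(int rssi);   /* optional: may be NULL */
    };

    #if 0
    /* Kept for reference, known not to work on this hardware. */
    static void demo_link_tuner(int rssi)
    {
        printf("tuning for rssi %d\n", rssi);
    }
    #else
    #define demo_link_tuner NULL
    #endif

    static const struct demo_ops demo = {
        .link_tuner = demo_link_tuner,
    };

    int main(void)
    {
        /* The core only invokes the callback when one is provided. */
        if (demo.link_tuner)
            demo.link_tuner(-60);
        else
            printf("link tuning disabled\n");
        return 0;
    }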
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index db2dc976d83..8b10ea41b20 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -368,6 +368,12 @@ struct rt2x00_intf {
#define DELAYED_CONFIG_ERP 0x00000002
#define DELAYED_LED_ASSOC 0x00000004
+ /*
+ * Software sequence counter, this is only required
+ * for hardware which doesn't support hardware
+ * sequence counting.
+ */
+ spinlock_t seqlock;
u16 seqno;
};
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 3f89516e833..d134c3be539 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -254,6 +254,8 @@ config:
libconf.ant.rx = default_ant->rx;
else if (active_ant->rx == ANTENNA_SW_DIVERSITY)
libconf.ant.rx = ANTENNA_B;
+ else
+ libconf.ant.rx = active_ant->rx;
if (conf->antenna_sel_tx)
libconf.ant.tx = conf->antenna_sel_tx;
@@ -261,6 +263,8 @@ config:
libconf.ant.tx = default_ant->tx;
else if (active_ant->tx == ANTENNA_SW_DIVERSITY)
libconf.ant.tx = ANTENNA_B;
+ else
+ libconf.ant.tx = active_ant->tx;
}
if (flags & CONFIG_UPDATE_SLOT_TIME) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 300cf061035..6bee1d611bb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -372,9 +372,6 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
if (*offset) \
return 0; \
\
- if (!capable(CAP_NET_ADMIN)) \
- return -EPERM; \
- \
if (intf->offset_##__name >= debug->__name.word_count) \
return -EINVAL; \
\
@@ -454,7 +451,7 @@ static struct dentry *rt2x00debug_create_file_driver(const char *name,
data += sprintf(data, "compiled: %s %s\n", __DATE__, __TIME__);
blob->size = strlen(blob->data);
- return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob);
+ return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
}
static struct dentry *rt2x00debug_create_file_chipset(const char *name,
@@ -482,7 +479,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
data += sprintf(data, "rf length: %d\n", debug->rf.word_count);
blob->size = strlen(blob->data);
- return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob);
+ return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
}
void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
@@ -517,7 +514,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
if (IS_ERR(intf->chipset_entry))
goto exit;
- intf->dev_flags = debugfs_create_file("dev_flags", S_IRUGO,
+ intf->dev_flags = debugfs_create_file("dev_flags", S_IRUSR,
intf->driver_folder, intf,
&rt2x00debug_fop_dev_flags);
if (IS_ERR(intf->dev_flags))
@@ -532,7 +529,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
({ \
(__intf)->__name##_off_entry = \
debugfs_create_u32(__stringify(__name) "_offset", \
- S_IRUGO | S_IWUSR, \
+ S_IRUSR | S_IWUSR, \
(__intf)->register_folder, \
&(__intf)->offset_##__name); \
if (IS_ERR((__intf)->__name##_off_entry)) \
@@ -540,7 +537,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
\
(__intf)->__name##_val_entry = \
debugfs_create_file(__stringify(__name) "_value", \
- S_IRUGO | S_IWUSR, \
+ S_IRUSR | S_IWUSR, \
(__intf)->register_folder, \
(__intf), &rt2x00debug_fop_##__name);\
if (IS_ERR((__intf)->__name##_val_entry)) \
@@ -560,7 +557,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
goto exit;
intf->queue_frame_dump_entry =
- debugfs_create_file("dump", S_IRUGO, intf->queue_folder,
+ debugfs_create_file("dump", S_IRUSR, intf->queue_folder,
intf, &rt2x00debug_fop_queue_dump);
if (IS_ERR(intf->queue_frame_dump_entry))
goto exit;
@@ -569,7 +566,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
init_waitqueue_head(&intf->frame_dump_waitqueue);
intf->queue_stats_entry =
- debugfs_create_file("queue", S_IRUGO, intf->queue_folder,
+ debugfs_create_file("queue", S_IRUSR, intf->queue_folder,
intf, &rt2x00debug_fop_queue_stats);
return;
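The rt2x00 debugfs hunks tighten the register and dump files from world-readable S_IRUGO to owner-only S_IRUSR and drop the per-write capable() check, since unprivileged users can no longer open the files at all. A minimal sketch of creating such a root-only debugfs entry; the directory name, file name and buffer are placeholders:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/uaccess.h>

    static char demo_buf[] = "0x00000000\n";

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
        return simple_read_from_buffer(buf, count, ppos,
                                       demo_buf, sizeof(demo_buf) - 1);
    }

    static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .read  = demo_read,
    };

    static struct dentry *demo_dir, *demo_file;

    static int demo_debugfs_init(void)
    {
        demo_dir = debugfs_create_dir("demo", NULL);
        if (!demo_dir)
            return -ENOMEM;

        /* S_IRUSR: readable by root only, unlike the old S_IRUGO files. */
        demo_file = debugfs_create_file("dev_flags", S_IRUSR, demo_dir,
                                        NULL, &demo_fops);
        return demo_file ? 0 : -ENOMEM;
    }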
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index c3ee4ecba79..d0650738863 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -203,23 +203,43 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
!test_bit(DEVICE_STARTED, &rt2x00dev->flags))
return -ENODEV;
- /*
- * We don't support mixed combinations of sta and ap virtual
- * interfaces. We can only add this interface when the rival
- * interface count is 0.
- */
- if ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) ||
- (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count))
- return -ENOBUFS;
-
- /*
- * Check if we exceeded the maximum amount of supported interfaces.
- */
- if ((conf->type == IEEE80211_IF_TYPE_AP &&
- rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) ||
- (conf->type != IEEE80211_IF_TYPE_AP &&
- rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf))
- return -ENOBUFS;
+ switch (conf->type) {
+ case IEEE80211_IF_TYPE_AP:
+ /*
+ * We don't support mixed combinations of
+ * sta and ap interfaces.
+ */
+ if (rt2x00dev->intf_sta_count)
+ return -ENOBUFS;
+
+ /*
+ * Check if we exceeded the maximum amount
+ * of supported interfaces.
+ */
+ if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
+ return -ENOBUFS;
+
+ break;
+ case IEEE80211_IF_TYPE_STA:
+ case IEEE80211_IF_TYPE_IBSS:
+ /*
+ * We don't support mixed combinations of
+ * sta and ap interfaces.
+ */
+ if (rt2x00dev->intf_ap_count)
+ return -ENOBUFS;
+
+ /*
+ * Check if we exceeded the maximum amount
+ * of supported interfaces.
+ */
+ if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
+ return -ENOBUFS;
+
+ break;
+ default:
+ return -EINVAL;
+ }
/*
* Loop through all beacon queues to find a free
@@ -247,6 +267,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
rt2x00dev->intf_sta_count++;
spin_lock_init(&intf->lock);
+ spin_lock_init(&intf->seqlock);
intf->beacon = entry;
if (conf->type == IEEE80211_IF_TYPE_AP)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 3b27f6aa860..898cdd7f57d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -128,6 +128,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
unsigned int data_length;
unsigned int duration;
unsigned int residual;
+ unsigned long irqflags;
memset(txdesc, 0, sizeof(*txdesc));
@@ -213,14 +214,14 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
* sequence counter given by mac80211.
*/
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- spin_lock(&intf->lock);
+ spin_lock_irqsave(&intf->seqlock, irqflags);
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
intf->seqno += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
- spin_unlock(&intf->lock);
+ spin_unlock_irqrestore(&intf->seqlock, irqflags);
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
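rt2x00 moves the software sequence counter under its own spinlock and switches to the irqsave variants, so the counter can be updated safely from whatever context the TX descriptor path runs in without overloading intf->lock. A short sketch of a per-interface counter guarded by a dedicated lock; the structure and helper are placeholders, not the rt2x00 ones:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_intf {
        spinlock_t seqlock;   /* protects only seqno */
        u16 seqno;
    };

    static void demo_intf_init(struct demo_intf *intf)
    {
        spin_lock_init(&intf->seqlock);
        intf->seqno = 0;
    }

    /* Interrupts stay disabled while the counter is advanced and read,
     * so this is safe regardless of the calling context. */
    static u16 demo_next_seqno(struct demo_intf *intf, bool first_fragment)
    {
        unsigned long flags;
        u16 seq;

        spin_lock_irqsave(&intf->seqlock, flags);
        if (first_fragment)
            intf->seqno += 0x10;   /* sequence lives in bits 4..15 */
        seq = intf->seqno;
        spin_unlock_irqrestore(&intf->seqlock, flags);

        return seq;
    }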
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 933e6cc9359..8d76bb2e031 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
const u8 request, const u8 requesttype,
- const u16 offset, void *buffer,
+ const u16 offset, const void *buffer,
const u16 buffer_length,
const int timeout)
{
@@ -134,7 +134,7 @@ int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
mutex_lock(&rt2x00dev->usb_cache_mutex);
- tb = buffer;
+ tb = (char *)buffer;
off = offset;
len = buffer_length;
while (len && !status) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index ee3875f894a..3b4a67417f9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -185,7 +185,7 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
*/
int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
const u8 request, const u8 requesttype,
- const u16 offset, void *buffer,
+ const u16 offset, const void *buffer,
const u16 buffer_length,
const int timeout);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index fbe2a652e01..087e90b328c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1004,6 +1004,11 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
}
/*
+ * Hardware needs another millisecond before it is ready.
+ */
+ msleep(1);
+
+ /*
* Reset MAC and BBP registers.
*/
reg = 0;
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 1b0d750f662..5a9515c9996 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -94,6 +94,10 @@ struct rtl8187_priv {
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
int mode;
+ /* The mutex protects the TX loopback state.
+ * Any attempt to set channels concurrently locks the device.
+ */
+ struct mutex conf_mutex;
/* rtl8187 specific */
struct ieee80211_channel channels[14];
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 177988efd66..57376fb993e 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -31,6 +31,8 @@ MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
MODULE_LICENSE("GPL");
static struct usb_device_id rtl8187_table[] __devinitdata = {
+ /* Asus */
+ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
/* Realtek */
{USB_DEVICE(0x0bda, 0x8187), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B},
@@ -726,6 +728,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
if (ret)
return ret;
+ mutex_lock(&priv->conf_mutex);
if (priv->is_rtl8187b) {
reg = RTL818X_RX_CONF_MGMT |
RTL818X_RX_CONF_DATA |
@@ -747,6 +750,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
(7 << 0 /* long retry limit */) |
(7 << 21 /* MAX TX DMA */));
rtl8187_init_urbs(dev);
+ mutex_unlock(&priv->conf_mutex);
return 0;
}
@@ -790,6 +794,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
reg |= RTL818X_CMD_TX_ENABLE;
reg |= RTL818X_CMD_RX_ENABLE;
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
+ mutex_unlock(&priv->conf_mutex);
return 0;
}
@@ -801,6 +806,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
struct sk_buff *skb;
u32 reg;
+ mutex_lock(&priv->conf_mutex);
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -820,7 +826,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
usb_kill_urb(info->urb);
kfree_skb(skb);
}
- return;
+ mutex_unlock(&priv->conf_mutex);
}
static int rtl8187_add_interface(struct ieee80211_hw *dev,
@@ -840,6 +846,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
return -EOPNOTSUPP;
}
+ mutex_lock(&priv->conf_mutex);
priv->vif = conf->vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
@@ -848,6 +855,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
((u8 *)conf->mac_addr)[i]);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+ mutex_unlock(&priv->conf_mutex);
return 0;
}
@@ -855,8 +863,10 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
struct ieee80211_if_init_conf *conf)
{
struct rtl8187_priv *priv = dev->priv;
+ mutex_lock(&priv->conf_mutex);
priv->mode = IEEE80211_IF_TYPE_MNTR;
priv->vif = NULL;
+ mutex_unlock(&priv->conf_mutex);
}
static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
@@ -864,6 +874,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
struct rtl8187_priv *priv = dev->priv;
u32 reg;
+ mutex_lock(&priv->conf_mutex);
reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
/* Enable TX loopback on MAC level to avoid TX during channel
* changes, as this has been seen to cause problems and the
@@ -896,6 +907,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
+ mutex_unlock(&priv->conf_mutex);
return 0;
}
@@ -907,6 +919,7 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev,
int i;
u8 reg;
+ mutex_lock(&priv->conf_mutex);
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]);
@@ -920,6 +933,7 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev,
rtl818x_iowrite8(priv, &priv->map->MSR, reg);
}
+ mutex_unlock(&priv->conf_mutex);
return 0;
}
@@ -1187,6 +1201,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
printk(KERN_ERR "rtl8187: Cannot register device\n");
goto err_free_dev;
}
+ mutex_init(&priv->conf_mutex);
printk(KERN_INFO "%s: hwaddr %s, %s V%d + %s\n",
wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr),
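Besides the conf_mutex serialization (same approach as the p54 hunks above), rtl8187 picks up one more USB ID for an Asus stick; supporting such a device usually needs nothing more than another entry in the usb_device_id table exposed through MODULE_DEVICE_TABLE. A stripped-down table reusing the IDs visible in the diff; the enum tags are illustrative, not the driver's DEVICE_RTL8187* values:

    #include <linux/module.h>
    #include <linux/usb.h>

    enum { DEMO_RTL8187, DEMO_RTL8187B };

    static struct usb_device_id demo_table[] = {
        /* Asus */
        { USB_DEVICE(0x0b05, 0x171d), .driver_info = DEMO_RTL8187 },
        /* Realtek */
        { USB_DEVICE(0x0bda, 0x8187), .driver_info = DEMO_RTL8187 },
        { USB_DEVICE(0x0bda, 0x8189), .driver_info = DEMO_RTL8187B },
        { }                              /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, demo_table);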
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 49ae9700395..136220b5ca8 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -1409,9 +1409,6 @@ static void wavelan_set_multicast_list(struct net_device * dev)
lp->mc_count = 0;
wv_82586_reconfig(dev);
-
- /* Tell the kernel that we are doing a really bad job. */
- dev->flags |= IFF_PROMISC;
}
} else
/* Are there multicast addresses to send? */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index b584c0ecc62..00a3559e5aa 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1412,9 +1412,6 @@ wavelan_set_multicast_list(struct net_device * dev)
lp->mc_count = 0;
wv_82593_reconfig(dev);
-
- /* Tell the kernel that we are doing a really bad job... */
- dev->flags |= IFF_PROMISC;
}
}
else
@@ -1433,9 +1430,6 @@ wavelan_set_multicast_list(struct net_device * dev)
lp->mc_count = 0;
wv_82593_reconfig(dev);
-
- /* Tell the kernel that we are doing a really bad job... */
- dev->flags |= IFF_ALLMULTI;
}
}
else
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 902bbe78821..c749bdba214 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -329,7 +329,7 @@ static int xennet_open(struct net_device *dev)
}
spin_unlock_bh(&np->rx_lock);
- xennet_maybe_wake_tx(dev);
+ netif_start_queue(dev);
return 0;
}
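The xen-netfront change makes open start the TX queue unconditionally rather than calling the conditional wake helper, which is enough at open time since nothing is queued yet; netif_stop_queue() remains the flow-control tool for when the ring fills. A brief sketch of typical usage in a driver's open and xmit paths (the ring-state helper is a stub invented for the example):

    #include <linux/netdevice.h>

    /* Hypothetical ring-state check. */
    static int demo_ring_full(struct net_device *dev)
    {
        return 0;
    }

    static int demo_open(struct net_device *dev)
    {
        /* Nothing is in flight yet, so simply declare the queue usable. */
        netif_start_queue(dev);
        return 0;
    }

    static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        /* ... place skb on the hardware ring here ... */

        if (demo_ring_full(dev))
            netif_stop_queue(dev);   /* paused until completions wake it */

        return NETDEV_TX_OK;
    }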