35 files changed, 5936 insertions(+), 489 deletions(-)
diff --git a/Documentation/networking/LICENSE.qla3xxx b/Documentation/networking/LICENSE.qla3xxx new file mode 100644 index 00000000000..2f2077e34d8 --- /dev/null +++ b/Documentation/networking/LICENSE.qla3xxx @@ -0,0 +1,46 @@ +Copyright (c) 2003-2006 QLogic Corporation +QLogic Linux Networking HBA Driver + +This program includes a device driver for Linux 2.6 that may be +distributed with QLogic hardware specific firmware binary file. +You may modify and redistribute the device driver code under the +GNU General Public License as published by the Free Software +Foundation (version 2 or a later version). + +You may redistribute the hardware specific firmware binary file +under the following terms: + + 1. Redistribution of source code (only if applicable), + must retain the above copyright notice, this list of + conditions and the following disclaimer. + + 2. Redistribution in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + + 3. The name of QLogic Corporation may not be used to + endorse or promote products derived from this software + without specific prior written permission + +REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE, +THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT +CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR +OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT, +TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN +ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN +COMBINATION WITH THIS PROGRAM. 
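A note on the 8390.c hunks further down: they convert the transmit and netpoll paths from plain disable_irq()/enable_irq() to the lockdep-aware wrappers from <linux/interrupt.h>, so the new lock validator knows hard interrupts are logically off across the handler call and does not report a false inconsistency. A minimal sketch of the converted netpoll pattern, assuming a 2.6.18-era kernel; my_poll() and my_interrupt() are hypothetical stand-ins for ei_poll() and ei_interrupt():

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t my_interrupt(int irq, void *dev_id, struct pt_regs *regs);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll the device with its IRQ line masked. The _lockdep variants also
 * mark hardirqs as disabled for the lock validator, so spinlocks taken
 * both here and in the real interrupt handler are not flagged. */
static void my_poll(struct net_device *dev)
{
	disable_irq_lockdep(dev->irq);	/* was: disable_irq(dev->irq) */
	my_interrupt(dev->irq, dev, NULL);
	enable_irq_lockdep(dev->irq);	/* was: enable_irq(dev->irq) */
}
#endif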
+ diff --git a/MAINTAINERS b/MAINTAINERS index b2afc7ae965..dbb9d90ccad 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2338,6 +2338,12 @@ M: linux-driver@qlogic.com L: linux-scsi@vger.kernel.org S: Supported +QLOGIC QLA3XXX NETWORK DRIVER +P: Ron Mercer +M: linux-driver@qlogic.com +L: netdev@vger.kernel.org +S: Supported + QNX4 FILESYSTEM P: Anders Larsen M: al@alarsen.net @@ -2588,6 +2594,18 @@ P: Nicolas Pitre M: nico@cam.org S: Maintained +SOFTMAC LAYER (IEEE 802.11) +P: Johannes Berg +M: johannes@sipsolutions.net +P: Joe Jezak +M: josejx@gentoo.org +P: Daniel Drake +M: dsd@gentoo.org +W: http://softmac.sipsolutions.net/ +L: softmac-dev@sipsolutions.net +L: netdev@vger.kernel.org +S: Maintained + SOFTWARE RAID (Multiple Disks) SUPPORT P: Ingo Molnar M: mingo@redhat.com @@ -3296,6 +3314,15 @@ W: http://www.qsl.net/dl1bke/ L: linux-hams@vger.kernel.org S: Maintained +ZD1211RW WIRELESS DRIVER +P: Daniel Drake +M: dsd@gentoo.org +P: Ulrich Kunitz +M: kune@deine-taler.de +W: http://zd1211.ath.cx/wiki/DriverRewrite +L: zd1211-devs@lists.sourceforge.net (subscribers-only) +S: Maintained + ZF MACHZ WATCHDOG P: Fernando Fuganti M: fuganti@netbank.com.br diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig index a4f7288a1fc..3ef567b99c7 100644 --- a/drivers/isdn/i4l/Kconfig +++ b/drivers/isdn/i4l/Kconfig @@ -5,6 +5,7 @@ config ISDN_PPP bool "Support synchronous PPP" depends on INET + select SLHC help Over digital connections such as ISDN, there is no need to synchronize sender and recipient's clocks with start and stop bits diff --git a/drivers/net/8390.c b/drivers/net/8390.c index d2935ae3981..3eb7048684a 100644 --- a/drivers/net/8390.c +++ b/drivers/net/8390.c @@ -299,7 +299,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) * Slow phase with lock held. */ - disable_irq_nosync(dev->irq); + disable_irq_nosync_lockdep(dev->irq); spin_lock(&ei_local->page_lock); @@ -338,7 +338,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); - enable_irq(dev->irq); + enable_irq_lockdep(dev->irq); ei_local->stat.tx_errors++; return 1; } @@ -379,7 +379,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); - enable_irq(dev->irq); + enable_irq_lockdep(dev->irq); dev_kfree_skb (skb); ei_local->stat.tx_bytes += send_length; @@ -505,9 +505,9 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs) #ifdef CONFIG_NET_POLL_CONTROLLER void ei_poll(struct net_device *dev) { - disable_irq(dev->irq); + disable_irq_lockdep(dev->irq); ei_interrupt(dev->irq, dev, NULL); - enable_irq(dev->irq); + enable_irq_lockdep(dev->irq); } #endif diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 39189903e35..3a0d80b2850 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2249,6 +2249,15 @@ config MV643XX_ETH_2 This enables support for Port 2 of the Marvell MV643XX Gigabit Ethernet. +config QLA3XXX + tristate "QLogic QLA3XXX Network Driver Support" + depends on PCI + help + This driver supports QLogic ISP3XXX gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called qla3xxx. + endmenu # @@ -2509,6 +2518,7 @@ config PLIP config PPP tristate "PPP (point-to-point protocol) support" + select SLHC ---help--- PPP (Point to Point Protocol) is a newer and better SLIP. 
It serves the same purpose: sending Internet traffic over telephone (and other
@@ -2689,6 +2699,7 @@ config SLIP
 config SLIP_COMPRESSED
 	bool "CSLIP compressed headers"
 	depends on SLIP
+	select SLHC
 	---help---
 	  This protocol is faster than SLIP because it uses compression on the
 	  TCP/IP headers (not on the data itself), but it has to be supported
@@ -2701,6 +2712,12 @@ config SLIP_COMPRESSED
 	  <http://www.tldp.org/docs.html#howto>, explains how to configure
 	  CSLIP. This won't enlarge your kernel.
 
+config SLHC
+	tristate
+	help
+	  This option enables Van Jacobson serial line header compression
+	  routines.
+
 config SLIP_SMART
 	bool "Keepalive and linefill"
 	depends on SLIP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e95126f7..5e91c3562ad 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,10 +2,6 @@
 # Makefile for the Linux network (ethercard) device drivers.
 #
-ifeq ($(CONFIG_ISDN_PPP),y)
-  obj-$(CONFIG_ISDN) += slhc.o
-endif
-
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
@@ -110,8 +106,9 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
-obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
+obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
 obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
@@ -120,9 +117,7 @@ obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 
 obj-$(CONFIG_SLIP) += slip.o
-ifeq ($(CONFIG_SLIP_COMPRESSED),y)
-  obj-$(CONFIG_SLIP) += slhc.o
-endif
+obj-$(CONFIG_SLHC) += slhc.o
 
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd..6fc6d1b05f1 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -381,21 +381,21 @@ enum {
 /* Big endian: should work, but is untested */
 struct ring_desc {
-	u32 PacketBuffer;
-	u32 FlagLen;
+	__le32 buf;
+	__le32 flaglen;
 };
 
 struct ring_desc_ex {
-	u32 PacketBufferHigh;
-	u32 PacketBufferLow;
-	u32 TxVlan;
-	u32 FlagLen;
+	__le32 bufhigh;
+	__le32 buflow;
+	__le32 txvlan;
+	__le32 flaglen;
 };
 
-typedef union _ring_type {
+union ring_type {
 	struct ring_desc* orig;
 	struct ring_desc_ex* ex;
-} ring_type;
+};
 
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
@@ -653,8 +653,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
 };
 
 struct register_test {
-	u32 reg;
-	u32 mask;
+	__le32 reg;
+	__le32 mask;
 };
 
 static const struct register_test nv_registers_test[] = {
@@ -713,7 +713,7 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	ring_type rx_ring;
+	union ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *rx_dma;
@@ -733,7 +733,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	ring_type tx_ring;
+	union ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff **tx_skbuff;
 	dma_addr_t *tx_dma;
@@ -826,13 +826,13 @@ static inline void pci_push(u8 __iomem *base)
 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen)
+	return le32_to_cpu(prd->flaglen)
 		& ((v == DESC_VER_1) ?
LEN_MASK_V1 : LEN_MASK_V2); } static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) { - return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2; + return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; } static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, @@ -885,7 +885,7 @@ static void free_rings(struct net_device *dev) struct fe_priv *np = get_nvpriv(dev); if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - if(np->rx_ring.orig) + if (np->rx_ring.orig) pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), np->rx_ring.orig, np->ring_addr); } else { @@ -1258,14 +1258,14 @@ static int nv_alloc_rx(struct net_device *dev) np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->end-skb->data, PCI_DMA_FROMDEVICE); if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); + np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]); wmb(); - np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); + np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); } else { - np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32; - np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; + np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32; + np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; wmb(); - np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); + np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); } dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", dev->name, refill_rx); @@ -1315,9 +1315,9 @@ static void nv_init_rx(struct net_device *dev) np->refill_rx = 0; for (i = 0; i < np->rx_ring_size; i++) if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) - np->rx_ring.orig[i].FlagLen = 0; + np->rx_ring.orig[i].flaglen = 0; else - np->rx_ring.ex[i].FlagLen = 0; + np->rx_ring.ex[i].flaglen = 0; } static void nv_init_tx(struct net_device *dev) @@ -1328,9 +1328,9 @@ static void nv_init_tx(struct net_device *dev) np->next_tx = np->nic_tx = 0; for (i = 0; i < np->tx_ring_size; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) - np->tx_ring.orig[i].FlagLen = 0; + np->tx_ring.orig[i].flaglen = 0; else - np->tx_ring.ex[i].FlagLen = 0; + np->tx_ring.ex[i].flaglen = 0; np->tx_skbuff[i] = NULL; np->tx_dma[i] = 0; } @@ -1373,9 +1373,9 @@ static void nv_drain_tx(struct net_device *dev) for (i = 0; i < np->tx_ring_size; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) - np->tx_ring.orig[i].FlagLen = 0; + np->tx_ring.orig[i].flaglen = 0; else - np->tx_ring.ex[i].FlagLen = 0; + np->tx_ring.ex[i].flaglen = 0; if (nv_release_txskb(dev, i)) np->stats.tx_dropped++; } @@ -1387,9 +1387,9 @@ static void nv_drain_rx(struct net_device *dev) int i; for (i = 0; i < np->rx_ring_size; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) - np->rx_ring.orig[i].FlagLen = 0; + np->rx_ring.orig[i].flaglen = 0; else - np->rx_ring.ex[i].FlagLen = 0; + np->rx_ring.ex[i].flaglen = 0; wmb(); if (np->rx_skbuff[i]) { pci_unmap_single(np->pci_dev, np->rx_dma[i], @@ -1450,17 +1450,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) np->tx_dma_len[nr] = bcnt; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); - np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) 
| tx_flags); + np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); + np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); } else { - np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; - np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; - np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); + np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; + np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; + np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); } tx_flags = np->tx_flags; offset += bcnt; size -= bcnt; - } while(size); + } while (size); /* setup the fragments */ for (i = 0; i < fragments; i++) { @@ -1477,12 +1477,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) np->tx_dma_len[nr] = bcnt; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); - np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); + np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); + np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); } else { - np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; - np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; - np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); + np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; + np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; + np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); } offset += bcnt; size -= bcnt; @@ -1491,9 +1491,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* set last fragment flag */ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra); + np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); } else { - np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra); + np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); } np->tx_skbuff[nr] = skb; @@ -1512,10 +1512,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* set tx flags */ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); + np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); } else { - np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); - np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); + np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); + np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); } dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. 
tx_flags_extra: %x\n", @@ -1547,7 +1547,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) static void nv_tx_done(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); - u32 Flags; + u32 flags; unsigned int i; struct sk_buff *skb; @@ -1555,22 +1555,22 @@ static void nv_tx_done(struct net_device *dev) i = np->nic_tx % np->tx_ring_size; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) - Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen); + flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); else - Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen); + flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); - dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", - dev->name, np->nic_tx, Flags); - if (Flags & NV_TX_VALID) + dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n", + dev->name, np->nic_tx, flags); + if (flags & NV_TX_VALID) break; if (np->desc_ver == DESC_VER_1) { - if (Flags & NV_TX_LASTPACKET) { + if (flags & NV_TX_LASTPACKET) { skb = np->tx_skbuff[i]; - if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| + if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| NV_TX_UNDERFLOW|NV_TX_ERROR)) { - if (Flags & NV_TX_UNDERFLOW) + if (flags & NV_TX_UNDERFLOW) np->stats.tx_fifo_errors++; - if (Flags & NV_TX_CARRIERLOST) + if (flags & NV_TX_CARRIERLOST) np->stats.tx_carrier_errors++; np->stats.tx_errors++; } else { @@ -1579,13 +1579,13 @@ static void nv_tx_done(struct net_device *dev) } } } else { - if (Flags & NV_TX2_LASTPACKET) { + if (flags & NV_TX2_LASTPACKET) { skb = np->tx_skbuff[i]; - if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| + if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { - if (Flags & NV_TX2_UNDERFLOW) + if (flags & NV_TX2_UNDERFLOW) np->stats.tx_fifo_errors++; - if (Flags & NV_TX2_CARRIERLOST) + if (flags & NV_TX2_CARRIERLOST) np->stats.tx_carrier_errors++; np->stats.tx_errors++; } else { @@ -1638,29 +1638,29 @@ static void nv_tx_timeout(struct net_device *dev) if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", i, - le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), - le32_to_cpu(np->tx_ring.orig[i].FlagLen), - le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), - le32_to_cpu(np->tx_ring.orig[i+1].FlagLen), - le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer), - le32_to_cpu(np->tx_ring.orig[i+2].FlagLen), - le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer), - le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); + le32_to_cpu(np->tx_ring.orig[i].buf), + le32_to_cpu(np->tx_ring.orig[i].flaglen), + le32_to_cpu(np->tx_ring.orig[i+1].buf), + le32_to_cpu(np->tx_ring.orig[i+1].flaglen), + le32_to_cpu(np->tx_ring.orig[i+2].buf), + le32_to_cpu(np->tx_ring.orig[i+2].flaglen), + le32_to_cpu(np->tx_ring.orig[i+3].buf), + le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); } else { printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", i, - le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), - le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), - le32_to_cpu(np->tx_ring.ex[i].FlagLen), - le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh), - le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow), - le32_to_cpu(np->tx_ring.ex[i+1].FlagLen), - le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh), - le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow), - le32_to_cpu(np->tx_ring.ex[i+2].FlagLen), - 
le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh), - le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow), - le32_to_cpu(np->tx_ring.ex[i+3].FlagLen)); + le32_to_cpu(np->tx_ring.ex[i].bufhigh), + le32_to_cpu(np->tx_ring.ex[i].buflow), + le32_to_cpu(np->tx_ring.ex[i].flaglen), + le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+1].buflow), + le32_to_cpu(np->tx_ring.ex[i+1].flaglen), + le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+2].buflow), + le32_to_cpu(np->tx_ring.ex[i+2].flaglen), + le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+3].buflow), + le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); } } } @@ -1697,7 +1697,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen) int protolen; /* length as stored in the proto field */ /* 1) calculate len according to header */ - if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) { + if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); hdrlen = VLAN_HLEN; } else { @@ -1743,7 +1743,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen) static void nv_rx_process(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); - u32 Flags; + u32 flags; u32 vlanflags = 0; for (;;) { @@ -1755,18 +1755,18 @@ static void nv_rx_process(struct net_device *dev) i = np->cur_rx % np->rx_ring_size; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); + flags = le32_to_cpu(np->rx_ring.orig[i].flaglen); len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); } else { - Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); + flags = le32_to_cpu(np->rx_ring.ex[i].flaglen); len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); - vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); + vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow); } - dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", - dev->name, np->cur_rx, Flags); + dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", + dev->name, np->cur_rx, flags); - if (Flags & NV_RX_AVAIL) + if (flags & NV_RX_AVAIL) break; /* still owned by hardware, */ /* @@ -1780,7 +1780,7 @@ static void nv_rx_process(struct net_device *dev) { int j; - dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags); + dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); for (j=0; j<64; j++) { if ((j%16) == 0) dprintk("\n%03x:", j); @@ -1790,30 +1790,30 @@ static void nv_rx_process(struct net_device *dev) } /* look at what we actually got: */ if (np->desc_ver == DESC_VER_1) { - if (!(Flags & NV_RX_DESCRIPTORVALID)) + if (!(flags & NV_RX_DESCRIPTORVALID)) goto next_pkt; - if (Flags & NV_RX_ERROR) { - if (Flags & NV_RX_MISSEDFRAME) { + if (flags & NV_RX_ERROR) { + if (flags & NV_RX_MISSEDFRAME) { np->stats.rx_missed_errors++; np->stats.rx_errors++; goto next_pkt; } - if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { + if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { np->stats.rx_errors++; goto next_pkt; } - if (Flags & NV_RX_CRCERR) { + if (flags & NV_RX_CRCERR) { np->stats.rx_crc_errors++; np->stats.rx_errors++; goto next_pkt; } - if (Flags & NV_RX_OVERFLOW) { + if (flags & NV_RX_OVERFLOW) { np->stats.rx_over_errors++; np->stats.rx_errors++; goto next_pkt; } - if (Flags & NV_RX_ERROR4) { + if (flags & NV_RX_ERROR4) { len = nv_getlen(dev, np->rx_skbuff[i]->data, 
len); if (len < 0) { np->stats.rx_errors++; @@ -1821,32 +1821,32 @@ static void nv_rx_process(struct net_device *dev) } } /* framing errors are soft errors. */ - if (Flags & NV_RX_FRAMINGERR) { - if (Flags & NV_RX_SUBSTRACT1) { + if (flags & NV_RX_FRAMINGERR) { + if (flags & NV_RX_SUBSTRACT1) { len--; } } } } else { - if (!(Flags & NV_RX2_DESCRIPTORVALID)) + if (!(flags & NV_RX2_DESCRIPTORVALID)) goto next_pkt; - if (Flags & NV_RX2_ERROR) { - if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { + if (flags & NV_RX2_ERROR) { + if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { np->stats.rx_errors++; goto next_pkt; } - if (Flags & NV_RX2_CRCERR) { + if (flags & NV_RX2_CRCERR) { np->stats.rx_crc_errors++; np->stats.rx_errors++; goto next_pkt; } - if (Flags & NV_RX2_OVERFLOW) { + if (flags & NV_RX2_OVERFLOW) { np->stats.rx_over_errors++; np->stats.rx_errors++; goto next_pkt; } - if (Flags & NV_RX2_ERROR4) { + if (flags & NV_RX2_ERROR4) { len = nv_getlen(dev, np->rx_skbuff[i]->data, len); if (len < 0) { np->stats.rx_errors++; @@ -1854,17 +1854,17 @@ static void nv_rx_process(struct net_device *dev) } } /* framing errors are soft errors */ - if (Flags & NV_RX2_FRAMINGERR) { - if (Flags & NV_RX2_SUBSTRACT1) { + if (flags & NV_RX2_FRAMINGERR) { + if (flags & NV_RX2_SUBSTRACT1) { len--; } } } if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) { - Flags &= NV_RX2_CHECKSUMMASK; - if (Flags == NV_RX2_CHECKSUMOK1 || - Flags == NV_RX2_CHECKSUMOK2 || - Flags == NV_RX2_CHECKSUMOK3) { + flags &= NV_RX2_CHECKSUMMASK; + if (flags == NV_RX2_CHECKSUMOK1 || + flags == NV_RX2_CHECKSUMOK2 || + flags == NV_RX2_CHECKSUMOK3) { dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; } else { @@ -1990,7 +1990,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr) struct fe_priv *np = netdev_priv(dev); struct sockaddr *macaddr = (struct sockaddr*)addr; - if(!is_valid_ether_addr(macaddr->sa_data)) + if (!is_valid_ether_addr(macaddr->sa_data)) return -EADDRNOTAVAIL; /* synchronized against open : rtnl_lock() held by caller */ @@ -2283,20 +2283,20 @@ set_speed: lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); switch (adv_pause) { - case (ADVERTISE_PAUSE_CAP): + case ADVERTISE_PAUSE_CAP: if (lpa_pause & LPA_PAUSE_CAP) { pause_flags |= NV_PAUSEFRAME_RX_ENABLE; if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) pause_flags |= NV_PAUSEFRAME_TX_ENABLE; } break; - case (ADVERTISE_PAUSE_ASYM): + case ADVERTISE_PAUSE_ASYM: if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) { pause_flags |= NV_PAUSEFRAME_TX_ENABLE; } break; - case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM): + case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: if (lpa_pause & LPA_PAUSE_CAP) { pause_flags |= NV_PAUSEFRAME_RX_ENABLE; @@ -3245,7 +3245,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { /* fall back to old rings */ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - if(rxtx_ring) + if (rxtx_ring) pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), rxtx_ring, ring_addr); } else { @@ -3481,7 +3481,7 @@ static int nv_get_stats_count(struct net_device *dev) struct fe_priv *np = netdev_priv(dev); if (np->driver_data & DEV_HAS_STATISTICS) - return (sizeof(struct nv_ethtool_stats)/sizeof(u64)); + return sizeof(struct nv_ethtool_stats)/sizeof(u64); else return 0; } @@ -3619,7 +3619,7 @@ static int 
nv_loopback_test(struct net_device *dev) struct sk_buff *tx_skb, *rx_skb; dma_addr_t test_dma_addr; u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); - u32 Flags; + u32 flags; int len, i, pkt_len; u8 *pkt_data; u32 filter_flags = 0; @@ -3663,12 +3663,12 @@ static int nv_loopback_test(struct net_device *dev) tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr); - np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); + np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); + np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); } else { - np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32; - np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; - np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); + np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32; + np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; + np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); } writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); pci_push(get_hwbase(dev)); @@ -3677,21 +3677,21 @@ static int nv_loopback_test(struct net_device *dev) /* check for rx of the packet */ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen); + flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); } else { - Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen); + flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); } - if (Flags & NV_RX_AVAIL) { + if (flags & NV_RX_AVAIL) { ret = 0; } else if (np->desc_ver == DESC_VER_1) { - if (Flags & NV_RX_ERROR) + if (flags & NV_RX_ERROR) ret = 0; } else { - if (Flags & NV_RX2_ERROR) { + if (flags & NV_RX2_ERROR) { ret = 0; } } diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c new file mode 100644 index 00000000000..c729aeeb469 --- /dev/null +++ b/drivers/net/qla3xxx.c @@ -0,0 +1,3537 @@ +/* + * QLogic QLA3xxx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qla3xxx for copyright and licensing details. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/dmapool.h> +#include <linux/mempool.h> +#include <linux/spinlock.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/if_ether.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/skbuff.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/mm.h> + +#include "qla3xxx.h" + +#define DRV_NAME "qla3xxx" +#define DRV_STRING "QLogic ISP3XXX Network Driver" +#define DRV_VERSION "v2.02.00-k36" +#define PFX DRV_NAME " " + +static const char ql3xxx_driver_name[] = DRV_NAME; +static const char ql3xxx_driver_version[] = DRV_VERSION; + +MODULE_AUTHOR("QLogic Corporation"); +MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg + = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK + | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static int msi; +module_param(msi, int, 0); +MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); + +static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); + +/* + * Caller must take hw_lock. + */ +static int ql_sem_spinlock(struct ql3_adapter *qdev, + u32 sem_mask, u32 sem_bits) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + u32 value; + unsigned int seconds = 3; + + do { + writel((sem_mask | sem_bits), + &port_regs->CommonRegs.semaphoreReg); + value = readl(&port_regs->CommonRegs.semaphoreReg); + if ((value & (sem_mask >> 16)) == sem_bits) + return 0; + ssleep(1); + } while(--seconds); + return -1; +} + +static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); + readl(&port_regs->CommonRegs.semaphoreReg); +} + +static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + u32 value; + + writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); + value = readl(&port_regs->CommonRegs.semaphoreReg); + return ((value & (sem_mask >> 16)) == sem_bits); +} + +/* + * Caller holds hw_lock. 
+ */ +static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) +{ + int i = 0; + + while (1) { + if (!ql_sem_lock(qdev, + QL_DRVR_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 1)) { + if (i < 10) { + ssleep(1); + i++; + } else { + printk(KERN_ERR PFX "%s: Timed out waiting for " + "driver lock...\n", + qdev->ndev->name); + return 0; + } + } else { + printk(KERN_DEBUG PFX + "%s: driver lock acquired.\n", + qdev->ndev->name); + return 1; + } + } +} + +static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + + writel(((ISP_CONTROL_NP_MASK << 16) | page), + &port_regs->CommonRegs.ispControlStatus); + readl(&port_regs->CommonRegs.ispControlStatus); + qdev->current_page = page; +} + +static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, + u32 __iomem * reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + value = readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + return value; +} + +static u32 ql_read_common_reg(struct ql3_adapter *qdev, + u32 __iomem * reg) +{ + return readl(reg); +} + +static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + if (qdev->current_page != 0) + ql_set_register_page(qdev,0); + value = readl(reg); + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return value; +} + +static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev,0); + return readl(reg); +} + +static void ql_write_common_reg_l(struct ql3_adapter *qdev, + u32 * reg, u32 value) +{ + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + writel(value, (u32 *) reg); + readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return; +} + +static void ql_write_common_reg(struct ql3_adapter *qdev, + u32 * reg, u32 value) +{ + writel(value, (u32 *) reg); + readl(reg); + return; +} + +static void ql_write_page0_reg(struct ql3_adapter *qdev, + u32 * reg, u32 value) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev,0); + writel(value, (u32 *) reg); + readl(reg); + return; +} + +/* + * Caller holds hw_lock. Only called during init. + */ +static void ql_write_page1_reg(struct ql3_adapter *qdev, + u32 * reg, u32 value) +{ + if (qdev->current_page != 1) + ql_set_register_page(qdev,1); + writel(value, (u32 *) reg); + readl(reg); + return; +} + +/* + * Caller holds hw_lock. Only called during init. 
+ */ +static void ql_write_page2_reg(struct ql3_adapter *qdev, + u32 * reg, u32 value) +{ + if (qdev->current_page != 2) + ql_set_register_page(qdev,2); + writel(value, (u32 *) reg); + readl(reg); + return; +} + +static void ql_disable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + (ISP_IMR_ENABLE_INT << 16)); + +} + +static void ql_enable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + ((0xff << 16) | ISP_IMR_ENABLE_INT)); + +} + +static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, + struct ql_rcv_buf_cb *lrg_buf_cb) +{ + u64 map; + lrg_buf_cb->next = NULL; + + if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ + qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; + } else { + qdev->lrg_buf_free_tail->next = lrg_buf_cb; + qdev->lrg_buf_free_tail = lrg_buf_cb; + } + + if (!lrg_buf_cb->skb) { + lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len); + if (unlikely(!lrg_buf_cb->skb)) { + printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n", + qdev->ndev->name); + qdev->lrg_buf_skb_check++; + } else { + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + lrg_buf_cb->skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); + pci_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + } + } + + qdev->lrg_buf_free_count++; +} + +static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter + *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb; + + if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) { + if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL) + qdev->lrg_buf_free_tail = NULL; + qdev->lrg_buf_free_count--; + } + + return lrg_buf_cb; +} + +static u32 addrBits = EEPROM_NO_ADDR_BITS; +static u32 dataBits = EEPROM_NO_DATA_BITS; + +static void fm93c56a_deselect(struct ql3_adapter *qdev); +static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, + unsigned short *value); + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_select(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); +} + +/* + * Caller holds hw_lock. 
+ */ +static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) +{ + int i; + u32 mask; + u32 dataBit; + u32 previousBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + /* Clock in a zero, then do the start bit */ + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1); + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | AUBURN_EEPROM_DO_1 | + AUBURN_EEPROM_CLK_RISE); + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | AUBURN_EEPROM_DO_1 | + AUBURN_EEPROM_CLK_FALL); + + mask = 1 << (FM93C56A_CMD_BITS - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < FM93C56A_CMD_BITS; i++) { + dataBit = + (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* + * If the bit changed, then change the DO state to + * match + */ + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | dataBit); + previousBit = dataBit; + } + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_RISE); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_FALL); + cmd = cmd << 1; + } + + mask = 1 << (addrBits - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < addrBits; i++) { + dataBit = + (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : + AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* + * If the bit changed, then change the DO state to + * match + */ + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | dataBit); + previousBit = dataBit; + } + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_RISE); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev-> + eeprom_cmd_data | dataBit | + AUBURN_EEPROM_CLK_FALL); + eepromAddr = eepromAddr << 1; + } +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_deselect(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data); +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) +{ + int i; + u32 data = 0; + u32 dataBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + /* Read the data bits */ + /* The first bit is a dummy. Clock right over it. */ + for (i = 0; i < dataBits; i++) { + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_RISE); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + serialPortInterfaceReg, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_FALL); + dataBit = + (ql_read_common_reg + (qdev, + &port_regs->CommonRegs. 
+ serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0; + data = (data << 1) | dataBit; + } + *value = (u16) data; +} + +/* + * Caller holds hw_lock. + */ +static void eeprom_readword(struct ql3_adapter *qdev, + u32 eepromAddr, unsigned short *value) +{ + fm93c56a_select(qdev); + fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); + fm93c56a_datain(qdev, value); + fm93c56a_deselect(qdev); +} + +static void ql_swap_mac_addr(u8 * macAddress) +{ +#ifdef __BIG_ENDIAN + u8 temp; + temp = macAddress[0]; + macAddress[0] = macAddress[1]; + macAddress[1] = temp; + temp = macAddress[2]; + macAddress[2] = macAddress[3]; + macAddress[3] = temp; + temp = macAddress[4]; + macAddress[4] = macAddress[5]; + macAddress[5] = temp; +#endif +} + +static int ql_get_nvram_params(struct ql3_adapter *qdev) +{ + u16 *pEEPROMData; + u16 checksum = 0; + u32 index; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + pEEPROMData = (u16 *) & qdev->nvram_data; + qdev->eeprom_cmd_data = 0; + if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 10)) { + printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", + __func__); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + for (index = 0; index < EEPROM_SIZE; index++) { + eeprom_readword(qdev, index, pEEPROMData); + checksum += *pEEPROMData; + pEEPROMData++; + } + ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); + + if (checksum != 0) { + printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", + qdev->ndev->name, checksum); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + /* + * We have a problem with endianness for the MAC addresses + * and the two 8-bit values version, and numPorts. We + * have to swap them on big endian systems. 
+ */ + ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress); + ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress); + ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress); + ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress); + pEEPROMData = (u16 *) & qdev->nvram_data.version; + *pEEPROMData = le16_to_cpu(*pEEPROMData); + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return checksum; +} + +static const u32 PHYAddr[2] = { + PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS +}; + +static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 temp; + int count = 1000; + + while (count) { + temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); + if (!(temp & MAC_MII_STATUS_BSY)) + return 0; + udelay(10); + count--; + } + return -1; +} + +static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 scanControl; + + if (qdev->numPorts > 1) { + /* Auto scan will cycle through multiple ports */ + scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; + } else { + scanControl = MAC_MII_CONTROL_SC; + } + + /* + * Scan register 1 of PHY/PETBI, + * Set up to scan both devices + * The autoscan starts from the first register, completes + * the last one before rolling over to the first + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (scanControl) | + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); +} + +static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) +{ + u8 ret; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + /* See if scan mode is enabled before we turn it off */ + if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & + (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { + /* Scan is enabled */ + ret = 1; + } else { + /* Scan is disabled */ + ret = 0; + } + + /* + * When disabling scan mode you must first change the MII register + * address + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | + MAC_MII_CONTROL_RC) << 16)); + + return ret; +} + +static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, + u16 regAddr, u16 value, u32 mac_index) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 scanWasEnabled; + + scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s Timed out waiting for management port to " + "get free before issuing command.\n", + qdev->ndev->name); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[mac_index] | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete 9/10/04 SJP */ + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Timed out waiting for management port to" + "get free before issuing command.\n", + qdev->ndev->name); + return -1; + } + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, + u16 * value, u32 mac_index) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 
scanWasEnabled; + u32 temp; + + scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Timed out waiting for management port to " + "get free before issuing command.\n", + qdev->ndev->name); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[mac_index] | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16)); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); + + /* Wait for the read to complete */ + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Timed out waiting for management port to " + "get free after issuing command.\n", + qdev->ndev->name); + return -1; + } + + temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); + *value = (u16) temp; + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Timed out waiting for management port to " + "get free before issuing command.\n", + qdev->ndev->name); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + qdev->PHYAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete. */ + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Timed out waiting for management port to " + "get free before issuing command.\n", + qdev->ndev->name); + return -1; + } + + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) +{ + u32 temp; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Timed out waiting for management port to " + "get free before issuing command.\n", + qdev->ndev->name); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + qdev->PHYAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16)); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); + + /* Wait for the read to complete */ + if (ql_wait_for_mii_ready(qdev)) { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Timed out waiting for management port to " + "get free before issuing command.\n", + qdev->ndev->name); + return -1; + } + + temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); + *value = (u16) temp; + + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static void ql_petbi_reset(struct ql3_adapter *qdev) +{ + ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); +} + +static void ql_petbi_start_neg(struct ql3_adapter *qdev) +{ + u16 reg; + + /* Enable Auto-negotiation sense */ + ql_mii_read_reg(qdev, PETBI_TBI_CTRL, ®); + reg |= PETBI_TBI_AUTO_SENSE; + ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); + + ql_mii_write_reg(qdev, PETBI_NEG_ADVER, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); + + ql_mii_write_reg(qdev, PETBI_CONTROL_REG, + 
PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | + PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); + +} + +static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index) +{ + ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, + mac_index); +} + +static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index) +{ + u16 reg; + + /* Enable Auto-negotiation sense */ + ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, ®, mac_index); + reg |= PETBI_TBI_AUTO_SENSE; + ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index); + + ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index); + + ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, + PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | + PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, + mac_index); +} + +static void ql_petbi_init(struct ql3_adapter *qdev) +{ + ql_petbi_reset(qdev); + ql_petbi_start_neg(qdev); +} + +static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index) +{ + ql_petbi_reset_ex(qdev, mac_index); + ql_petbi_start_neg_ex(qdev, mac_index); +} + +static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, ®) < 0) + return 0; + + return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; +} + +static int ql_phy_get_speed(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) + return 0; + + reg = (((reg & 0x18) >> 3) & 3); + + if (reg == 2) + return SPEED_1000; + else if (reg == 1) + return SPEED_100; + else if (reg == 0) + return SPEED_10; + else + return -1; +} + +static int ql_is_full_dup(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) + return 0; + + return (reg & PHY_AUX_DUPLEX_STAT) != 0; +} + +static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, ®) < 0) + return 0; + + return (reg & PHY_NEG_PAUSE) != 0; +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); + else + value = (MAC_CONFIG_REG_PE << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); + else + value = (MAC_CONFIG_REG_SR << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); + else + value = (MAC_CONFIG_REG_GM << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. 
+ */ +static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); + else + value = (MAC_CONFIG_REG_FD << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = + ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | + ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); + else + value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static int ql_is_fiber(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_SM0; + break; + case 1: + bitToCheck = PORT_STATUS_SM1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + return (temp & bitToCheck) != 0; +} + +static int ql_is_auto_cfg(struct ql3_adapter *qdev) +{ + u16 reg; + ql_mii_read_reg(qdev, 0x00, ®); + return (reg & 0x1000) != 0; +} + +/* + * Caller holds hw_lock. + */ +static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_AC0; + break; + case 1: + bitToCheck = PORT_STATUS_AC1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) { + if (netif_msg_link(qdev)) + printk(KERN_INFO PFX + "%s: Auto-Negotiate complete.\n", + qdev->ndev->name); + return 1; + } else { + if (netif_msg_link(qdev)) + printk(KERN_WARNING PFX + "%s: Auto-Negotiate incomplete.\n", + qdev->ndev->name); + return 0; + } +} + +/* + * ql_is_neg_pause() returns 1 if pause was negotiated to be on + */ +static int ql_is_neg_pause(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return ql_is_petbi_neg_pause(qdev); + else + return ql_is_phy_neg_pause(qdev); +} + +static int ql_auto_neg_error(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_AE0; + break; + case 1: + bitToCheck = PORT_STATUS_AE1; + break; + } + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + return (temp & bitToCheck) != 0; +} + +static u32 ql_get_link_speed(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return SPEED_1000; + else + return ql_phy_get_speed(qdev); +} + +static int ql_is_link_full_dup(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return 1; + else + return ql_is_full_dup(qdev); +} + +/* + * Caller holds hw_lock. 
+ */ +static int ql_link_down_detect(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = ISP_CONTROL_LINK_DN_0; + break; + case 1: + bitToCheck = ISP_CONTROL_LINK_DN_1; + break; + } + + temp = + ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); + return (temp & bitToCheck) != 0; +} + +/* + * Caller holds hw_lock. + */ +static int ql_link_down_detect_clear(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + switch (qdev->mac_index) { + case 0: + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + (ISP_CONTROL_LINK_DN_0) | + (ISP_CONTROL_LINK_DN_0 << 16)); + break; + + case 1: + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + (ISP_CONTROL_LINK_DN_1) | + (ISP_CONTROL_LINK_DN_1 << 16)); + break; + + default: + return 1; + } + + return 0; +} + +/* + * Caller holds hw_lock. + */ +static int ql_this_adapter_controls_port(struct ql3_adapter *qdev, + u32 mac_index) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (mac_index) { + case 0: + bitToCheck = PORT_STATUS_F1_ENABLED; + break; + case 1: + bitToCheck = PORT_STATUS_F3_ENABLED; + break; + default: + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) { + if (netif_msg_link(qdev)) + printk(KERN_DEBUG PFX + "%s: is not link master.\n", qdev->ndev->name); + return 0; + } else { + if (netif_msg_link(qdev)) + printk(KERN_DEBUG PFX + "%s: is link master.\n", qdev->ndev->name); + return 1; + } +} + +static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index) +{ + ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index); +} + +static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index) +{ + u16 reg; + + ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, + PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index); + + ql_mii_read_reg_ex(qdev, CONTROL_REG, ®, mac_index); + ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG, + mac_index); +} + +static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index) +{ + ql_phy_reset_ex(qdev, mac_index); + ql_phy_start_neg_ex(qdev, mac_index); +} + +/* + * Caller holds hw_lock. 
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
+	u32 bitToCheck = 0;
+	u32 temp, linkState;
+
+	switch (qdev->mac_index) {
+	case 0:
+		bitToCheck = PORT_STATUS_UP0;
+		break;
+	case 1:
+		bitToCheck = PORT_STATUS_UP1;
+		break;
+	}
+	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	if (temp & bitToCheck) {
+		linkState = LS_UP;
+	} else {
+		linkState = LS_DOWN;
+		if (netif_msg_link(qdev))
+			printk(KERN_WARNING PFX
+			       "%s: Link is down.\n", qdev->ndev->name);
+	}
+	return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7))
+		return -1;
+
+	if (ql_is_fiber(qdev)) {
+		ql_petbi_init(qdev);
+	} else {
+		/* Copper port */
+		ql_phy_init_ex(qdev, qdev->mac_index);
+	}
+
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7))
+		return -1;
+
+	if (!ql_auto_neg_error(qdev)) {
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+			/* configure the MAC */
+			if (netif_msg_link(qdev))
+				printk(KERN_DEBUG PFX
+				       "%s: Configuring link.\n",
+				       qdev->ndev->name);
+			ql_mac_cfg_soft_reset(qdev, 1);
+			ql_mac_cfg_gig(qdev,
+				       (ql_get_link_speed(qdev) ==
+					SPEED_1000));
+			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
+			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
+			ql_mac_cfg_soft_reset(qdev, 0);
+
+			/* enable the MAC */
+			if (netif_msg_link(qdev))
+				printk(KERN_DEBUG PFX
+				       "%s: Enabling mac.\n",
+				       qdev->ndev->name);
+			ql_mac_enable(qdev, 1);
+		}
+
+		if (netif_msg_link(qdev))
+			printk(KERN_DEBUG PFX
+			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
+			       qdev->ndev->name);
+		qdev->port_link_state = LS_UP;
+		netif_start_queue(qdev->ndev);
+		netif_carrier_on(qdev->ndev);
+		if (netif_msg_link(qdev))
+			printk(KERN_INFO PFX
+			       "%s: Link is up at %d Mbps, %s duplex.\n",
+			       qdev->ndev->name,
+			       ql_get_link_speed(qdev),
+			       ql_is_link_full_dup(qdev) ? "full" : "half");
+
+	} else {	/* Remote error detected */
+
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+			if (netif_msg_link(qdev))
+				printk(KERN_DEBUG PFX
+				       "%s: Remote error detected. "
+				       "Calling ql_port_start().\n",
+				       qdev->ndev->name);
+			/*
+			 * ql_port_start() is shared code and needs
+			 * to lock the PHY on its own.
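+			 * Drop QL_PHY_GIO_SEM first; ql_port_start()
+			 * re-acquires it before touching the PHY.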
+			 */
+			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+			if (ql_port_start(qdev))	/* Restart port */
+				return -1;
+			else
+				return 0;
+		}
+	}
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	return 0;
+}
+
+static void ql_link_state_machine(struct ql3_adapter *qdev)
+{
+	u32 curr_link_state;
+	unsigned long hw_flags;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+	curr_link_state = ql_get_link_state(qdev);
+
+	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+		if (netif_msg_link(qdev))
+			printk(KERN_INFO PFX
+			       "%s: Reset in progress, skip processing link "
+			       "state.\n", qdev->ndev->name);
+		/* Don't return with hw_lock still held. */
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		return;
+	}
+
+	switch (qdev->port_link_state) {
+	default:
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+			ql_port_start(qdev);
+		}
+		qdev->port_link_state = LS_DOWN;
+		/* Fall Through */
+
+	case LS_DOWN:
+		if (netif_msg_link(qdev))
+			printk(KERN_DEBUG PFX
+			       "%s: port_link_state = LS_DOWN.\n",
+			       qdev->ndev->name);
+		if (curr_link_state == LS_UP) {
+			if (netif_msg_link(qdev))
+				printk(KERN_DEBUG PFX
+				       "%s: curr_link_state = LS_UP.\n",
+				       qdev->ndev->name);
+			if (ql_is_auto_neg_complete(qdev))
+				ql_finish_auto_neg(qdev);
+
+			if (qdev->port_link_state == LS_UP)
+				ql_link_down_detect_clear(qdev);
+
+		}
+		break;
+
+	case LS_UP:
+		/*
+		 * See if the link is currently down or went down and came
+		 * back up
+		 */
+		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
+			if (netif_msg_link(qdev))
+				printk(KERN_INFO PFX "%s: Link is down.\n",
+				       qdev->ndev->name);
+			qdev->port_link_state = LS_DOWN;
+		}
+		break;
+	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_get_phy_owner(struct ql3_adapter *qdev)
+{
+	if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+		set_bit(QL_LINK_MASTER, &qdev->flags);
+	else
+		clear_bit(QL_LINK_MASTER, &qdev->flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_init_scan_mode(struct ql3_adapter *qdev)
+{
+	ql_mii_enable_scan_mode(qdev);
+
+	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+			ql_petbi_init_ex(qdev, qdev->mac_index);
+	} else {
+		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+			ql_phy_init_ex(qdev, qdev->mac_index);
+	}
+}
+
+/*
+ * MII_Setup needs to be called before taking the PHY out of reset so that the
+ * management interface clock speed can be set properly.  It would be better
+ * if we had a way to disable MDC until after the PHY is out of reset, but we
+ * don't have that capability.
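+ * ql_mii_setup() below therefore programs the MDC divider (125 MHz / 28)
+ * while holding QL_PHY_GIO_SEM, before ql_adapter_initialize() brings the
+ * PHY out of reset.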
+ */
+static int ql_mii_setup(struct ql3_adapter *qdev)
+{
+	u32 reg;
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
+
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7))
+		return -1;
+
+	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
+	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
+
+	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
+
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	return 0;
+}
+
+static u32 ql_supported_modes(struct ql3_adapter *qdev)
+{
+	u32 supported;
+
+	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
+		    | SUPPORTED_Autoneg;
+	} else {
+		supported = SUPPORTED_10baseT_Half
+		    | SUPPORTED_10baseT_Full
+		    | SUPPORTED_100baseT_Half
+		    | SUPPORTED_100baseT_Full
+		    | SUPPORTED_1000baseT_Half
+		    | SUPPORTED_1000baseT_Full
+		    | SUPPORTED_Autoneg | SUPPORTED_TP;
+	}
+
+	return supported;
+}
+
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+	int status;
+	unsigned long hw_flags;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7)) {
+		/* Release hw_lock on the failure path too. */
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		return 0;
+	}
+	status = ql_is_auto_cfg(qdev);
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+	u32 status;
+	unsigned long hw_flags;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7)) {
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		return 0;
+	}
+	status = ql_get_link_speed(qdev);
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+	int status;
+	unsigned long hw_flags;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7)) {
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		return 0;
+	}
+	status = ql_is_link_full_dup(qdev);
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return status;
+}
+
+static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+
+	ecmd->transceiver = XCVR_INTERNAL;
+	ecmd->supported = ql_supported_modes(qdev);
+
+	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+		ecmd->port = PORT_FIBRE;
+	} else {
+		ecmd->port = PORT_TP;
+		ecmd->phy_address = qdev->PHYAddr;
+	}
+	ecmd->advertising = ql_supported_modes(qdev);
+	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
+	ecmd->speed = ql_get_speed(qdev);
+	ecmd->duplex = ql_get_full_dup(qdev);
+	return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+			   struct ethtool_drvinfo *drvinfo)
+{
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
+	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+	drvinfo->n_stats = 0;
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+	return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+	qdev->msg_enable = value;
+}
+
+static struct ethtool_ops ql3xxx_ethtool_ops = {
+	.get_settings = ql_get_settings,
+	.get_drvinfo = ql_get_drvinfo,
+	.get_perm_addr = ethtool_op_get_perm_addr,
+	.get_link = ethtool_op_get_link,
+	.get_msglevel = ql_get_msglevel,
+	.set_msglevel = ql_set_msglevel,
+};
+
+static int ql_populate_free_queue(struct ql3_adapter *qdev)
+{
+	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+	u64 map;
+
+	while (lrg_buf_cb) {
+		if (!lrg_buf_cb->skb) {
+			lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+			if (unlikely(!lrg_buf_cb->skb)) {
+				printk(KERN_DEBUG PFX
+				       "%s: Failed dev_alloc_skb().\n",
+				       qdev->ndev->name);
+				break;
+			} else {
+				/*
+				 * We save some space to copy the ethhdr from
+				 * first buffer
+				 */
+				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+				map = pci_map_single(qdev->pdev,
+						     lrg_buf_cb->skb->data,
+						     qdev->lrg_buffer_len -
+						     QL_HEADER_SPACE,
+						     PCI_DMA_FROMDEVICE);
+				lrg_buf_cb->buf_phy_addr_low =
+				    cpu_to_le32(LS_64BITS(map));
+				lrg_buf_cb->buf_phy_addr_high =
+				    cpu_to_le32(MS_64BITS(map));
+				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+				pci_unmap_len_set(lrg_buf_cb, maplen,
+						  qdev->lrg_buffer_len -
+						  QL_HEADER_SPACE);
+				--qdev->lrg_buf_skb_check;
+				if (!qdev->lrg_buf_skb_check)
+					return 1;
+			}
+		}
+		lrg_buf_cb = lrg_buf_cb->next;
+	}
+	return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+	struct bufq_addr_element *lrg_buf_q_ele;
+	int i;
+	struct ql_rcv_buf_cb *lrg_buf_cb;
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
+
+	if ((qdev->lrg_buf_free_count >= 8)
+	    && (qdev->lrg_buf_release_cnt >= 16)) {
+
+		if (qdev->lrg_buf_skb_check)
+			if (!ql_populate_free_queue(qdev))
+				return;
+
+		lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
+		while ((qdev->lrg_buf_release_cnt >= 16)
+		       && (qdev->lrg_buf_free_count >= 8)) {
+
+			for (i = 0; i < 8; i++) {
+				lrg_buf_cb =
+				    ql_get_from_lrg_buf_free_list(qdev);
+				lrg_buf_q_ele->addr_high =
+				    lrg_buf_cb->buf_phy_addr_high;
+				lrg_buf_q_ele->addr_low =
+				    lrg_buf_cb->buf_phy_addr_low;
+				lrg_buf_q_ele++;
+
+				qdev->lrg_buf_release_cnt--;
+			}
+
+			qdev->lrg_buf_q_producer_index++;
+
+			if (qdev->lrg_buf_q_producer_index ==
+			    NUM_LBUFQ_ENTRIES)
+				qdev->lrg_buf_q_producer_index = 0;
+
+			if (qdev->lrg_buf_q_producer_index ==
+			    (NUM_LBUFQ_ENTRIES - 1)) {
+				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+			}
+		}
+
+		qdev->lrg_buf_next_free = lrg_buf_q_ele;
+
+		ql_write_common_reg(qdev,
+				    (u32 *) &port_regs->CommonRegs.
+				    rxLargeQProducerIndex,
+				    qdev->lrg_buf_q_producer_index);
+	}
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+				   struct ob_mac_iocb_rsp *mac_rsp)
+{
+	struct ql_tx_buf_cb *tx_cb;
+
+	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+	pci_unmap_single(qdev->pdev,
+			 pci_unmap_addr(tx_cb, mapaddr),
+			 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+	/*
+	 * Update the stats before freeing the skb; touching it after
+	 * dev_kfree_skb_irq() would be a use-after-free.
+	 */
+	qdev->stats.tx_packets++;
+	qdev->stats.tx_bytes += tx_cb->skb->len;
+	dev_kfree_skb_irq(tx_cb->skb);
+	tx_cb->skb = NULL;
+	atomic_inc(&qdev->tx_count);
+}
+
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+	long int offset;
+	u32 lrg_buf_phy_addr_low = 0;
+	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+	u32 *curr_ial_ptr;
+	struct sk_buff *skb;
+	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+	/*
+	 * Get the inbound address list (small buffer).
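+	 * Each element of the inbound address list is a 64-bit buffer
+	 * address, which is why curr_ial_ptr is advanced twice per buffer
+	 * below.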
+ */ + offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE; + if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) + qdev->small_buf_index = 0; + + curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset); + qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; + qdev->small_buf_release_cnt++; + + /* start of first buffer */ + lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); + lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) + qdev->lrg_buf_index = 0; + curr_ial_ptr++; /* 64-bit pointers require two incs. */ + curr_ial_ptr++; + + /* start of second buffer */ + lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); + lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index]; + + /* + * Second buffer gets sent up the stack. + */ + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) + qdev->lrg_buf_index = 0; + skb = lrg_buf_cb2->skb; + + qdev->stats.rx_packets++; + qdev->stats.rx_bytes += length; + + skb_put(skb, length); + pci_unmap_single(qdev->pdev, + pci_unmap_addr(lrg_buf_cb2, mapaddr), + pci_unmap_len(lrg_buf_cb2, maplen), + PCI_DMA_FROMDEVICE); + prefetch(skb->data); + skb->dev = qdev->ndev; + skb->ip_summed = CHECKSUM_NONE; + skb->protocol = eth_type_trans(skb, qdev->ndev); + + netif_receive_skb(skb); + qdev->ndev->last_rx = jiffies; + lrg_buf_cb2->skb = NULL; + + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); +} + +static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, + struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) +{ + long int offset; + u32 lrg_buf_phy_addr_low = 0; + struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; + struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; + u32 *curr_ial_ptr; + struct sk_buff *skb1, *skb2; + struct net_device *ndev = qdev->ndev; + u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); + u16 size = 0; + + /* + * Get the inbound address list (small buffer). + */ + + offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE; + if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) + qdev->small_buf_index = 0; + curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset); + qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; + qdev->small_buf_release_cnt++; + + /* start of first buffer */ + lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); + lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; + + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) + qdev->lrg_buf_index = 0; + skb1 = lrg_buf_cb1->skb; + curr_ial_ptr++; /* 64-bit pointers require two incs. */ + curr_ial_ptr++; + + /* start of second buffer */ + lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); + lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index]; + skb2 = lrg_buf_cb2->skb; + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) + qdev->lrg_buf_index = 0; + + qdev->stats.rx_packets++; + qdev->stats.rx_bytes += length; + + /* + * Copy the ethhdr from first buffer to second. This + * is necessary for IP completions. + */ + if (*((u16 *) skb1->data) != 0xFFFF) + size = VLAN_ETH_HLEN; + else + size = ETH_HLEN; + + skb_put(skb2, length); /* Just the second buffer length here. 
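+				 * The MAC/VLAN header is prepended from the
+				 * first buffer below (see the skb_push()
+				 * copy), giving the stack one complete frame.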
*/ + pci_unmap_single(qdev->pdev, + pci_unmap_addr(lrg_buf_cb2, mapaddr), + pci_unmap_len(lrg_buf_cb2, maplen), + PCI_DMA_FROMDEVICE); + prefetch(skb2->data); + + memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size); + skb2->dev = qdev->ndev; + skb2->ip_summed = CHECKSUM_NONE; + skb2->protocol = eth_type_trans(skb2, qdev->ndev); + + netif_receive_skb(skb2); + ndev->last_rx = jiffies; + lrg_buf_cb2->skb = NULL; + + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); +} + +static int ql_tx_rx_clean(struct ql3_adapter *qdev, + int *tx_cleaned, int *rx_cleaned, int work_to_do) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct net_rsp_iocb *net_rsp; + struct net_device *ndev = qdev->ndev; + unsigned long hw_flags; + + /* While there are entries in the completion queue. */ + while ((cpu_to_le32(*(qdev->prsp_producer_index)) != + qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) { + + net_rsp = qdev->rsp_current; + switch (net_rsp->opcode) { + + case OPCODE_OB_MAC_IOCB_FN0: + case OPCODE_OB_MAC_IOCB_FN2: + ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) + net_rsp); + (*tx_cleaned)++; + break; + + case OPCODE_IB_MAC_IOCB: + ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + + case OPCODE_IB_IP_IOCB: + ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + default: + { + u32 *tmp = (u32 *) net_rsp; + printk(KERN_ERR PFX + "%s: Hit default case, not " + "handled!\n" + " dropping the packet, opcode = " + "%x.\n", + ndev->name, net_rsp->opcode); + printk(KERN_ERR PFX + "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n", + (unsigned long int)tmp[0], + (unsigned long int)tmp[1], + (unsigned long int)tmp[2], + (unsigned long int)tmp[3]); + } + } + + qdev->rsp_consumer_index++; + + if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + } else { + qdev->rsp_current++; + } + } + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + ql_update_lrg_bufq_prod_index(qdev); + + if (qdev->small_buf_release_cnt >= 16) { + while (qdev->small_buf_release_cnt >= 16) { + qdev->small_buf_q_producer_index++; + + if (qdev->small_buf_q_producer_index == + NUM_SBUFQ_ENTRIES) + qdev->small_buf_q_producer_index = 0; + qdev->small_buf_release_cnt -= 8; + } + + ql_write_common_reg(qdev, + (u32 *) & port_regs->CommonRegs. 
+ rxSmallQProducerIndex, + qdev->small_buf_q_producer_index); + } + + ql_write_common_reg(qdev, + (u32 *) & port_regs->CommonRegs.rspQConsumerIndex, + qdev->rsp_consumer_index); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + if (unlikely(netif_queue_stopped(qdev->ndev))) { + if (netif_queue_stopped(qdev->ndev) && + (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4))) + netif_wake_queue(qdev->ndev); + } + + return *tx_cleaned + *rx_cleaned; +} + +static int ql_poll(struct net_device *ndev, int *budget) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + int work_to_do = min(*budget, ndev->quota); + int rx_cleaned = 0, tx_cleaned = 0; + + if (!netif_carrier_ok(ndev)) + goto quit_polling; + + ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do); + *budget -= rx_cleaned; + ndev->quota -= rx_cleaned; + + if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) { +quit_polling: + netif_rx_complete(ndev); + ql_enable_interrupts(qdev); + return 0; + } + return 1; +} + +static irqreturn_t ql3xxx_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + + struct net_device *ndev = dev_id; + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + u32 value; + int handled = 1; + u32 var; + + port_regs = qdev->mem_map_registers; + + value = + ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); + + if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { + spin_lock(&qdev->adapter_lock); + netif_stop_queue(qdev->ndev); + netif_carrier_off(qdev->ndev); + ql_disable_interrupts(qdev); + qdev->port_link_state = LS_DOWN; + set_bit(QL_RESET_ACTIVE,&qdev->flags) ; + + if (value & ISP_CONTROL_FE) { + /* + * Chip Fatal Error. + */ + var = + ql_read_page0_reg_l(qdev, + &port_regs->PortFatalErrStatus); + printk(KERN_WARNING PFX + "%s: Resetting chip. PortFatalErrStatus " + "register = 0x%x\n", ndev->name, var); + set_bit(QL_RESET_START,&qdev->flags) ; + } else { + /* + * Soft Reset Requested. + */ + set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; + printk(KERN_ERR PFX + "%s: Another function issued a reset to the " + "chip. 
ISR value = %x.\n", ndev->name, value); + } + queue_work(qdev->workqueue, &qdev->reset_work); + spin_unlock(&qdev->adapter_lock); + } else if (value & ISP_IMR_DISABLE_CMPL_INT) { + ql_disable_interrupts(qdev); + if (likely(netif_rx_schedule_prep(ndev))) + __netif_rx_schedule(ndev); + else + ql_enable_interrupts(qdev); + } else { + return IRQ_NONE; + } + + return IRQ_RETVAL(handled); +} + +static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) +{ + struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql_tx_buf_cb *tx_cb; + struct ob_mac_iocb_req *mac_iocb_ptr; + u64 map; + + if (unlikely(atomic_read(&qdev->tx_count) < 2)) { + if (!netif_queue_stopped(ndev)) + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; + mac_iocb_ptr = tx_cb->queue_entry; + memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); + mac_iocb_ptr->opcode = qdev->mac_ob_opcode; + mac_iocb_ptr->flags |= qdev->mb_bit_mask; + mac_iocb_ptr->transaction_id = qdev->req_producer_index; + mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len); + tx_cb->skb = skb; + map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map)); + mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map)); + mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E); + pci_unmap_addr_set(tx_cb, mapaddr, map); + pci_unmap_len_set(tx_cb, maplen, skb->len); + atomic_dec(&qdev->tx_count); + + qdev->req_producer_index++; + if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) + qdev->req_producer_index = 0; + wmb(); + ql_write_common_reg_l(qdev, + (u32 *) & port_regs->CommonRegs.reqQProducerIndex, + qdev->req_producer_index); + + ndev->trans_start = jiffies; + if (netif_msg_tx_queued(qdev)) + printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", + ndev->name, qdev->req_producer_index, skb->len); + + return NETDEV_TX_OK; +} +static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + qdev->req_q_size = + (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); + + qdev->req_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->req_q_size, + &qdev->req_q_phy_addr); + + if ((qdev->req_q_virt_addr == NULL) || + LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { + printk(KERN_ERR PFX "%s: reqQ failed.\n", + qdev->ndev->name); + return -ENOMEM; + } + + qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); + + qdev->rsp_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->rsp_q_size, + &qdev->rsp_q_phy_addr); + + if ((qdev->rsp_q_virt_addr == NULL) || + LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { + printk(KERN_ERR PFX + "%s: rspQ allocation failed\n", + qdev->ndev->name); + pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, + qdev->req_q_virt_addr, + qdev->req_q_phy_addr); + return -ENOMEM; + } + + set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); + + return 0; +} + +static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { + printk(KERN_INFO PFX + "%s: Already done.\n", qdev->ndev->name); + return; + } + + pci_free_consistent(qdev->pdev, + qdev->req_q_size, + qdev->req_q_virt_addr, qdev->req_q_phy_addr); + + qdev->req_q_virt_addr = NULL; + + pci_free_consistent(qdev->pdev, + qdev->rsp_q_size, + qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); + + 
qdev->rsp_q_virt_addr = NULL;
+
+	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+}
+
+static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+{
+	/* Create Large Buffer Queue */
+	qdev->lrg_buf_q_size =
+	    NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+	if (qdev->lrg_buf_q_size < PAGE_SIZE)
+		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
+	else
+		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+
+	qdev->lrg_buf_q_alloc_virt_addr =
+	    pci_alloc_consistent(qdev->pdev,
+				 qdev->lrg_buf_q_alloc_size,
+				 &qdev->lrg_buf_q_alloc_phy_addr);
+
+	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+		printk(KERN_ERR PFX
+		       "%s: lBufQ failed\n", qdev->ndev->name);
+		return -ENOMEM;
+	}
+	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+	/* Create Small Buffer Queue */
+	qdev->small_buf_q_size =
+	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+	if (qdev->small_buf_q_size < PAGE_SIZE)
+		qdev->small_buf_q_alloc_size = PAGE_SIZE;
+	else
+		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+	qdev->small_buf_q_alloc_virt_addr =
+	    pci_alloc_consistent(qdev->pdev,
+				 qdev->small_buf_q_alloc_size,
+				 &qdev->small_buf_q_alloc_phy_addr);
+
+	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+		printk(KERN_ERR PFX
+		       "%s: Small Buffer Queue allocation failed.\n",
+		       qdev->ndev->name);
+		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
+				    qdev->lrg_buf_q_alloc_virt_addr,
+				    qdev->lrg_buf_q_alloc_phy_addr);
+		return -ENOMEM;
+	}
+
+	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+	return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+		printk(KERN_INFO PFX
+		       "%s: Already done.\n", qdev->ndev->name);
+		return;
+	}
+
+	pci_free_consistent(qdev->pdev,
+			    qdev->lrg_buf_q_alloc_size,
+			    qdev->lrg_buf_q_alloc_virt_addr,
+			    qdev->lrg_buf_q_alloc_phy_addr);
+
+	qdev->lrg_buf_q_virt_addr = NULL;
+
+	pci_free_consistent(qdev->pdev,
+			    qdev->small_buf_q_alloc_size,
+			    qdev->small_buf_q_alloc_virt_addr,
+			    qdev->small_buf_q_alloc_phy_addr);
+
+	qdev->small_buf_q_virt_addr = NULL;
+
+	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+}
+
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+	int i;
+	struct bufq_addr_element *small_buf_q_entry;
+
+	/*
+	 * Currently we allocate one chunk of memory and use it for all of
+	 * the small buffers.
+	 */
+	qdev->small_buf_total_size =
+	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+	     QL_SMALL_BUFFER_SIZE);
+
+	qdev->small_buf_virt_addr =
+	    pci_alloc_consistent(qdev->pdev,
+				 qdev->small_buf_total_size,
+				 &qdev->small_buf_phy_addr);
+
+	if (qdev->small_buf_virt_addr == NULL) {
+		printk(KERN_ERR PFX
+		       "%s: Failed to get small buffer memory.\n",
+		       qdev->ndev->name);
+		return -ENOMEM;
+	}
+
+	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+	small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
+
+	/* Initialize the small buffer queue.
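+	 * Every queue element points into the single contiguous allocation
+	 * made above, at QL_SMALL_BUFFER_SIZE intervals.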
*/ + for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { + small_buf_q_entry->addr_high = + cpu_to_le32(qdev->small_buf_phy_addr_high); + small_buf_q_entry->addr_low = + cpu_to_le32(qdev->small_buf_phy_addr_low + + (i * QL_SMALL_BUFFER_SIZE)); + small_buf_q_entry++; + } + qdev->small_buf_index = 0; + set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); + return 0; +} + +static void ql_free_small_buffers(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { + printk(KERN_INFO PFX + "%s: Already done.\n", qdev->ndev->name); + return; + } + if (qdev->small_buf_virt_addr != NULL) { + pci_free_consistent(qdev->pdev, + qdev->small_buf_total_size, + qdev->small_buf_virt_addr, + qdev->small_buf_phy_addr); + + qdev->small_buf_virt_addr = NULL; + } +} + +static void ql_free_large_buffers(struct ql3_adapter *qdev) +{ + int i = 0; + struct ql_rcv_buf_cb *lrg_buf_cb; + + for (i = 0; i < NUM_LARGE_BUFFERS; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + if (lrg_buf_cb->skb) { + dev_kfree_skb(lrg_buf_cb->skb); + pci_unmap_single(qdev->pdev, + pci_unmap_addr(lrg_buf_cb, mapaddr), + pci_unmap_len(lrg_buf_cb, maplen), + PCI_DMA_FROMDEVICE); + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + } else { + break; + } + } +} + +static void ql_init_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; + + for (i = 0; i < NUM_LARGE_BUFFERS; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; + buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; + buf_addr_ele++; + } + qdev->lrg_buf_index = 0; + qdev->lrg_buf_skb_check = 0; +} + +static int ql_alloc_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct sk_buff *skb; + u64 map; + + for (i = 0; i < NUM_LARGE_BUFFERS; i++) { + skb = dev_alloc_skb(qdev->lrg_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + printk(KERN_ERR PFX + "%s: large buff alloc failed, " + "for %d bytes at index %d.\n", + qdev->ndev->name, + qdev->lrg_buffer_len * 2, i); + ql_free_large_buffers(qdev); + return -ENOMEM; + } else { + + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + lrg_buf_cb->index = i; + lrg_buf_cb->skb = skb; + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); + pci_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + } + } + return 0; +} + +static void ql_create_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + struct ob_mac_iocb_req *req_q_curr = + qdev->req_q_virt_addr; + + /* Create free list of transmit buffers */ + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + tx_cb = &qdev->tx_buf[i]; + tx_cb->skb = NULL; + tx_cb->queue_entry = req_q_curr; + req_q_curr++; + } +} + +static int ql_alloc_mem_resources(struct ql3_adapter *qdev) +{ + if (qdev->ndev->mtu == NORMAL_MTU_SIZE) + qdev->lrg_buffer_len = NORMAL_MTU_SIZE; + else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { + qdev->lrg_buffer_len = JUMBO_MTU_SIZE; + } else { + printk(KERN_ERR PFX + "%s: Invalid mtu size. 
Only 1500 and 9000 are accepted.\n", + qdev->ndev->name); + return -ENOMEM; + } + qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; + qdev->max_frame_size = + (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; + + /* + * First allocate a page of shared memory and use it for shadow + * locations of Network Request Queue Consumer Address Register and + * Network Completion Queue Producer Index Register + */ + qdev->shadow_reg_virt_addr = + pci_alloc_consistent(qdev->pdev, + PAGE_SIZE, &qdev->shadow_reg_phy_addr); + + if (qdev->shadow_reg_virt_addr != NULL) { + qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; + qdev->req_consumer_index_phy_addr_high = + MS_64BITS(qdev->shadow_reg_phy_addr); + qdev->req_consumer_index_phy_addr_low = + LS_64BITS(qdev->shadow_reg_phy_addr); + + qdev->prsp_producer_index = + (u32 *) (((u8 *) qdev->preq_consumer_index) + 8); + qdev->rsp_producer_index_phy_addr_high = + qdev->req_consumer_index_phy_addr_high; + qdev->rsp_producer_index_phy_addr_low = + qdev->req_consumer_index_phy_addr_low + 8; + } else { + printk(KERN_ERR PFX + "%s: shadowReg Alloc failed.\n", qdev->ndev->name); + return -ENOMEM; + } + + if (ql_alloc_net_req_rsp_queues(qdev) != 0) { + printk(KERN_ERR PFX + "%s: ql_alloc_net_req_rsp_queues failed.\n", + qdev->ndev->name); + goto err_req_rsp; + } + + if (ql_alloc_buffer_queues(qdev) != 0) { + printk(KERN_ERR PFX + "%s: ql_alloc_buffer_queues failed.\n", + qdev->ndev->name); + goto err_buffer_queues; + } + + if (ql_alloc_small_buffers(qdev) != 0) { + printk(KERN_ERR PFX + "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name); + goto err_small_buffers; + } + + if (ql_alloc_large_buffers(qdev) != 0) { + printk(KERN_ERR PFX + "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name); + goto err_small_buffers; + } + + /* Initialize the large buffer queue. 
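+	 * ql_init_large_buffers() just copies the DMA addresses established
+	 * by ql_alloc_large_buffers() into the hardware queue elements.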
*/ + ql_init_large_buffers(qdev); + ql_create_send_free_list(qdev); + + qdev->rsp_current = qdev->rsp_q_virt_addr; + + return 0; + +err_small_buffers: + ql_free_buffer_queues(qdev); +err_buffer_queues: + ql_free_net_req_rsp_queues(qdev); +err_req_rsp: + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->shadow_reg_virt_addr, + qdev->shadow_reg_phy_addr); + + return -ENOMEM; +} + +static void ql_free_mem_resources(struct ql3_adapter *qdev) +{ + ql_free_large_buffers(qdev); + ql_free_small_buffers(qdev); + ql_free_buffer_queues(qdev); + ql_free_net_req_rsp_queues(qdev); + if (qdev->shadow_reg_virt_addr != NULL) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->shadow_reg_virt_addr, + qdev->shadow_reg_phy_addr); + qdev->shadow_reg_virt_addr = NULL; + } +} + +static int ql_init_misc_registers(struct ql3_adapter *qdev) +{ + struct ql3xxx_local_ram_registers *local_ram = + (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers; + + if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 4)) + return -1; + + ql_write_page2_reg(qdev, + &local_ram->bufletSize, qdev->nvram_data.bufletSize); + + ql_write_page2_reg(qdev, + &local_ram->maxBufletCount, + qdev->nvram_data.bufletCount); + + ql_write_page2_reg(qdev, + &local_ram->freeBufletThresholdLow, + (qdev->nvram_data.tcpWindowThreshold25 << 16) | + (qdev->nvram_data.tcpWindowThreshold0)); + + ql_write_page2_reg(qdev, + &local_ram->freeBufletThresholdHigh, + qdev->nvram_data.tcpWindowThreshold50); + + ql_write_page2_reg(qdev, + &local_ram->ipHashTableBase, + (qdev->nvram_data.ipHashTableBaseHi << 16) | + qdev->nvram_data.ipHashTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->ipHashTableCount, + qdev->nvram_data.ipHashTableSize); + ql_write_page2_reg(qdev, + &local_ram->tcpHashTableBase, + (qdev->nvram_data.tcpHashTableBaseHi << 16) | + qdev->nvram_data.tcpHashTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->tcpHashTableCount, + qdev->nvram_data.tcpHashTableSize); + ql_write_page2_reg(qdev, + &local_ram->ncbBase, + (qdev->nvram_data.ncbTableBaseHi << 16) | + qdev->nvram_data.ncbTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->maxNcbCount, + qdev->nvram_data.ncbTableSize); + ql_write_page2_reg(qdev, + &local_ram->drbBase, + (qdev->nvram_data.drbTableBaseHi << 16) | + qdev->nvram_data.drbTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->maxDrbCount, + qdev->nvram_data.drbTableSize); + ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); + return 0; +} + +static int ql_adapter_initialize(struct ql3_adapter *qdev) +{ + u32 value; + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_host_memory_registers __iomem *hmem_regs = + (struct ql3xxx_host_memory_registers *)port_regs; + u32 delay = 10; + int status = 0; + + if(ql_mii_setup(qdev)) + return -1; + + /* Bring out PHY out of reset */ + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + (ISP_SERIAL_PORT_IF_WE | + (ISP_SERIAL_PORT_IF_WE << 16))); + + qdev->port_link_state = LS_DOWN; + netif_carrier_off(qdev->ndev); + + /* V2 chip fix for ARS-39168. 
*/ + ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + (ISP_SERIAL_PORT_IF_SDE | + (ISP_SERIAL_PORT_IF_SDE << 16))); + + /* Request Queue Registers */ + *((u32 *) (qdev->preq_consumer_index)) = 0; + atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); + qdev->req_producer_index = 0; + + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrHigh, + qdev->req_consumer_index_phy_addr_high); + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrLow, + qdev->req_consumer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrHigh, + MS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrLow, + LS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); + + /* Response Queue Registers */ + *((u16 *) (qdev->prsp_producer_index)) = 0; + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrHigh, + qdev->rsp_producer_index_phy_addr_high); + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrLow, + qdev->rsp_producer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrHigh, + MS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrLow, + LS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); + + /* Large Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrHigh, + MS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrLow, + LS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeBufferLength, + qdev->lrg_buffer_len); + + /* Small Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrHigh, + MS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrLow, + LS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallBufferLength, + QL_SMALL_BUFFER_SIZE); + + qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; + qdev->small_buf_release_cnt = 8; + qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1; + qdev->lrg_buf_release_cnt = 8; + qdev->lrg_buf_next_free = + (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr; + qdev->small_buf_index = 0; + qdev->lrg_buf_index = 0; + qdev->lrg_buf_free_count = 0; + qdev->lrg_buf_free_head = NULL; + qdev->lrg_buf_free_tail = NULL; + + ql_write_common_reg(qdev, + (u32 *) & port_regs->CommonRegs. + rxSmallQProducerIndex, + qdev->small_buf_q_producer_index); + ql_write_common_reg(qdev, + (u32 *) & port_regs->CommonRegs. + rxLargeQProducerIndex, + qdev->lrg_buf_q_producer_index); + + /* + * Find out if the chip has already been initialized. If it has, then + * we skip some of the initialization. + */ + clear_bit(QL_LINK_MASTER, &qdev->flags); + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if ((value & PORT_STATUS_IC) == 0) { + + /* Chip has not been configured yet, so let it rip. 
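+		 * Only the first function to come up performs this one-time
+		 * setup; PORT_STATUS_IC is set once the PORT_CONTROL_CC
+		 * write below signals configuration complete.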
*/ + if(ql_init_misc_registers(qdev)) { + status = -1; + goto out; + } + + if (qdev->mac_index) + ql_write_page0_reg(qdev, + &port_regs->mac1MaxFrameLengthReg, + qdev->max_frame_size); + else + ql_write_page0_reg(qdev, + &port_regs->mac0MaxFrameLengthReg, + qdev->max_frame_size); + + value = qdev->nvram_data.tcpMaxWindowSize; + ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); + + value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; + + if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 13)) { + status = -1; + goto out; + } + ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); + ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, + (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << + 16) | (INTERNAL_CHIP_SD | + INTERNAL_CHIP_WE))); + ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); + } + + + if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) { + status = -1; + goto out; + } + + ql_init_scan_mode(qdev); + ql_get_phy_owner(qdev); + + /* Load the MAC Configuration */ + + /* Program lower 32 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[2] << 24) + | (qdev->ndev->dev_addr[3] << 16) + | (qdev->ndev->dev_addr[4] << 8) + | qdev->ndev->dev_addr[5])); + + /* Program top 16 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[0] << 8) + | qdev->ndev->dev_addr[1])); + + /* Enable Primary MAC */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | + MAC_ADDR_INDIRECT_PTR_REG_PE)); + + /* Clear Primary and Secondary IP addresses */ + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + (qdev->mac_index << 2))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + ((qdev->mac_index << 2) + 1))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + + /* Indicate Configuration Complete */ + ql_write_page0_reg(qdev, + &port_regs->portControl, + ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); + + do { + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (value & PORT_STATUS_IC) + break; + msleep(500); + } while (--delay); + + if (delay == 0) { + printk(KERN_ERR PFX + "%s: Hw Initialization timeout.\n", qdev->ndev->name); + status = -1; + goto out; + } + + /* Enable Ethernet Function */ + value = + (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | + PORT_CONTROL_HH); + ql_write_page0_reg(qdev, &port_regs->portControl, + ((value << 16) | value)); + +out: + return status; +} + +/* + * Caller holds hw_lock. + */ +static int ql_adapter_reset(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + int status = 0; + u16 value; + int max_wait_time; + + set_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_DONE, &qdev->flags); + + /* + * Issue soft reset to chip. 
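+	 * Like the MAC-config writes above, the upper 16 bits of the write
+	 * act as a per-bit write-enable mask for the lower 16 bits.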
+	 */
+	printk(KERN_DEBUG PFX
+	       "%s: Issue soft reset to chip.\n",
+	       qdev->ndev->name);
+	ql_write_common_reg(qdev,
+			    (u32 *) &port_regs->CommonRegs.ispControlStatus,
+			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+	/* Wait up to 5 seconds for the reset to complete. */
+	printk(KERN_DEBUG PFX
+	       "%s: Waiting up to 5 seconds for reset to complete.\n",
+	       qdev->ndev->name);
+
+	/* Wait until the firmware tells us the Soft Reset is done */
+	max_wait_time = 5;
+	do {
+		value =
+		    ql_read_common_reg(qdev,
+				       &port_regs->CommonRegs.ispControlStatus);
+		if ((value & ISP_CONTROL_SR) == 0)
+			break;
+
+		ssleep(1);
+	} while ((--max_wait_time));
+
+	/*
+	 * Also, make sure that the Network Reset Interrupt bit has been
+	 * cleared after the soft reset has taken place.
+	 */
+	value =
+	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+	if (value & ISP_CONTROL_RI) {
+		printk(KERN_DEBUG PFX
+		       "ql_adapter_reset: clearing RI after reset.\n");
+		ql_write_common_reg(qdev,
+				    (u32 *) &port_regs->CommonRegs.
+				    ispControlStatus,
+				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+	}
+
+	if (max_wait_time == 0) {
+		/* Issue Force Soft Reset */
+		ql_write_common_reg(qdev,
+				    (u32 *) &port_regs->CommonRegs.
+				    ispControlStatus,
+				    ((ISP_CONTROL_FSR << 16) |
+				     ISP_CONTROL_FSR));
+		/*
+		 * Wait until the firmware tells us the Force Soft Reset is
+		 * done
+		 */
+		max_wait_time = 5;
+		do {
+			value =
+			    ql_read_common_reg(qdev,
+					       &port_regs->CommonRegs.
+					       ispControlStatus);
+			if ((value & ISP_CONTROL_FSR) == 0) {
+				break;
+			}
+			ssleep(1);
+		} while ((--max_wait_time));
+	}
+	if (max_wait_time == 0)
+		status = 1;
+
+	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+	set_bit(QL_RESET_DONE, &qdev->flags);
+	return status;
+}
+
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
+	u32 value, port_status;
+	u8 func_number;
+
+	/* Get the function number */
+	value =
+	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	switch (value & ISP_CONTROL_FN_MASK) {
+	case ISP_CONTROL_FN0_NET:
+		qdev->mac_index = 0;
+		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
+		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
+		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+		qdev->PHYAddr = PORT0_PHY_ADDRESS;
+		if (port_status & PORT_STATUS_SM0)
+			set_bit(QL_LINK_OPTICAL, &qdev->flags);
+		else
+			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+		break;
+
+	case ISP_CONTROL_FN1_NET:
+		qdev->mac_index = 1;
+		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
+		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
+		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+		qdev->PHYAddr = PORT1_PHY_ADDRESS;
+		if (port_status & PORT_STATUS_SM1)
+			set_bit(QL_LINK_OPTICAL, &qdev->flags);
+		else
+			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+		break;
+
+	case ISP_CONTROL_FN0_SCSI:
+	case ISP_CONTROL_FN1_SCSI:
+	default:
+		printk(KERN_DEBUG PFX
+		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
+		       qdev->ndev->name, value);
+		break;
+	}
+	qdev->numPorts = qdev->nvram_data.numPorts;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+	struct pci_dev *pdev = qdev->pdev;
+
+	printk(KERN_INFO PFX
+	       "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
+	       DRV_NAME,
+	       qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+	printk(KERN_INFO PFX
+	       "%s Interface.\n",
+	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
+
+	/*
+	 * Print PCI bus width/type.
+	 */
+	printk(KERN_INFO PFX
+	       "Bus interface is %s %s.\n",
+	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+	       ((qdev->pci_x) ? "PCI-X" : "PCI"));
+
+	printk(KERN_INFO PFX
+	       "mem IO base address adjusted = 0x%p\n",
+	       qdev->mem_map_registers);
+	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
+
+	if (netif_msg_probe(qdev))
+		printk(KERN_INFO PFX
+		       "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
+		       ndev->dev_addr[2], ndev->dev_addr[3],
+		       ndev->dev_addr[4], ndev->dev_addr[5]);
+}
+
+static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
+{
+	struct net_device *ndev = qdev->ndev;
+	int retval = 0;
+
+	netif_stop_queue(ndev);
+	netif_carrier_off(ndev);
+
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+	clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+	ql_disable_interrupts(qdev);
+
+	free_irq(qdev->pdev->irq, ndev);
+
+	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+		printk(KERN_INFO PFX
+		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
+		clear_bit(QL_MSI_ENABLED, &qdev->flags);
+		pci_disable_msi(qdev->pdev);
+	}
+
+	del_timer_sync(&qdev->adapter_timer);
+
+	netif_poll_disable(ndev);
+
+	if (do_reset) {
+		int soft_reset;
+		unsigned long hw_flags;
+
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+		if (ql_wait_for_drvr_lock(qdev)) {
+			if ((soft_reset = ql_adapter_reset(qdev))) {
+				printk(KERN_ERR PFX
+				       "%s: ql_adapter_reset(%d) FAILED!\n",
+				       ndev->name, qdev->index);
+			}
+			printk(KERN_ERR PFX
+			       "%s: Releasing driver lock via chip reset.\n",
+			       ndev->name);
+		} else {
+			printk(KERN_ERR PFX
+			       "%s: Could not acquire driver lock to do "
+			       "reset!\n", ndev->name);
+			retval = -1;
+		}
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	}
+	ql_free_mem_resources(qdev);
+	return retval;
+}
+
+static int ql_adapter_up(struct ql3_adapter *qdev)
+{
+	struct net_device *ndev = qdev->ndev;
+	int err;
+	unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
+	unsigned long hw_flags;
+
+	if (ql_alloc_mem_resources(qdev)) {
+		printk(KERN_ERR PFX
+		       "%s Unable to allocate buffers.\n", ndev->name);
+		return -ENOMEM;
+	}
+
+	if (qdev->msi) {
+		if (pci_enable_msi(qdev->pdev)) {
+			printk(KERN_ERR PFX
+			       "%s: User requested MSI, but MSI failed to "
+			       "initialize.  Continuing without MSI.\n",
+			       qdev->ndev->name);
+			qdev->msi = 0;
+		} else {
+			printk(KERN_INFO PFX "%s: MSI Enabled...\n",
+			       qdev->ndev->name);
+			set_bit(QL_MSI_ENABLED, &qdev->flags);
+			irq_flags &= ~SA_SHIRQ;
+		}
+	}
+
+	if ((err = request_irq(qdev->pdev->irq,
+			       ql3xxx_isr,
+			       irq_flags, ndev->name, ndev))) {
+		printk(KERN_ERR PFX
+		       "%s: Failed to reserve interrupt %d; already in use.\n",
+		       ndev->name, qdev->pdev->irq);
+		goto err_irq;
+	}
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+	if ((err = ql_wait_for_drvr_lock(qdev))) {
+		if ((err = ql_adapter_initialize(qdev))) {
+			printk(KERN_ERR PFX
+			       "%s: Unable to initialize adapter.\n",
+			       ndev->name);
+			goto err_init;
+		}
+		printk(KERN_ERR PFX
+		       "%s: Releasing driver lock.\n", ndev->name);
+		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+	} else {
+		printk(KERN_ERR PFX
+		       "%s: Could not acquire driver lock.\n",
+		       ndev->name);
+		err = -ENODEV;
+		goto err_lock;
+	}
+
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+
+	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+	netif_poll_enable(ndev);
+	ql_enable_interrupts(qdev);
+	return 0;
+
+err_init:
+	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+err_lock:
+	/* The error paths above are reached with hw_lock held. */
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	free_irq(qdev->pdev->irq, ndev);
+err_irq:
+	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+		printk(KERN_INFO PFX
+		       "%s: calling pci_disable_msi().\n",
+		       qdev->ndev->name);
+		clear_bit(QL_MSI_ENABLED, &qdev->flags);
+		pci_disable_msi(qdev->pdev);
+	}
+	return err;
+}
+
+static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
+{
+	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+		printk(KERN_ERR PFX
+		       "%s: Driver up/down cycle failed, "
+		       "closing device\n", qdev->ndev->name);
+		dev_close(qdev->ndev);
+		return -1;
+	}
+	return 0;
+}
+
+static int ql3xxx_close(struct net_device *ndev)
+{
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+
+	/*
+	 * Wait for device to recover from a reset.
+	 * (Rarely happens, but possible.)
+	 */
+	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+		msleep(50);
+
+	ql_adapter_down(qdev, QL_DO_RESET);
+	return 0;
+}
+
+static int ql3xxx_open(struct net_device *ndev)
+{
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+	return ql_adapter_up(qdev);
+}
+
+static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
+{
+	struct ql3_adapter *qdev = netdev_priv(dev);
+	return &qdev->stats;
+}
+
+static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+	printk(KERN_INFO PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
+	if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
+		printk(KERN_ERR PFX
+		       "%s: mtu size of %d is not valid.  Use exactly %d or "
+		       "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
+		       JUMBO_MTU_SIZE);
+		return -EINVAL;
+	}
+
+	if (!netif_running(ndev)) {
+		ndev->mtu = new_mtu;
+		return 0;
+	}
+
+	ndev->mtu = new_mtu;
+	return ql_cycle_adapter(qdev, QL_DO_RESET);
+}
+
+static void ql3xxx_set_multicast_list(struct net_device *ndev)
+{
+	/*
+	 * We are manually parsing the list in the net_device structure.
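+	 * Multicast support is turned off for this device (IFF_MULTICAST is
+	 * cleared in ql3xxx_probe()), so there is nothing to program here.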
+	 */
+	return;
+}
+
+static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
+	struct sockaddr *addr = p;
+	unsigned long hw_flags;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	/* Program lower 32 bits of the MAC address */
+	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+			   ((ndev->dev_addr[2] << 24) |
+			    (ndev->dev_addr[3] << 16) |
+			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
+
+	/* Program top 16 bits of the MAC address */
+	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+	return 0;
+}
+
+static void ql3xxx_tx_timeout(struct net_device *ndev)
+{
+	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+
+	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
+	/*
+	 * Stop the queues, we've got a problem.
+	 */
+	netif_stop_queue(ndev);
+
+	/*
+	 * Wake up the worker to process this event.
+	 */
+	queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+}
+
+static void ql_reset_work(struct ql3_adapter *qdev)
+{
+	struct net_device *ndev = qdev->ndev;
+	u32 value;
+	struct ql_tx_buf_cb *tx_cb;
+	int max_wait_time, i;
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
+	unsigned long hw_flags;
+
+	/*
+	 * test_bit() takes a single bit number, so test the two reset flags
+	 * individually rather than OR-ing the bit numbers together.
+	 */
+	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+	    test_bit(QL_RESET_START, &qdev->flags)) {
+		clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+		/*
+		 * Loop through the active list and return the skb.
+		 */
+		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+			tx_cb = &qdev->tx_buf[i];
+			if (tx_cb->skb) {
+				printk(KERN_DEBUG PFX
+				       "%s: Freeing lost SKB.\n",
+				       qdev->ndev->name);
+				pci_unmap_single(qdev->pdev,
+					pci_unmap_addr(tx_cb, mapaddr),
+					pci_unmap_len(tx_cb, maplen),
+					PCI_DMA_TODEVICE);
+				dev_kfree_skb(tx_cb->skb);
+				tx_cb->skb = NULL;
+			}
+		}
+
+		printk(KERN_ERR PFX
+		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.ispControlStatus,
+				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+		/*
+		 * Wait for the Soft Reset to Complete.
+		 */
+		max_wait_time = 10;
+		do {
+			value = ql_read_common_reg(qdev,
+						   &port_regs->CommonRegs.
+						   ispControlStatus);
+			if ((value & ISP_CONTROL_SR) == 0) {
+				printk(KERN_DEBUG PFX
+				       "%s: reset completed.\n",
+				       qdev->ndev->name);
+				break;
+			}
+
+			if (value & ISP_CONTROL_RI) {
+				printk(KERN_DEBUG PFX
+				       "%s: clearing NRI after reset.\n",
+				       qdev->ndev->name);
+				ql_write_common_reg(qdev,
+						    (u32 *) &port_regs->
+						    CommonRegs.
+						    ispControlStatus,
+						    ((ISP_CONTROL_RI << 16) |
+						     ISP_CONTROL_RI));
+			}
+
+			ssleep(1);
+		} while (--max_wait_time);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+		if (value & ISP_CONTROL_SR) {
+			/*
+			 * Set the reset flags and clear the board again.
+			 * Nothing else to do...
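+			 * ql_cycle_adapter() takes the adapter through a
+			 * full down/up cycle, this time with a hard reset.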
+			 */
+			printk(KERN_ERR PFX
+			       "%s: Timed out waiting for reset to "
+			       "complete.\n", ndev->name);
+			printk(KERN_ERR PFX
+			       "%s: Do a reset.\n", ndev->name);
+			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+			clear_bit(QL_RESET_START, &qdev->flags);
+			ql_cycle_adapter(qdev, QL_DO_RESET);
+			return;
+		}
+
+		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+		clear_bit(QL_RESET_START, &qdev->flags);
+		ql_cycle_adapter(qdev, QL_NO_RESET);
+	}
+}
+
+static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+{
+	ql_cycle_adapter(qdev, QL_DO_RESET);
+}
+
+static void ql_get_board_info(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
+	u32 value;
+
+	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
+
+	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
+	if (value & PORT_STATUS_64)
+		qdev->pci_width = 64;
+	else
+		qdev->pci_width = 32;
+	if (value & PORT_STATUS_X)
+		qdev->pci_x = 1;
+	else
+		qdev->pci_x = 0;
+	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
+}
+
+static void ql3xxx_timer(unsigned long ptr)
+{
+	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+
+	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+		printk(KERN_DEBUG PFX
+		       "%s: Reset in progress.\n",
+		       qdev->ndev->name);
+		goto end;
+	}
+
+	ql_link_state_machine(qdev);
+
+	/* Restart timer on 1 second interval. */
+end:
+	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+}
+
+static int __devinit ql3xxx_probe(struct pci_dev *pdev,
+				  const struct pci_device_id *pci_entry)
+{
+	struct net_device *ndev = NULL;
+	struct ql3_adapter *qdev = NULL;
+	static int cards_found = 0;
+	int pci_using_dac, err;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
+		       pci_name(pdev));
+		goto err_out;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
+		       pci_name(pdev));
+		goto err_out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		pci_using_dac = 1;
+		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+		pci_using_dac = 0;
+		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	}
+
+	if (err) {
+		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+		       pci_name(pdev));
+		goto err_out_free_regions;
+	}
+
+	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
+	if (!ndev) {
+		err = -ENOMEM;
+		goto err_out_free_regions;
+	}
+
+	SET_MODULE_OWNER(ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	ndev->features = NETIF_F_LLTX;
+	if (pci_using_dac)
+		ndev->features |= NETIF_F_HIGHDMA;
+
+	pci_set_drvdata(pdev, ndev);
+
+	qdev = netdev_priv(ndev);
+	qdev->index = cards_found;
+	qdev->ndev = ndev;
+	qdev->pdev = pdev;
+	qdev->port_link_state = LS_DOWN;
+	if (msi)
+		qdev->msi = 1;
+
+	qdev->msg_enable = netif_msg_init(debug, default_msg);
+
+	qdev->mem_map_registers =
+	    ioremap_nocache(pci_resource_start(pdev, 1),
+			    pci_resource_len(qdev->pdev, 1));
+	if (!qdev->mem_map_registers) {
+		printk(KERN_ERR PFX "%s: cannot map device registers\n",
+		       pci_name(pdev));
+		err = -ENOMEM;
+		goto err_out_free_ndev;
+	}
+
+	spin_lock_init(&qdev->adapter_lock);
+	spin_lock_init(&qdev->hw_lock);
+
+	/* Set driver entry points */
+	ndev->open = ql3xxx_open;
+	ndev->hard_start_xmit = ql3xxx_send;
+	ndev->stop = ql3xxx_close;
+	ndev->get_stats = ql3xxx_get_stats;
+	ndev->change_mtu = ql3xxx_change_mtu;
+	ndev->set_multicast_list = ql3xxx_set_multicast_list;
+	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+	ndev->set_mac_address = ql3xxx_set_mac_address;
+	ndev->tx_timeout = ql3xxx_tx_timeout;
+	ndev->watchdog_timeo = 5 * HZ;
+
+	ndev->poll = &ql_poll;
+	ndev->weight = 64;
+
+	ndev->irq = pdev->irq;
+
+	/* make sure the EEPROM is good */
+	if (ql_get_nvram_params(qdev)) {
+		printk(KERN_ALERT PFX
+		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
+		       qdev->index);
+		err = -EIO;
+		goto err_out_iounmap;
+	}
+
+	ql_set_mac_info(qdev);
+
+	/* Validate and set parameters */
+	if (qdev->mac_index) {
+		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
+		       ETH_ALEN);
+	} else {
+		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
+		       ETH_ALEN);
+	}
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
+
+	/* Turn off support for multicasting */
+	ndev->flags &= ~IFF_MULTICAST;
+
+	/* Record PCI bus information. */
+	ql_get_board_info(qdev);
+
+	/*
+	 * Set the Maximum Memory Read Byte Count value. We do this to handle
+	 * jumbo frames.
+	 */
+	if (qdev->pci_x)
+		pci_write_config_word(pdev, 0x4e, 0x0036);
+
+	err = register_netdev(ndev);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cannot register net device\n",
+		       pci_name(pdev));
+		goto err_out_iounmap;
+	}
+
+	/* we're going to reset, so assume we have no link for now */
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+
+	qdev->workqueue = create_singlethread_workqueue(ndev->name);
+	INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
+	INIT_WORK(&qdev->tx_timeout_work,
+		  (void (*)(void *))ql_tx_timeout_work, qdev);
+
+	init_timer(&qdev->adapter_timer);
+	qdev->adapter_timer.function = ql3xxx_timer;
+	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
+	qdev->adapter_timer.data = (unsigned long)qdev;
+
+	if (!cards_found) {
+		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
+		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
+		       DRV_NAME, DRV_VERSION);
+	}
+	ql_display_dev_info(ndev);
+
+	cards_found++;
+	return 0;
+
+err_out_iounmap:
+	iounmap(qdev->mem_map_registers);
+err_out_free_ndev:
+	free_netdev(ndev);
+err_out_free_regions:
+	pci_release_regions(pdev);
+err_out_disable_pdev:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+err_out:
+	return err;
+}
+
+static void __devexit ql3xxx_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql3_adapter *qdev = netdev_priv(ndev);
+
+	unregister_netdev(ndev);
+
+	ql_disable_interrupts(qdev);
+
+	if (qdev->workqueue) {
+		cancel_delayed_work(&qdev->reset_work);
+		cancel_delayed_work(&qdev->tx_timeout_work);
+		destroy_workqueue(qdev->workqueue);
+		qdev->workqueue = NULL;
+	}
+
+	iounmap(qdev->mem_map_registers);
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(ndev);
+}
+
+static struct pci_driver ql3xxx_driver = {
+	.name = DRV_NAME,
+	.id_table = ql3xxx_pci_tbl,
+	.probe = ql3xxx_probe,
+	.remove = __devexit_p(ql3xxx_remove),
+};
+
+static int __init ql3xxx_init_module(void)
+{
+	return pci_register_driver(&ql3xxx_driver);
+}
+
+static void __exit ql3xxx_exit(void)
+{
+	pci_unregister_driver(&ql3xxx_driver);
+}
+
+module_init(ql3xxx_init_module);
+module_exit(ql3xxx_exit);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
new file mode 100644
index 00000000000..9492cee6b08
--- /dev/null
+++ b/drivers/net/qla3xxx.h
@@ -0,0 +1,1194 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ *
See LICENSE.qla3xxx for copyright and licensing details. + */ +#ifndef _QLA3XXX_H_ +#define _QLA3XXX_H_ + +/* + * IOCB Definitions... + */ +#pragma pack(1) + +#define OPCODE_OB_MAC_IOCB_FN0 0x01 +#define OPCODE_OB_MAC_IOCB_FN2 0x21 +#define OPCODE_OB_TCP_IOCB_FN0 0x03 +#define OPCODE_OB_TCP_IOCB_FN2 0x23 +#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00 +#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20 + +#define OPCODE_UPDATE_NCB_IOCB 0xF0 +#define OPCODE_IB_MAC_IOCB 0xF9 +#define OPCODE_IB_IP_IOCB 0xFA +#define OPCODE_IB_TCP_IOCB 0xFB +#define OPCODE_DUMP_PROTO_IOCB 0xFE +#define OPCODE_BUFFER_ALERT_IOCB 0xFB + +#define OPCODE_FUNC_ID_MASK 0x30 +#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ +#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */ +#define UPDATE_NCB_IOCB 0x00 /* plus function bits */ + +#define FN0_MA_BITS_MASK 0x00 +#define FN1_MA_BITS_MASK 0x80 + +struct ob_mac_iocb_req { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_REQ_MA 0xC0 +#define OB_MAC_IOCB_REQ_F 0x20 +#define OB_MAC_IOCB_REQ_X 0x10 +#define OB_MAC_IOCB_REQ_D 0x02 +#define OB_MAC_IOCB_REQ_I 0x01 + __le16 reserved0; + + __le32 transaction_id; + __le16 data_len; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved4; + __le32 reserved5; +}; +/* + * The following constants define control bits for buffer + * length fields for all IOCB's. + */ +#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */ +#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */ +#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */ +#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. 
*/ + +struct ob_mac_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_P 0x08 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + __le32 transaction_id; + __le32 reserved1; + __le32 reserved2; +}; + +struct ib_mac_iocb_rsp { + u8 opcode; + u8 flags; +#define IB_MAC_IOCB_RSP_S 0x80 +#define IB_MAC_IOCB_RSP_H1 0x40 +#define IB_MAC_IOCB_RSP_H0 0x20 +#define IB_MAC_IOCB_RSP_B 0x10 +#define IB_MAC_IOCB_RSP_M 0x08 +#define IB_MAC_IOCB_RSP_MA 0x07 + + __le16 length; + __le32 reserved; + __le32 ial_low; + __le32 ial_high; + +}; + +struct ob_ip_iocb_req { + u8 opcode; + __le16 flags; +#define OB_IP_IOCB_REQ_O 0x100 +#define OB_IP_IOCB_REQ_H 0x008 +#define OB_IP_IOCB_REQ_U 0x004 +#define OB_IP_IOCB_REQ_D 0x002 +#define OB_IP_IOCB_REQ_I 0x001 + + u8 reserved0; + + __le32 transaction_id; + __le16 data_len; + __le16 reserved1; + __le32 hncb_ptr_low; + __le32 hncb_ptr_high; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved2; + __le32 reserved3; +}; + +/* defines for BufferLength fields above */ +#define OB_IP_IOCB_REQ_E 0x80000000 +#define OB_IP_IOCB_REQ_C 0x40000000 +#define OB_IP_IOCB_REQ_L 0x20000000 +#define OB_IP_IOCB_REQ_R 0x10000000 + +struct ob_ip_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_E 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + __le32 transaction_id; + __le32 reserved1; + __le32 reserved2; +}; + +struct ob_tcp_iocb_req { + u8 opcode; + + u8 flags0; +#define OB_TCP_IOCB_REQ_P 0x80 +#define OB_TCP_IOCB_REQ_CI 0x20 +#define OB_TCP_IOCB_REQ_H 0x10 +#define OB_TCP_IOCB_REQ_LN 0x08 +#define OB_TCP_IOCB_REQ_K 0x04 +#define OB_TCP_IOCB_REQ_D 0x02 +#define OB_TCP_IOCB_REQ_I 0x01 + + u8 flags1; +#define OB_TCP_IOCB_REQ_OSM 0x40 +#define OB_TCP_IOCB_REQ_URG 0x20 +#define OB_TCP_IOCB_REQ_ACK 0x10 +#define OB_TCP_IOCB_REQ_PSH 0x08 +#define OB_TCP_IOCB_REQ_RST 0x04 +#define OB_TCP_IOCB_REQ_SYN 0x02 +#define OB_TCP_IOCB_REQ_FIN 0x01 + + u8 options_len; +#define OB_TCP_IOCB_REQ_OMASK 0xF0 +#define OB_TCP_IOCB_REQ_SHIFT 4 + + __le32 transaction_id; + __le32 data_len; + __le32 hncb_ptr_low; + __le32 hncb_ptr_high; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 time_stamp; + __le32 reserved1; +}; + +struct ob_tcp_iocb_rsp { + u8 opcode; + + u8 flags0; +#define OB_TCP_IOCB_RSP_C 0x20 +#define OB_TCP_IOCB_RSP_H 0x10 +#define OB_TCP_IOCB_RSP_LN 0x08 +#define OB_TCP_IOCB_RSP_K 0x04 +#define OB_TCP_IOCB_RSP_D 0x02 +#define OB_TCP_IOCB_RSP_I 0x01 + + u8 flags1; +#define OB_TCP_IOCB_RSP_E 0x10 +#define OB_TCP_IOCB_RSP_W 0x08 +#define OB_TCP_IOCB_RSP_P 0x04 +#define OB_TCP_IOCB_RSP_T 0x02 +#define OB_TCP_IOCB_RSP_F 0x01 + + u8 state; +#define OB_TCP_IOCB_RSP_SMASK 0xF0 +#define OB_TCP_IOCB_RSP_SHIFT 4 + + __le32 transaction_id; + __le32 local_ncb_ptr; + __le32 reserved0; +}; + +struct ib_ip_iocb_rsp { + u8 opcode; + u8 flags; +#define IB_IP_IOCB_RSP_S 0x80 +#define IB_IP_IOCB_RSP_H1 0x40 +#define IB_IP_IOCB_RSP_H0 0x20 +#define IB_IP_IOCB_RSP_B 0x10 +#define IB_IP_IOCB_RSP_M 0x08 +#define IB_IP_IOCB_RSP_MA 0x07 + + __le16 length; + __le16 checksum; + __le16 reserved; +#define IB_IP_IOCB_RSP_R 0x01 + __le32 ial_low; + __le32 ial_high; 
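+	/* ial_low/ial_high form a 64-bit pointer to an indirect address
+	 * list for multi-buffer frames; the IAL_* flag bits defined with
+	 * struct lrg_buf_q_entry below appear to apply to its elements
+	 * (interpretation assumed, not vendor-documented). */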
+}; + +struct ib_tcp_iocb_rsp { + u8 opcode; + u8 flags; +#define IB_TCP_IOCB_RSP_P 0x80 +#define IB_TCP_IOCB_RSP_T 0x40 +#define IB_TCP_IOCB_RSP_D 0x20 +#define IB_TCP_IOCB_RSP_N 0x10 +#define IB_TCP_IOCB_RSP_IP 0x03 +#define IB_TCP_FLAG_MASK 0xf0 +#define IB_TCP_FLAG_IOCB_SYN 0x00 + +#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK) + + __le16 length; + __le32 hncb_ref_num; + __le32 ial_low; + __le32 ial_high; +}; + +struct net_rsp_iocb { + u8 opcode; + u8 flags; + __le16 reserved0; + __le32 reserved[3]; +}; +#pragma pack() + +/* + * Register Definitions... + */ +#define PORT0_PHY_ADDRESS 0x1e00 +#define PORT1_PHY_ADDRESS 0x1f00 + +#define ETHERNET_CRC_SIZE 4 + +#define MII_SCAN_REGISTER 0x00000001 + +/* 32-bit ispControlStatus */ +enum { + ISP_CONTROL_NP_MASK = 0x0003, + ISP_CONTROL_NP_PCSR = 0x0000, + ISP_CONTROL_NP_HMCR = 0x0001, + ISP_CONTROL_NP_LRAMCR = 0x0002, + ISP_CONTROL_NP_PSR = 0x0003, + ISP_CONTROL_RI = 0x0008, + ISP_CONTROL_CI = 0x0010, + ISP_CONTROL_PI = 0x0020, + ISP_CONTROL_IN = 0x0040, + ISP_CONTROL_BE = 0x0080, + ISP_CONTROL_FN_MASK = 0x0700, + ISP_CONTROL_FN0_NET = 0x0400, + ISP_CONTROL_FN0_SCSI = 0x0500, + ISP_CONTROL_FN1_NET = 0x0600, + ISP_CONTROL_FN1_SCSI = 0x0700, + ISP_CONTROL_LINK_DN_0 = 0x0800, + ISP_CONTROL_LINK_DN_1 = 0x1000, + ISP_CONTROL_FSR = 0x2000, + ISP_CONTROL_FE = 0x4000, + ISP_CONTROL_SR = 0x8000, +}; + +/* 32-bit ispInterruptMaskReg */ +enum { + ISP_IMR_ENABLE_INT = 0x0004, + ISP_IMR_DISABLE_RESET_INT = 0x0008, + ISP_IMR_DISABLE_CMPL_INT = 0x0010, + ISP_IMR_DISABLE_PROC_INT = 0x0020, +}; + +/* 32-bit serialPortInterfaceReg */ +enum { + ISP_SERIAL_PORT_IF_CLK = 0x0001, + ISP_SERIAL_PORT_IF_CS = 0x0002, + ISP_SERIAL_PORT_IF_D0 = 0x0004, + ISP_SERIAL_PORT_IF_DI = 0x0008, + ISP_NVRAM_MASK = (0x000F << 16), + ISP_SERIAL_PORT_IF_WE = 0x0010, + ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F, + ISP_SERIAL_PORT_IF_SCI = 0x0400, + ISP_SERIAL_PORT_IF_SC0 = 0x0800, + ISP_SERIAL_PORT_IF_SCE = 0x1000, + ISP_SERIAL_PORT_IF_SDI = 0x2000, + ISP_SERIAL_PORT_IF_SDO = 0x4000, + ISP_SERIAL_PORT_IF_SDE = 0x8000, + ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00, +}; + +/* semaphoreReg */ +enum { + QL_RESOURCE_MASK_BASE_CODE = 0x7, + QL_RESOURCE_BITS_BASE_CODE = 0x4, + QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1), + QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4), + QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7), + QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10), + QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13), + QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)), + QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)), + QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)), + QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)), + QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)), +}; + + /* + * QL3XXX memory-mapped registers + * QL3XXX has 4 "pages" of registers, each page occupying + * 256 bytes. Each page has a "common" area at the start and then + * page-specific registers after that. 
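+ *
+ * How the paging presumably works (inferred from the ISP_CONTROL_NP_*
+ * bits above and from the helpers in qla3xxx.c, not from vendor docs):
+ * the low bits of ispControlStatus select the active page, the driver
+ * caches the selection in qdev->current_page, and the page-aware
+ * accessors only rewrite the page-select field on a page change, e.g.:
+ *
+ *	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ *	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, value);
+ *	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);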
+ */
+struct ql3xxx_common_registers {
+	u32 MB0;		/* Offset 0x00 */
+	u32 MB1;		/* Offset 0x04 */
+	u32 MB2;		/* Offset 0x08 */
+	u32 MB3;		/* Offset 0x0c */
+	u32 MB4;		/* Offset 0x10 */
+	u32 MB5;		/* Offset 0x14 */
+	u32 MB6;		/* Offset 0x18 */
+	u32 MB7;		/* Offset 0x1c */
+	u32 flashBiosAddr;
+	u32 flashBiosData;
+	u32 ispControlStatus;
+	u32 ispInterruptMaskReg;
+	u32 serialPortInterfaceReg;
+	u32 semaphoreReg;
+	u32 reqQProducerIndex;
+	u32 rspQConsumerIndex;
+
+	u32 rxLargeQProducerIndex;
+	u32 rxSmallQProducerIndex;
+	u32 arcMadiCommand;
+	u32 arcMadiData;
+};
+
+enum {
+	EXT_HW_CONFIG_SP_MASK = 0x0006,
+	EXT_HW_CONFIG_SP_NONE = 0x0000,
+	EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
+	EXT_HW_CONFIG_SP_ECC = 0x0004,
+	EXT_HW_CONFIG_SP_ECCx = 0x0006,
+	EXT_HW_CONFIG_SIZE_MASK = 0x0060,
+	EXT_HW_CONFIG_SIZE_128M = 0x0000,
+	EXT_HW_CONFIG_SIZE_256M = 0x0020,
+	EXT_HW_CONFIG_SIZE_512M = 0x0040,
+	EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
+	EXT_HW_CONFIG_PD = 0x0080,
+	EXT_HW_CONFIG_FW = 0x0200,
+	EXT_HW_CONFIG_US = 0x0400,
+	EXT_HW_CONFIG_DCS_MASK = 0x1800,
+	EXT_HW_CONFIG_DCS_9MA = 0x0000,
+	EXT_HW_CONFIG_DCS_15MA = 0x0800,
+	EXT_HW_CONFIG_DCS_18MA = 0x1000,
+	EXT_HW_CONFIG_DCS_24MA = 0x1800,
+	EXT_HW_CONFIG_DDS_MASK = 0x6000,
+	EXT_HW_CONFIG_DDS_9MA = 0x0000,
+	EXT_HW_CONFIG_DDS_15MA = 0x2000,
+	EXT_HW_CONFIG_DDS_18MA = 0x4000,
+	EXT_HW_CONFIG_DDS_24MA = 0x6000,
+};
+
+/* InternalChipConfig */
+enum {
+	INTERNAL_CHIP_DM = 0x0001,
+	INTERNAL_CHIP_SD = 0x0002,
+	INTERNAL_CHIP_RAP_MASK = 0x000C,
+	INTERNAL_CHIP_RAP_RR = 0x0000,
+	INTERNAL_CHIP_RAP_NRM = 0x0004,
+	INTERNAL_CHIP_RAP_ERM = 0x0008,
+	INTERNAL_CHIP_RAP_ERMx = 0x000C,
+	INTERNAL_CHIP_WE = 0x0010,
+	INTERNAL_CHIP_EF = 0x0020,
+	INTERNAL_CHIP_FR = 0x0040,
+	INTERNAL_CHIP_FW = 0x0080,
+	INTERNAL_CHIP_FI = 0x0100,
+	INTERNAL_CHIP_FT = 0x0200,
+};
+
+/* portControl */
+enum {
+	PORT_CONTROL_DS = 0x0001,
+	PORT_CONTROL_HH = 0x0002,
+	PORT_CONTROL_EI = 0x0004,
+	PORT_CONTROL_ET = 0x0008,
+	PORT_CONTROL_EF = 0x0010,
+	PORT_CONTROL_DRM = 0x0020,
+	PORT_CONTROL_RLB = 0x0040,
+	PORT_CONTROL_RCB = 0x0080,
+	PORT_CONTROL_MAC = 0x0100,
+	PORT_CONTROL_IPV = 0x0200,
+	PORT_CONTROL_IFP = 0x0400,
+	PORT_CONTROL_ITP = 0x0800,
+	PORT_CONTROL_FI = 0x1000,
+	PORT_CONTROL_DFP = 0x2000,
+	PORT_CONTROL_OI = 0x4000,
+	PORT_CONTROL_CC = 0x8000,
+};
+
+/* portStatus */
+enum {
+	PORT_STATUS_SM0 = 0x0001,
+	PORT_STATUS_SM1 = 0x0002,
+	PORT_STATUS_X = 0x0008,
+	PORT_STATUS_DL = 0x0080,
+	PORT_STATUS_IC = 0x0200,
+	PORT_STATUS_MRC = 0x0400,
+	PORT_STATUS_NL = 0x0800,
+	PORT_STATUS_REV_ID_MASK = 0x7000,
+	PORT_STATUS_REV_ID_1 = 0x1000,
+	PORT_STATUS_REV_ID_2 = 0x2000,
+	PORT_STATUS_REV_ID_3 = 0x3000,
+	PORT_STATUS_64 = 0x8000,
+	PORT_STATUS_UP0 = 0x10000,
+	PORT_STATUS_AC0 = 0x20000,
+	PORT_STATUS_AE0 = 0x40000,
+	PORT_STATUS_UP1 = 0x100000,
+	PORT_STATUS_AC1 = 0x200000,
+	PORT_STATUS_AE1 = 0x400000,
+	PORT_STATUS_F0_ENABLED = 0x1000000,
+	PORT_STATUS_F1_ENABLED = 0x2000000,
+	PORT_STATUS_F2_ENABLED = 0x4000000,
+	PORT_STATUS_F3_ENABLED = 0x8000000,
+};
+
+/* macAddrIndirectPtrReg */
+enum {
+	MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
+	MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
+	MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
+	MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
+	MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
+	MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
+	MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
+	MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
+	MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
+	MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
+};
+
+/* macMIIMgmtControlReg */
+enum {
MAC_MII_CONTROL_RC = 0x0001, + MAC_MII_CONTROL_SC = 0x0002, + MAC_MII_CONTROL_AS = 0x0004, + MAC_MII_CONTROL_NP = 0x0008, + MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070, + MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000, + MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010, + MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020, + MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030, + MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040, + MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050, + MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060, + MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070, + MAC_MII_CONTROL_RM = 0x8000, +}; + +/* macMIIStatusReg */ +enum { + MAC_MII_STATUS_BSY = 0x0001, + MAC_MII_STATUS_SC = 0x0002, + MAC_MII_STATUS_NV = 0x0004, +}; + +enum { + MAC_CONFIG_REG_PE = 0x0001, + MAC_CONFIG_REG_TF = 0x0002, + MAC_CONFIG_REG_RF = 0x0004, + MAC_CONFIG_REG_FD = 0x0008, + MAC_CONFIG_REG_GM = 0x0010, + MAC_CONFIG_REG_LB = 0x0020, + MAC_CONFIG_REG_SR = 0x8000, +}; + +enum { + MAC_HALF_DUPLEX_REG_ED = 0x10000, + MAC_HALF_DUPLEX_REG_NB = 0x20000, + MAC_HALF_DUPLEX_REG_BNB = 0x40000, + MAC_HALF_DUPLEX_REG_ALT = 0x80000, +}; + +enum { + IP_ADDR_INDEX_REG_MASK = 0x000f, + IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000, + IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001, + IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002, + IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003, + IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004, + IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, + IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, + IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, +}; + +enum { + PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f, + PROBE_MUX_ADDR_REG_SYSCLK = 0x0000, + PROBE_MUX_ADDR_REG_PCICLK = 0x0040, + PROBE_MUX_ADDR_REG_NRXCLK = 0x0080, + PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0, + PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00, + PROBE_MUX_ADDR_REG_UP = 0x4000, + PROBE_MUX_ADDR_REG_RE = 0x8000, +}; + +enum { + STATISTICS_INDEX_REG_MASK = 0x01ff, + STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000, + STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001, + STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002, + STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003, + STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004, + STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005, + STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006, + STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007, + STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008, + STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009, + STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a, + STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b, + STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c, + STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d, + STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e, + STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f, + STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010, + STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011, + STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012, + STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013, + STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014, + STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015, + STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016, + STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017, + STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018, + STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019, + STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a, + STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b, + STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c, + STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d, + STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e, + STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f, + STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020, + STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021, + STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022, + STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023, + STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024, + 
STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025, + STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026, + STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027, + STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028, + STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029, + STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030, + STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031, + STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032, + STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033, + STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034, + STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035, + STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036, + STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037, + STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038, + STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f, +}; + +enum { + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001, + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002, + PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004, + PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020, + PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040, + PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080, + PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100, + PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200, + PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400, + PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800, + PORT_FATAL_ERROR_STATUS_BLE = 0x00001000, + PORT_FATAL_ERROR_STATUS_SPE = 0x00002000, + PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000, + PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000, + PORT_FATAL_ERROR_STATUS_ICE = 0x00010000, + PORT_FATAL_ERROR_STATUS_ILE = 0x00020000, + PORT_FATAL_ERROR_STATUS_OPE = 0x00040000, + PORT_FATAL_ERROR_STATUS_TA = 0x00080000, + PORT_FATAL_ERROR_STATUS_MA = 0x00100000, + PORT_FATAL_ERROR_STATUS_SCE = 0x00200000, + PORT_FATAL_ERROR_STATUS_RPE = 0x00400000, + PORT_FATAL_ERROR_STATUS_MPE = 0x00800000, + PORT_FATAL_ERROR_STATUS_OCE = 0x01000000, +}; + +/* + * port control and status page - page 0 + */ + +struct ql3xxx_port_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 ExternalHWConfig; + u32 InternalChipConfig; + u32 portControl; + u32 portStatus; + u32 macAddrIndirectPtrReg; + u32 macAddrDataReg; + u32 macMIIMgmtControlReg; + u32 macMIIMgmtAddrReg; + u32 macMIIMgmtDataReg; + u32 macMIIStatusReg; + u32 mac0ConfigReg; + u32 mac0IpgIfgReg; + u32 mac0HalfDuplexReg; + u32 mac0MaxFrameLengthReg; + u32 mac0PauseThresholdReg; + u32 mac1ConfigReg; + u32 mac1IpgIfgReg; + u32 mac1HalfDuplexReg; + u32 mac1MaxFrameLengthReg; + u32 mac1PauseThresholdReg; + u32 ipAddrIndexReg; + u32 ipAddrDataReg; + u32 ipReassemblyTimeout; + u32 tcpMaxWindow; + u32 currentTcpTimestamp[2]; + u32 internalRamRWAddrReg; + u32 internalRamWDataReg; + u32 reclaimedBufferAddrRegLow; + u32 reclaimedBufferAddrRegHigh; + u32 reserved[2]; + u32 fpgaRevID; + u32 localRamAddr; + u32 localRamDataAutoIncr; + u32 localRamDataNonIncr; + u32 gpOutput; + u32 gpInput; + u32 probeMuxAddr; + u32 probeMuxData; + u32 statisticsIndexReg; + u32 statisticsReadDataRegAutoIncr; + u32 statisticsReadDataRegNoIncr; + u32 PortFatalErrStatus; +}; + +/* + * port host memory config page - page 1 + */ +struct ql3xxx_host_memory_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 reserved[12]; + + /* Network Request Queue */ + u32 reqConsumerIndex; + u32 reqConsumerIndexAddrLow; + u32 reqConsumerIndexAddrHigh; + u32 reqBaseAddrLow; + u32 reqBaseAddrHigh; + u32 reqLength; + + /* Network Completion Queue */ + u32 rspProducerIndex; + u32 rspProducerIndexAddrLow; + u32 rspProducerIndexAddrHigh; + u32 rspBaseAddrLow; + u32 rspBaseAddrHigh; + 
u32 rspLength;
+
+	/* RX Large Buffer Queue */
+	u32 rxLargeQConsumerIndex;
+	u32 rxLargeQBaseAddrLow;
+	u32 rxLargeQBaseAddrHigh;
+	u32 rxLargeQLength;
+	u32 rxLargeBufferLength;
+
+	/* RX Small Buffer Queue */
+	u32 rxSmallQConsumerIndex;
+	u32 rxSmallQBaseAddrLow;
+	u32 rxSmallQBaseAddrHigh;
+	u32 rxSmallQLength;
+	u32 rxSmallBufferLength;
+};
+
+/*
+ * port local RAM page - page 2
+ */
+struct ql3xxx_local_ram_registers {
+	struct ql3xxx_common_registers CommonRegs;
+	u32 bufletSize;
+	u32 maxBufletCount;
+	u32 currentBufletCount;
+	u32 reserved;
+	u32 freeBufletThresholdLow;
+	u32 freeBufletThresholdHigh;
+	u32 ipHashTableBase;
+	u32 ipHashTableCount;
+	u32 tcpHashTableBase;
+	u32 tcpHashTableCount;
+	u32 ncbBase;
+	u32 maxNcbCount;
+	u32 currentNcbCount;
+	u32 drbBase;
+	u32 maxDrbCount;
+	u32 currentDrbCount;
+};
+
+/*
+ * helpers to split a 64-bit DMA address into its 32-bit halves
+ */
+
+#define LS_64BITS(x)	(u32)(0xffffffff & ((u64)(x)))
+#define MS_64BITS(x)	(u32)(0xffffffff & (((u64)(x))>>16>>16))
+
+/*
+ * PHY (MII) register definitions
+ */
+
+enum {
+	CONTROL_REG = 0,
+	STATUS_REG = 1,
+	PHY_STAT_LINK_UP = 0x0004,
+	PHY_CTRL_LOOPBACK = 0x4000,
+
+	PETBI_CONTROL_REG = 0x00,
+	PETBI_CTRL_SOFT_RESET = 0x8000,
+	PETBI_CTRL_AUTO_NEG = 0x1000,
+	PETBI_CTRL_RESTART_NEG = 0x0200,
+	PETBI_CTRL_FULL_DUPLEX = 0x0100,
+	PETBI_CTRL_SPEED_1000 = 0x0040,
+
+	PETBI_STATUS_REG = 0x01,
+	PETBI_STAT_NEG_DONE = 0x0020,
+	PETBI_STAT_LINK_UP = 0x0004,
+
+	PETBI_NEG_ADVER = 0x04,
+	PETBI_NEG_PAUSE = 0x0080,
+	PETBI_NEG_PAUSE_MASK = 0x0180,
+	PETBI_NEG_DUPLEX = 0x0020,
+	PETBI_NEG_DUPLEX_MASK = 0x0060,
+
+	PETBI_NEG_PARTNER = 0x05,
+	PETBI_NEG_ERROR_MASK = 0x3000,
+
+	PETBI_EXPANSION_REG = 0x06,
+	PETBI_EXP_PAGE_RX = 0x0002,
+
+	PETBI_TBI_CTRL = 0x11,
+	PETBI_TBI_RESET = 0x8000,
+	PETBI_TBI_AUTO_SENSE = 0x0100,
+	PETBI_TBI_SERDES_MODE = 0x0010,
+	PETBI_TBI_SERDES_WRAP = 0x0002,
+
+	AUX_CONTROL_STATUS = 0x1c,
+	PHY_AUX_NEG_DONE = 0x8000,
+	PHY_NEG_PARTNER = 5,
+	PHY_AUX_DUPLEX_STAT = 0x0020,
+	PHY_AUX_SPEED_STAT = 0x0018,
+	PHY_AUX_NO_HW_STRAP = 0x0004,
+	PHY_AUX_RESET_STICK = 0x0002,
+	PHY_NEG_PAUSE = 0x0400,
+	PHY_CTRL_SOFT_RESET = 0x8000,
+	PHY_NEG_ADVER = 4,
+	PHY_NEG_ADV_SPEED = 0x01e0,
+	PHY_CTRL_RESTART_NEG = 0x0200,
+};
+enum {
+/* FM93C56A EEPROM definitions */
+	FM93C56A_START = 0x1,
+/* Commands */
+	FM93C56A_READ = 0x2,
+	FM93C56A_WEN = 0x0,
+	FM93C56A_WRITE = 0x1,
+	FM93C56A_WRITE_ALL = 0x0,
+	FM93C56A_WDS = 0x0,
+	FM93C56A_ERASE = 0x3,
+	FM93C56A_ERASE_ALL = 0x0,
+/* Command Extensions */
+	FM93C56A_WEN_EXT = 0x3,
+	FM93C56A_WRITE_ALL_EXT = 0x1,
+	FM93C56A_WDS_EXT = 0x0,
+	FM93C56A_ERASE_ALL_EXT = 0x2,
+/* Special Bits */
+	FM93C56A_READ_DUMMY_BITS = 1,
+	FM93C56A_READY = 0,
+	FM93C56A_BUSY = 1,
+	FM93C56A_CMD_BITS = 2,
+/* EEPROM device sizes */
+	FM93C56A_SIZE_8 = 0x100,
+	FM93C56A_SIZE_16 = 0x80,
+	FM93C66A_SIZE_8 = 0x200,
+	FM93C66A_SIZE_16 = 0x100,
+	FM93C86A_SIZE_16 = 0x400,
+/* Address Bits */
+	FM93C56A_NO_ADDR_BITS_16 = 8,
+	FM93C56A_NO_ADDR_BITS_8 = 9,
+	FM93C86A_NO_ADDR_BITS_16 = 10,
+/* Data Bits */
+	FM93C56A_DATA_BITS_16 = 16,
+	FM93C56A_DATA_BITS_8 = 8,
+};
+enum {
+/* Auburn Bits */
+	AUBURN_EEPROM_DI = 0x8,
+	AUBURN_EEPROM_DI_0 = 0x0,
+	AUBURN_EEPROM_DI_1 = 0x8,
+	AUBURN_EEPROM_DO = 0x4,
+	AUBURN_EEPROM_DO_0 = 0x0,
+	AUBURN_EEPROM_DO_1 = 0x4,
+	AUBURN_EEPROM_CS = 0x2,
+	AUBURN_EEPROM_CS_0 = 0x0,
+	AUBURN_EEPROM_CS_1 = 0x2,
+	AUBURN_EEPROM_CLK_RISE = 0x1,
+	AUBURN_EEPROM_CLK_FALL = 0x0,
+};
+enum {
+	EEPROM_SIZE = FM93C86A_SIZE_16,
+	EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
+	EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
+};
+
+/*
+ * MAC Config data structure
+ */
+struct eeprom_port_cfg {
+	u16 etherMtu_mac;
+	u16 pauseThreshold_mac;
+	u16 resumeThreshold_mac;
+	u16 portConfiguration;
+#define PORT_CONFIG_AUTO_NEG_ENABLED	0x8000
+#define PORT_CONFIG_SYM_PAUSE_ENABLED	0x4000
+#define PORT_CONFIG_FULL_DUPLEX_ENABLED	0x2000
+#define PORT_CONFIG_HALF_DUPLEX_ENABLED	0x1000
+#define PORT_CONFIG_1000MB_SPEED	0x0400
+#define PORT_CONFIG_100MB_SPEED		0x0200
+#define PORT_CONFIG_10MB_SPEED		0x0100
+#define PORT_CONFIG_LINK_SPEED_MASK	0x0F00
+	u16 reserved[12];
+};
+
+/*
+ * BIOS data structure
+ */
+struct eeprom_bios_cfg {
+	u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
+
+	u8 bootID0:7, boodID0Valid:1;
+	u8 bootLun0[8];
+
+	u8 bootID1:7, boodID1Valid:1;
+	u8 bootLun1[8];
+
+	u16 MaxLunsTrgt;
+	u8 reserved[10];
+};
+
+/*
+ * Function Specific Data structure
+ */
+struct eeprom_function_cfg {
+	u8 reserved[30];
+	u8 macAddress[6];
+	u8 macAddressSecondary[6];
+
+	u16 subsysVendorId;
+	u16 subsysDeviceId;
+};
+
+/*
+ * EEPROM format
+ */
+struct eeprom_data {
+	u8 asicId[4];
+	u8 version;
+	u8 numPorts;
+	u16 boardId;
+
+#define EEPROM_BOARDID_STR_SIZE	16
+#define EEPROM_SERIAL_NUM_SIZE	16
+
+	u8 boardIdStr[16];
+	u8 serialNumber[16];
+	u16 extHwConfig;
+	struct eeprom_port_cfg macCfg_port0;
+	struct eeprom_port_cfg macCfg_port1;
+	u16 bufletSize;
+	u16 bufletCount;
+	u16 tcpWindowThreshold50;
+	u16 tcpWindowThreshold25;
+	u16 tcpWindowThreshold0;
+	u16 ipHashTableBaseHi;
+	u16 ipHashTableBaseLo;
+	u16 ipHashTableSize;
+	u16 tcpHashTableBaseHi;
+	u16 tcpHashTableBaseLo;
+	u16 tcpHashTableSize;
+	u16 ncbTableBaseHi;
+	u16 ncbTableBaseLo;
+	u16 ncbTableSize;
+	u16 drbTableBaseHi;
+	u16 drbTableBaseLo;
+	u16 drbTableSize;
+	u16 reserved_142[4];
+	u16 ipReassemblyTimeout;
+	u16 tcpMaxWindowSize;
+	u16 ipSecurity;
+#define IPSEC_CONFIG_PRESENT 0x0001
+	u8 reserved_156[294];
+	u16 qDebug[8];
+	struct eeprom_function_cfg funcCfg_fn0;
+	u16 reserved_510;
+	u8 oemSpace[432];
+	struct eeprom_bios_cfg biosCfg_fn1;
+	struct eeprom_function_cfg funcCfg_fn1;
+	u16 reserved_1022;
+	u8 reserved_1024[464];
+	struct eeprom_function_cfg funcCfg_fn2;
+	u16 reserved_1534;
+	u8 reserved_1536[432];
+	struct eeprom_bios_cfg biosCfg_fn3;
+	struct eeprom_function_cfg funcCfg_fn3;
+	u16 checksum;
+};
+
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches for controlling driver behavior.
+ * Some are not supported under certain conditions and are notated as such.
+ */
+
+#define QL3XXX_VENDOR_ID	0x1077
+#define QL3022_DEVICE_ID	0x3022
+
+/* MTU & Frame Size stuff */
+#define NORMAL_MTU_SIZE		ETH_DATA_LEN
+#define JUMBO_MTU_SIZE		9000
+#define VLAN_ID_LEN		2
+
+/* Request Queue Related Definitions */
+#define NUM_REQ_Q_ENTRIES	256	/* 256 entries of 64 bytes = 16384 (4 pages) */
+
+/* Response Queue Related Definitions */
+#define NUM_RSP_Q_ENTRIES	256	/* so that 256 * 16 = 4096 (1 page) */
+
+/* Transmit and Receive Buffers */
+#define NUM_LBUFQ_ENTRIES	128
+#define NUM_SBUFQ_ENTRIES	64
+#define QL_SMALL_BUFFER_SIZE	32
+#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
+(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
+	/* Each send has at least one control block.  This is how many we keep. */
+#define NUM_SMALL_BUFFERS	(NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
+#define NUM_LARGE_BUFFERS	(NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
+#define QL_HEADER_SPACE 32	/* make header space at top of skb. */
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+	u32 addr0_lower;
+#define IAL_LAST_ENTRY	0x00000001
+#define IAL_CONT_ENTRY	0x00000002
+#define IAL_FLAG_MASK	0x00000003
+	u32 addr0_upper;
+	u32 addr1_lower;
+	u32 addr1_upper;
+	u32 addr2_lower;
+	u32 addr2_upper;
+	u32 addr3_lower;
+	u32 addr3_upper;
+	u32 addr4_lower;
+	u32 addr4_upper;
+	u32 addr5_lower;
+	u32 addr5_upper;
+	u32 addr6_lower;
+	u32 addr6_upper;
+	u32 addr7_lower;
+	u32 addr7_upper;
+};
+
+struct bufq_addr_element {
+	u32 addr_low;
+	u32 addr_high;
+};
+
+#define QL_NO_RESET	0
+#define QL_DO_RESET	1
+
+enum link_state_t {
+	LS_UNKNOWN = 0,
+	LS_DOWN,
+	LS_DEGRADE,
+	LS_RECOVER,
+	LS_UP,
+};
+
+struct ql_rcv_buf_cb {
+	struct ql_rcv_buf_cb *next;
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+	__le32 buf_phy_addr_low;
+	__le32 buf_phy_addr_high;
+	int index;
+};
+
+struct ql_tx_buf_cb {
+	struct sk_buff *skb;
+	struct ob_mac_iocb_req *queue_entry;
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+/* definitions for type field */
+#define QL_BUF_TYPE_MACIOCB	0x01
+#define QL_BUF_TYPE_IPIOCB	0x02
+#define QL_BUF_TYPE_TCPIOCB	0x03
+
+/* qdev->flags definitions. */
+enum {
+	QL_RESET_DONE = 1,	/* Reset finished. */
+	QL_RESET_ACTIVE = 2,	/* Waiting for reset to finish. */
+	QL_RESET_START = 3,	/* Please reset the chip. */
+	QL_RESET_PER_SCSI = 4,	/* SCSI driver requests reset. */
+	QL_TX_TIMEOUT = 5,	/* Timeout in progress. */
+	QL_LINK_MASTER = 6,	/* This driver controls the link. */
+	QL_ADAPTER_UP = 7,	/* Adapter has been brought up. */
+	QL_THREAD_UP = 8,	/* This flag is available. */
+	QL_LINK_UP = 9,		/* Link Status. */
+	QL_ALLOC_REQ_RSP_Q_DONE = 10,
+	QL_ALLOC_BUFQS_DONE = 11,
+	QL_ALLOC_SMALL_BUF_DONE = 12,
+	QL_LINK_OPTICAL = 13,
+	QL_MSI_ENABLED = 14,
+};
+
+/*
+ * ql3_adapter - The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
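+ *
+ * Locking note (inferred from qla3xxx.c rather than vendor-stated):
+ * hw_lock is taken with spin_lock_irqsave() around paged register
+ * access, as in ql3xxx_set_mac_address() and ql_reset_work(), while
+ * adapter_lock presumably serializes adapter bring-up and tear-down.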
+ */ + +struct ql3_adapter { + u32 reserved_00; + unsigned long flags; + + /* PCI Configuration information for this device */ + struct pci_dev *pdev; + struct net_device *ndev; /* Parent NET device */ + + /* Hardware information */ + u8 chip_rev_id; + u8 pci_slot; + u8 pci_width; + u8 pci_x; + u32 msi; + int index; + struct timer_list adapter_timer; /* timer used for various functions */ + + spinlock_t adapter_lock; + spinlock_t hw_lock; + + /* PCI Bus Relative Register Addresses */ + u8 *mmap_virt_base; /* stores return value from ioremap() */ + struct ql3xxx_port_registers __iomem *mem_map_registers; + u32 current_page; /* tracks current register page */ + + u32 msg_enable; + u8 reserved_01[2]; + u8 reserved_02[2]; + + /* Page for Shadow Registers */ + void *shadow_reg_virt_addr; + dma_addr_t shadow_reg_phy_addr; + + /* Net Request Queue */ + u32 req_q_size; + u32 reserved_03; + struct ob_mac_iocb_req *req_q_virt_addr; + dma_addr_t req_q_phy_addr; + u16 req_producer_index; + u16 reserved_04; + u16 *preq_consumer_index; + u32 req_consumer_index_phy_addr_high; + u32 req_consumer_index_phy_addr_low; + atomic_t tx_count; + struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES]; + + /* Net Response Queue */ + u32 rsp_q_size; + u32 eeprom_cmd_data; + struct net_rsp_iocb *rsp_q_virt_addr; + dma_addr_t rsp_q_phy_addr; + struct net_rsp_iocb *rsp_current; + u16 rsp_consumer_index; + u16 reserved_06; + u32 *prsp_producer_index; + u32 rsp_producer_index_phy_addr_high; + u32 rsp_producer_index_phy_addr_low; + + /* Large Buffer Queue */ + u32 lrg_buf_q_alloc_size; + u32 lrg_buf_q_size; + void *lrg_buf_q_alloc_virt_addr; + void *lrg_buf_q_virt_addr; + dma_addr_t lrg_buf_q_alloc_phy_addr; + dma_addr_t lrg_buf_q_phy_addr; + u32 lrg_buf_q_producer_index; + u32 lrg_buf_release_cnt; + struct bufq_addr_element *lrg_buf_next_free; + + /* Large (Receive) Buffers */ + struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS]; + struct ql_rcv_buf_cb *lrg_buf_free_head; + struct ql_rcv_buf_cb *lrg_buf_free_tail; + u32 lrg_buf_free_count; + u32 lrg_buffer_len; + u32 lrg_buf_index; + u32 lrg_buf_skb_check; + + /* Small Buffer Queue */ + u32 small_buf_q_alloc_size; + u32 small_buf_q_size; + u32 small_buf_q_producer_index; + void *small_buf_q_alloc_virt_addr; + void *small_buf_q_virt_addr; + dma_addr_t small_buf_q_alloc_phy_addr; + dma_addr_t small_buf_q_phy_addr; + u32 small_buf_index; + + /* Small (Receive) Buffers */ + void *small_buf_virt_addr; + dma_addr_t small_buf_phy_addr; + u32 small_buf_phy_addr_low; + u32 small_buf_phy_addr_high; + u32 small_buf_release_cnt; + u32 small_buf_total_size; + + /* ISR related, saves status for DPC. 
*/ + u32 control_status; + + struct eeprom_data nvram_data; + struct timer_list ioctl_timer; + u32 port_link_state; + u32 last_rsp_offset; + + /* 4022 specific */ + u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ + u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ + u32 mac_ob_opcode; /* Opcode to use on mac transmission */ + u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */ + u32 update_ob_opcode; /* Opcode to use for updating NCB */ + u32 mb_bit_mask; /* MA Bits mask to use on transmission */ + u32 numPorts; + struct net_device_stats stats; + struct workqueue_struct *workqueue; + struct work_struct reset_work; + struct work_struct tx_timeout_work; + u32 max_frame_size; +}; + +#endif /* _QLA3XXX_H_ */ diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index de91609ca11..8f8799c3f9d 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -121,6 +121,7 @@ static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, { 0 } }; diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c index 3a1b7131681..9a540e2092b 100644 --- a/drivers/net/slhc.c +++ b/drivers/net/slhc.c @@ -94,27 +94,23 @@ slhc_init(int rslots, int tslots) register struct cstate *ts; struct slcompress *comp; - comp = (struct slcompress *)kmalloc(sizeof(struct slcompress), - GFP_KERNEL); + comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL); if (! comp) goto out_fail; - memset(comp, 0, sizeof(struct slcompress)); if ( rslots > 0 && rslots < 256 ) { size_t rsize = rslots * sizeof(struct cstate); - comp->rstate = (struct cstate *) kmalloc(rsize, GFP_KERNEL); + comp->rstate = kzalloc(rsize, GFP_KERNEL); if (! comp->rstate) goto out_free; - memset(comp->rstate, 0, rsize); comp->rslot_limit = rslots - 1; } if ( tslots > 0 && tslots < 256 ) { size_t tsize = tslots * sizeof(struct cstate); - comp->tstate = (struct cstate *) kmalloc(tsize, GFP_KERNEL); + comp->tstate = kzalloc(tsize, GFP_KERNEL); if (! 
comp->tstate) goto out_free2; - memset(comp->tstate, 0, tsize); comp->tslot_limit = tslots - 1; } @@ -141,9 +137,9 @@ slhc_init(int rslots, int tslots) return comp; out_free2: - kfree((unsigned char *)comp->rstate); + kfree(comp->rstate); out_free: - kfree((unsigned char *)comp); + kfree(comp); out_fail: return NULL; } @@ -700,20 +696,6 @@ EXPORT_SYMBOL(slhc_compress); EXPORT_SYMBOL(slhc_uncompress); EXPORT_SYMBOL(slhc_toss); -#ifdef MODULE - -int init_module(void) -{ - printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California\n"); - return 0; -} - -void cleanup_module(void) -{ - return; -} - -#endif /* MODULE */ #else /* CONFIG_INET */ diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index fd64b2b3e99..c4c720e2d4c 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -1702,7 +1702,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8 static int __init uli526x_init_module(void) { - int rc; printk(version); printed_version = 1; @@ -1714,22 +1713,19 @@ static int __init uli526x_init_module(void) if (cr6set) uli526x_cr6_user_set = cr6set; - switch(mode) { + switch (mode) { case ULI526X_10MHF: case ULI526X_100MHF: case ULI526X_10MFD: case ULI526X_100MFD: uli526x_media_mode = mode; break; - default:uli526x_media_mode = ULI526X_AUTO; + default: + uli526x_media_mode = ULI526X_AUTO; break; } - rc = pci_module_init(&uli526x_driver); - if (rc < 0) - return rc; - - return 0; + return pci_register_driver(&uli526x_driver); } diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index a4dd1394271..16befbcea58 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -3950,13 +3950,11 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) { pRsp->rsp0 = IN4500(ai, RESP0); pRsp->rsp1 = IN4500(ai, RESP1); pRsp->rsp2 = IN4500(ai, RESP2); - if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) { - airo_print_err(ai->dev->name, "cmd= %x\n", pCmd->cmd); - airo_print_err(ai->dev->name, "status= %x\n", pRsp->status); - airo_print_err(ai->dev->name, "Rsp0= %x\n", pRsp->rsp0); - airo_print_err(ai->dev->name, "Rsp1= %x\n", pRsp->rsp1); - airo_print_err(ai->dev->name, "Rsp2= %x\n", pRsp->rsp2); - } + if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) + airo_print_err(ai->dev->name, + "cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x", + pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1, + pRsp->rsp2); // clear stuck command busy if necessary if (IN4500(ai, COMMAND) & COMMAND_BUSY) { diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h index 17a56828e23..ee6571ed706 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx.h @@ -649,6 +649,19 @@ enum { #define bcm43xx_status(bcm) atomic_read(&(bcm)->init_status) #define bcm43xx_set_status(bcm, stat) atomic_set(&(bcm)->init_status, (stat)) +/* *** THEORY OF LOCKING *** + * + * We have two different locks in the bcm43xx driver. + * => bcm->mutex: General sleeping mutex. Protects struct bcm43xx_private + * and the device registers. This mutex does _not_ protect + * against concurrency from the IRQ handler. + * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency. + * + * Please note that, if you only take the irq_lock, you are not protected + * against concurrency from the periodic work handlers. + * Most times you want to take _both_ locks. 
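+ *
+ * The resulting pattern, as used by the conversions throughout this
+ * patch (debugfs, sysfs and wx below), is:
+ *
+ *	mutex_lock(&bcm->mutex);
+ *	spin_lock_irqsave(&bcm->irq_lock, flags);
+ *	... touch bcm43xx_private and/or device registers ...
+ *	spin_unlock_irqrestore(&bcm->irq_lock, flags);
+ *	mutex_unlock(&bcm->mutex);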
+ */ + struct bcm43xx_private { struct ieee80211_device *ieee; struct ieee80211softmac_device *softmac; @@ -659,7 +672,6 @@ struct bcm43xx_private { void __iomem *mmio_addr; - /* Locking, see "theory of locking" text below. */ spinlock_t irq_lock; struct mutex mutex; @@ -691,6 +703,7 @@ struct bcm43xx_private { struct bcm43xx_sprominfo sprom; #define BCM43xx_NR_LEDS 4 struct bcm43xx_led leds[BCM43xx_NR_LEDS]; + spinlock_t leds_lock; /* The currently active core. */ struct bcm43xx_coreinfo *current_core; @@ -763,55 +776,6 @@ struct bcm43xx_private { }; -/* *** THEORY OF LOCKING *** - * - * We have two different locks in the bcm43xx driver. - * => bcm->mutex: General sleeping mutex. Protects struct bcm43xx_private - * and the device registers. - * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency. - * - * We have three types of helper function pairs to utilize these locks. - * (Always use the helper functions.) - * 1) bcm43xx_{un}lock_noirq(): - * Takes bcm->mutex. Does _not_ protect against IRQ concurrency, - * so it is almost always unsafe, if device IRQs are enabled. - * So only use this, if device IRQs are masked. - * Locking may sleep. - * You can sleep within the critical section. - * 2) bcm43xx_{un}lock_irqonly(): - * Takes bcm->irq_lock. Does _not_ protect against - * bcm43xx_lock_noirq() critical sections. - * Does only protect against the IRQ handler path and other - * irqonly() critical sections. - * Locking does not sleep. - * You must not sleep within the critical section. - * 3) bcm43xx_{un}lock_irqsafe(): - * This is the cummulative lock and takes both, mutex and irq_lock. - * Protects against noirq() and irqonly() critical sections (and - * the IRQ handler path). - * Locking may sleep. - * You must not sleep within the critical section. 
- */ - -/* Lock type 1 */ -#define bcm43xx_lock_noirq(bcm) mutex_lock(&(bcm)->mutex) -#define bcm43xx_unlock_noirq(bcm) mutex_unlock(&(bcm)->mutex) -/* Lock type 2 */ -#define bcm43xx_lock_irqonly(bcm, flags) \ - spin_lock_irqsave(&(bcm)->irq_lock, flags) -#define bcm43xx_unlock_irqonly(bcm, flags) \ - spin_unlock_irqrestore(&(bcm)->irq_lock, flags) -/* Lock type 3 */ -#define bcm43xx_lock_irqsafe(bcm, flags) do { \ - bcm43xx_lock_noirq(bcm); \ - bcm43xx_lock_irqonly(bcm, flags); \ - } while (0) -#define bcm43xx_unlock_irqsafe(bcm, flags) do { \ - bcm43xx_unlock_irqonly(bcm, flags); \ - bcm43xx_unlock_noirq(bcm); \ - } while (0) - - static inline struct bcm43xx_private * bcm43xx_priv(struct net_device *dev) { diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c index ce2e40b29b4..2600ee4b803 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c @@ -77,7 +77,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf, down(&big_buffer_sem); - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { fappend("Board not initialized.\n"); goto out; @@ -121,7 +122,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf, fappend("\n"); out: - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); up(&big_buffer_sem); return res; @@ -159,7 +161,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf, unsigned long flags; down(&big_buffer_sem); - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { fappend("Board not initialized.\n"); goto out; @@ -169,7 +172,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf, fappend("boardflags: 0x%04x\n", bcm->sprom.boardflags); out: - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); up(&big_buffer_sem); return res; @@ -188,7 +192,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf, u64 tsf; down(&big_buffer_sem); - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { fappend("Board not initialized.\n"); goto out; @@ -199,7 +204,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf, (unsigned int)(tsf & 0xFFFFFFFFULL)); out: - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); up(&big_buffer_sem); return res; @@ -221,7 +227,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf, res = -EFAULT; goto out_up; } - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { printk(KERN_INFO PFX "debugfs: Board not initialized.\n"); res = -EFAULT; @@ -237,7 +244,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf, res = buf_size; out_unlock: - bcm43xx_unlock_irqsafe(bcm, flags); + 
spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); out_up: up(&big_buffer_sem); return res; @@ -258,7 +266,8 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf, int i, cnt, j = 0; down(&big_buffer_sem); - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); fappend("Last %d logged xmitstatus blobs (Latest first):\n\n", BCM43xx_NR_LOGGED_XMITSTATUS); @@ -294,14 +303,15 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf, i = BCM43xx_NR_LOGGED_XMITSTATUS - 1; } - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); - bcm43xx_lock_irqsafe(bcm, flags); + spin_lock_irqsave(&bcm->irq_lock, flags); if (*ppos == pos) { /* Done. Drop the copied data. */ e->xmitstatus_printing = 0; } - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); up(&big_buffer_sem); return res; } diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c index ec80692d638..c3f90c8563d 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c @@ -51,12 +51,12 @@ static void bcm43xx_led_blink(unsigned long d) struct bcm43xx_private *bcm = led->bcm; unsigned long flags; - bcm43xx_lock_irqonly(bcm, flags); + spin_lock_irqsave(&bcm->leds_lock, flags); if (led->blink_interval) { bcm43xx_led_changestate(led); mod_timer(&led->blink_timer, jiffies + led->blink_interval); } - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->leds_lock, flags); } static void bcm43xx_led_blink_start(struct bcm43xx_led *led, @@ -177,7 +177,9 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity) int i, turn_on; unsigned long interval = 0; u16 ledctl; + unsigned long flags; + spin_lock_irqsave(&bcm->leds_lock, flags); ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL); for (i = 0; i < BCM43xx_NR_LEDS; i++) { led = &(bcm->leds[i]); @@ -266,6 +268,7 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity) ledctl &= ~(1 << i); } bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl); + spin_unlock_irqrestore(&bcm->leds_lock, flags); } void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on) @@ -274,7 +277,9 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on) u16 ledctl; int i; int bit_on; + unsigned long flags; + spin_lock_irqsave(&bcm->leds_lock, flags); ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL); for (i = 0; i < BCM43xx_NR_LEDS; i++) { led = &(bcm->leds[i]); @@ -290,4 +295,5 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on) ledctl &= ~(1 << i); } bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl); + spin_unlock_irqrestore(&bcm->leds_lock, flags); } diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index df317c1e12a..ab3a0ee9fac 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -514,13 +514,13 @@ static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm, u32 *old unsigned long flags; u32 old; - bcm43xx_lock_irqonly(bcm, flags); + spin_lock_irqsave(&bcm->irq_lock, flags); if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) { - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); return -EBUSY; } old = 
bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); bcm43xx_synchronize_irq(bcm); if (oldstate) @@ -1720,7 +1720,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) # define bcmirq_handled(irq) do { /* nothing */ } while (0) #endif /* CONFIG_BCM43XX_DEBUG*/ - bcm43xx_lock_irqonly(bcm, flags); + spin_lock_irqsave(&bcm->irq_lock, flags); reason = bcm->irq_reason; dma_reason[0] = bcm->dma_reason[0]; dma_reason[1] = bcm->dma_reason[1]; @@ -1746,7 +1746,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) dma_reason[2], dma_reason[3]); bcm43xx_controller_restart(bcm, "DMA error"); mmiowb(); - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); return; } if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) | @@ -1834,7 +1834,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) bcm43xx_leds_update(bcm, activity); bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate); mmiowb(); - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); } static void pio_irq_workaround(struct bcm43xx_private *bcm, @@ -3182,25 +3182,26 @@ static void bcm43xx_periodic_work_handler(void *d) /* Periodic work will take a long time, so we want it to * be preemtible. */ - bcm43xx_lock_irqonly(bcm, flags); netif_stop_queue(bcm->net_dev); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_using_pio(bcm)) bcm43xx_pio_freeze_txqueues(bcm); savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); - bcm43xx_unlock_irqonly(bcm, flags); - bcm43xx_lock_noirq(bcm); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_lock(&bcm->mutex); bcm43xx_synchronize_irq(bcm); } else { /* Periodic work should take short time, so we want low * locking overhead. 
*/ - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); } do_periodic_work(bcm); if (badness > BADNESS_LIMIT) { - bcm43xx_lock_irqonly(bcm, flags); + spin_lock_irqsave(&bcm->irq_lock, flags); if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) { tasklet_enable(&bcm->isr_tasklet); bcm43xx_interrupt_enable(bcm, savedirqs); @@ -3208,13 +3209,10 @@ static void bcm43xx_periodic_work_handler(void *d) bcm43xx_pio_thaw_txqueues(bcm); } netif_wake_queue(bcm->net_dev); - mmiowb(); - bcm43xx_unlock_irqonly(bcm, flags); - bcm43xx_unlock_noirq(bcm); - } else { - mmiowb(); - bcm43xx_unlock_irqsafe(bcm, flags); } + mmiowb(); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); } static void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) @@ -3276,7 +3274,7 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm) { int i, err; - bcm43xx_lock_noirq(bcm); + mutex_lock(&bcm->mutex); bcm43xx_sysfs_unregister(bcm); bcm43xx_periodic_tasks_delete(bcm); @@ -3297,7 +3295,7 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm) bcm43xx_pctl_set_crystal(bcm, 0); bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT); - bcm43xx_unlock_noirq(bcm); + mutex_unlock(&bcm->mutex); } static int bcm43xx_init_board(struct bcm43xx_private *bcm) @@ -3307,7 +3305,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm) might_sleep(); - bcm43xx_lock_noirq(bcm); + mutex_lock(&bcm->mutex); bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING); err = bcm43xx_pctl_set_crystal(bcm, 1); @@ -3389,7 +3387,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm) assert(err == 0); out: - bcm43xx_unlock_noirq(bcm); + mutex_unlock(&bcm->mutex); return err; @@ -3647,7 +3645,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev, struct bcm43xx_radioinfo *radio; unsigned long flags; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { bcm43xx_mac_suspend(bcm); bcm43xx_radio_selectchannel(bcm, channel, 0); @@ -3656,7 +3655,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev, radio = bcm43xx_current_radio(bcm); radio->initial_channel = channel; } - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); } /* set_security() callback in struct ieee80211_device */ @@ -3670,7 +3670,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev, dprintk(KERN_INFO PFX "set security called"); - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); for (keyidx = 0; keyidx<WEP_KEYS; keyidx++) if (sec->flags & (1<<keyidx)) { @@ -3739,7 +3740,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev, } else bcm43xx_clear_keys(bcm); } - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); } /* hard_start_xmit() callback in struct ieee80211_device */ @@ -3751,10 +3753,10 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb, int err = -ENODEV; unsigned long flags; - bcm43xx_lock_irqonly(bcm, flags); + spin_lock_irqsave(&bcm->irq_lock, flags); if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) err = bcm43xx_tx(bcm, txb); - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); return err; } @@ -3769,9 +3771,9 @@ static void 
bcm43xx_net_tx_timeout(struct net_device *net_dev) struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); unsigned long flags; - bcm43xx_lock_irqonly(bcm, flags); + spin_lock_irqsave(&bcm->irq_lock, flags); bcm43xx_controller_restart(bcm, "TX timeout"); - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -3822,6 +3824,7 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm, bcm->net_dev = net_dev; bcm->bad_frames_preempt = modparam_bad_frames_preempt; spin_lock_init(&bcm->irq_lock); + spin_lock_init(&bcm->leds_lock); mutex_init(&bcm->mutex); tasklet_init(&bcm->isr_tasklet, (void (*)(unsigned long))bcm43xx_interrupt_tasklet, @@ -4002,16 +4005,13 @@ static int bcm43xx_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *net_dev = pci_get_drvdata(pdev); struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); - unsigned long flags; int try_to_shutdown = 0, err; dprintk(KERN_INFO PFX "Suspending...\n"); - bcm43xx_lock_irqsafe(bcm, flags); bcm->was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); if (bcm->was_initialized) try_to_shutdown = 1; - bcm43xx_unlock_irqsafe(bcm, flags); netif_device_detach(net_dev); if (try_to_shutdown) { diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c index f8200deecc8..eafd0f66268 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c @@ -81,6 +81,16 @@ static const s8 bcm43xx_tssi2dbm_g_table[] = { static void bcm43xx_phy_initg(struct bcm43xx_private *bcm); +static inline +void bcm43xx_voluntary_preempt(void) +{ + assert(!in_atomic() && !in_irq() && + !in_interrupt() && !irqs_disabled()); +#ifndef CONFIG_PREEMPT + cond_resched(); +#endif /* CONFIG_PREEMPT */ +} + void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm) { struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); @@ -133,22 +143,14 @@ void bcm43xx_phy_write(struct bcm43xx_private *bcm, u16 offset, u16 val) void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm) { struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); - unsigned long flags; bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* Dummy read. */ if (phy->calibrated) return; if (phy->type == BCM43xx_PHYTYPE_G && phy->rev == 1) { - /* We do not want to be preempted while calibrating - * the hardware. 
- */ - local_irq_save(flags); - bcm43xx_wireless_core_reset(bcm, 0); bcm43xx_phy_initg(bcm); bcm43xx_wireless_core_reset(bcm, 1); - - local_irq_restore(flags); } phy->calibrated = 1; } @@ -1299,7 +1301,9 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm) { int i; u16 ret = 0; + unsigned long flags; + local_irq_save(flags); for (i = 0; i < 10; i++){ bcm43xx_phy_write(bcm, 0x0015, 0xAFA0); udelay(1); @@ -1309,6 +1313,8 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm) udelay(40); ret += bcm43xx_phy_read(bcm, 0x002C); } + local_irq_restore(flags); + bcm43xx_voluntary_preempt(); return ret; } @@ -1435,6 +1441,7 @@ u16 bcm43xx_phy_lo_g_deviation_subval(struct bcm43xx_private *bcm, u16 control) } ret = bcm43xx_phy_read(bcm, 0x002D); local_irq_restore(flags); + bcm43xx_voluntary_preempt(); return ret; } @@ -1760,6 +1767,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm) bcm43xx_radio_write16(bcm, 0x43, i); bcm43xx_radio_write16(bcm, 0x52, radio->txctl2); udelay(10); + bcm43xx_voluntary_preempt(); bcm43xx_phy_set_baseband_attenuation(bcm, j * 2); @@ -1803,6 +1811,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm) radio->txctl2 | (3/*txctl1*/ << 4));//FIXME: shouldn't txctl1 be zero here and 3 in the loop above? udelay(10); + bcm43xx_voluntary_preempt(); bcm43xx_phy_set_baseband_attenuation(bcm, j * 2); @@ -1824,6 +1833,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm) bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA2); udelay(2); bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA3); + bcm43xx_voluntary_preempt(); } else bcm43xx_phy_write(bcm, 0x0015, r27 | 0xEFA0); bcm43xx_phy_lo_adjust(bcm, is_initializing); @@ -2188,12 +2198,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm) { struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); int err = -ENODEV; - unsigned long flags; - - /* We do not want to be preempted while calibrating - * the hardware. 
- */ - local_irq_save(flags); switch (phy->type) { case BCM43xx_PHYTYPE_A: @@ -2227,7 +2231,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm) err = 0; break; } - local_irq_restore(flags); if (err) printk(KERN_WARNING PFX "Unknown PHYTYPE found!\n"); diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c index 574085c4615..c60c1743ea0 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c @@ -262,7 +262,7 @@ static void tx_tasklet(unsigned long d) int err; u16 txctl; - bcm43xx_lock_irqonly(bcm, flags); + spin_lock_irqsave(&bcm->irq_lock, flags); if (queue->tx_frozen) goto out_unlock; @@ -300,7 +300,7 @@ static void tx_tasklet(unsigned long d) continue; } out_unlock: - bcm43xx_unlock_irqonly(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); } static void setup_txqueues(struct bcm43xx_pioqueue *queue) diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c index 6a23bdc7541..cc1ff3c6f14 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c @@ -120,12 +120,14 @@ static ssize_t bcm43xx_attr_sprom_show(struct device *dev, GFP_KERNEL); if (!sprom) return -ENOMEM; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); err = bcm43xx_sprom_read(bcm, sprom); if (!err) err = sprom2hex(sprom, buf, PAGE_SIZE); mmiowb(); - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); kfree(sprom); return err; @@ -150,10 +152,14 @@ static ssize_t bcm43xx_attr_sprom_store(struct device *dev, err = hex2sprom(sprom, buf, count); if (err) goto out_kfree; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); + spin_lock(&bcm->leds_lock); err = bcm43xx_sprom_write(bcm, sprom); mmiowb(); - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock(&bcm->leds_lock); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); out_kfree: kfree(sprom); @@ -176,7 +182,7 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - bcm43xx_lock_noirq(bcm); + mutex_lock(&bcm->mutex); switch (bcm43xx_current_radio(bcm)->interfmode) { case BCM43xx_RADIO_INTERFMODE_NONE: @@ -193,7 +199,7 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev, } err = 0; - bcm43xx_unlock_noirq(bcm); + mutex_unlock(&bcm->mutex); return err ? err : count; @@ -229,7 +235,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev, return -EINVAL; } - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); err = bcm43xx_radio_set_interference_mitigation(bcm, mode); if (err) { @@ -237,7 +244,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev, "supported by device\n"); } mmiowb(); - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return err ? 
err : count; } @@ -257,7 +265,7 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - bcm43xx_lock_noirq(bcm); + mutex_lock(&bcm->mutex); if (bcm->short_preamble) count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n"); @@ -265,7 +273,7 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev, count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n"); err = 0; - bcm43xx_unlock_noirq(bcm); + mutex_unlock(&bcm->mutex); return err ? err : count; } @@ -285,12 +293,14 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev, value = get_boolean(buf, count); if (value < 0) return value; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); bcm->short_preamble = !!value; err = 0; - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return err ? err : count; } diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index 5c36e29efff..8ffd760dc83 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c @@ -47,6 +47,9 @@ #define BCM43xx_WX_VERSION 18 #define MAX_WX_STRING 80 +/* FIXME: the next line is a guess as to what the maximum value of RX power + (in dBm) might be */ +#define RX_POWER_MAX -10 static int bcm43xx_wx_get_name(struct net_device *net_dev, @@ -56,12 +59,11 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev, { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); int i; - unsigned long flags; struct bcm43xx_phyinfo *phy; char suffix[7] = { 0 }; int have_a = 0, have_b = 0, have_g = 0; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); for (i = 0; i < bcm->nr_80211_available; i++) { phy = &(bcm->core_80211_ext[i].phy); switch (phy->type) { @@ -77,7 +79,7 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev, assert(0); } } - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); i = 0; if (have_a) { @@ -111,7 +113,9 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev, int freq; int err = -EINVAL; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); + if ((data->freq.m >= 0) && (data->freq.m <= 1000)) { channel = data->freq.m; freq = bcm43xx_channel_to_freq(bcm, channel); @@ -131,7 +135,8 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev, err = 0; } out_unlock: - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return err; } @@ -143,11 +148,10 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev, { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); struct bcm43xx_radioinfo *radio; - unsigned long flags; int err = -ENODEV; u16 channel; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); radio = bcm43xx_current_radio(bcm); channel = radio->channel; if (channel == 0xFF) { @@ -162,7 +166,7 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev, err = 0; out_unlock: - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); return err; } @@ -180,13 +184,15 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev, if (mode == IW_MODE_AUTO) mode = BCM43xx_INITIAL_IWMODE; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { if (bcm->ieee->iw_mode != mode) 
bcm43xx_set_iwmode(bcm, mode); } else bcm->ieee->iw_mode = mode; - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return 0; } @@ -197,11 +203,10 @@ static int bcm43xx_wx_get_mode(struct net_device *net_dev, char *extra) { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); - unsigned long flags; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); data->mode = bcm->ieee->iw_mode; - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); return 0; } @@ -214,7 +219,6 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev, struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); struct iw_range *range = (struct iw_range *)extra; const struct ieee80211_geo *geo; - unsigned long flags; int i, j; struct bcm43xx_phyinfo *phy; @@ -227,14 +231,14 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev, range->max_qual.qual = 100; /* TODO: Real max RSSI */ - range->max_qual.level = 3; - range->max_qual.noise = 100; - range->max_qual.updated = 7; + range->max_qual.level = 0; + range->max_qual.noise = 0; + range->max_qual.updated = IW_QUAL_ALL_UPDATED; - range->avg_qual.qual = 70; - range->avg_qual.level = 2; - range->avg_qual.noise = 40; - range->avg_qual.updated = 7; + range->avg_qual.qual = 50; + range->avg_qual.level = 0; + range->avg_qual.noise = 0; + range->avg_qual.updated = IW_QUAL_ALL_UPDATED; range->min_rts = BCM43xx_MIN_RTS_THRESHOLD; range->max_rts = BCM43xx_MAX_RTS_THRESHOLD; @@ -254,7 +258,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev, IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); phy = bcm43xx_current_phy(bcm); range->num_bitrates = 0; @@ -301,7 +305,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev, } range->num_frequency = j; - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); return 0; } @@ -314,11 +318,11 @@ static int bcm43xx_wx_set_nick(struct net_device *net_dev, struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); size_t len; - bcm43xx_lock_noirq(bcm); + mutex_lock(&bcm->mutex); len = min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE); memcpy(bcm->nick, extra, len); bcm->nick[len] = '\0'; - bcm43xx_unlock_noirq(bcm); + mutex_unlock(&bcm->mutex); return 0; } @@ -331,12 +335,12 @@ static int bcm43xx_wx_get_nick(struct net_device *net_dev, struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); size_t len; - bcm43xx_lock_noirq(bcm); + mutex_lock(&bcm->mutex); len = strlen(bcm->nick) + 1; memcpy(extra, bcm->nick, len); data->data.length = (__u16)len; data->data.flags = 1; - bcm43xx_unlock_noirq(bcm); + mutex_unlock(&bcm->mutex); return 0; } @@ -350,7 +354,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev, unsigned long flags; int err = -EINVAL; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (data->rts.disabled) { bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD; err = 0; @@ -361,7 +366,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev, err = 0; } } - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return err; } @@ -372,13 +378,12 @@ static int bcm43xx_wx_get_rts(struct net_device *net_dev, char *extra) { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); - unsigned long flags; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); data->rts.value = bcm->rts_threshold; data->rts.fixed = 0; 
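The bcm43xx hunks in this patch all repeat one locking conversion: the driver's old bcm43xx_lock_irqsafe()/_noirq() wrappers give way to an explicit mutex for the sleepable sysfs/wext configuration paths, with the irq_lock spinlock taken in addition only around state that the interrupt handler can also touch. A minimal sketch of that idiom, assuming kernel context; struct my_dev and my_setting are illustrative names, not symbols from the driver:

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct my_dev {
	struct mutex mutex;	/* serializes sysfs/wext configuration paths */
	spinlock_t irq_lock;	/* protects state shared with the IRQ handler */
	int my_setting;
};

static void my_dev_init(struct my_dev *dev)
{
	/* same pairing as spin_lock_init()/mutex_init() in
	 * bcm43xx_init_private() above */
	mutex_init(&dev->mutex);
	spin_lock_init(&dev->irq_lock);
	dev->my_setting = 0;
}

static void my_dev_set(struct my_dev *dev, int value)
{
	unsigned long flags;

	mutex_lock(&dev->mutex);			/* may sleep */
	spin_lock_irqsave(&dev->irq_lock, flags);	/* ISR reads this */
	dev->my_setting = value;
	spin_unlock_irqrestore(&dev->irq_lock, flags);
	mutex_unlock(&dev->mutex);
}

static int my_dev_get(struct my_dev *dev)
{
	int value;

	mutex_lock(&dev->mutex);	/* pure reads can skip the spinlock */
	value = dev->my_setting;
	mutex_unlock(&dev->mutex);
	return value;
}

This is the shape of, for example, bcm43xx_attr_interfmode_store() (mutex plus spinlock) versus bcm43xx_attr_interfmode_show() (mutex only) in the sysfs hunks above.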
data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD); - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); return 0; } @@ -392,7 +397,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev, unsigned long flags; int err = -EINVAL; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (data->frag.disabled) { bcm->ieee->fts = MAX_FRAG_THRESHOLD; err = 0; @@ -403,7 +409,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev, err = 0; } } - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return err; } @@ -414,13 +421,12 @@ static int bcm43xx_wx_get_frag(struct net_device *net_dev, char *extra) { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); - unsigned long flags; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); data->frag.value = bcm->ieee->fts; data->frag.fixed = 0; data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD); - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); return 0; } @@ -442,7 +448,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev, return -EOPNOTSUPP; } - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) goto out_unlock; radio = bcm43xx_current_radio(bcm); @@ -466,7 +473,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev, err = 0; out_unlock: - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return err; } @@ -478,10 +486,9 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev, { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); struct bcm43xx_radioinfo *radio; - unsigned long flags; int err = -ENODEV; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) goto out_unlock; radio = bcm43xx_current_radio(bcm); @@ -493,7 +500,7 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev, err = 0; out_unlock: - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); return err; } @@ -580,7 +587,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev, return -EINVAL; } - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { err = bcm43xx_radio_set_interference_mitigation(bcm, mode); if (err) { @@ -595,7 +603,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev, } else bcm43xx_current_radio(bcm)->interfmode = mode; } - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return err; } @@ -606,12 +615,11 @@ static int bcm43xx_wx_get_interfmode(struct net_device *net_dev, char *extra) { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); - unsigned long flags; int mode; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); mode = bcm43xx_current_radio(bcm)->interfmode; - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); switch (mode) { case BCM43xx_RADIO_INTERFMODE_NONE: @@ -641,9 +649,11 @@ static int bcm43xx_wx_set_shortpreamble(struct net_device *net_dev, int on; on = *((int *)extra); - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); bcm->short_preamble = !!on; - bcm43xx_unlock_irqsafe(bcm, flags); + 
spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return 0; } @@ -654,12 +664,11 @@ static int bcm43xx_wx_get_shortpreamble(struct net_device *net_dev, char *extra) { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); - unsigned long flags; int on; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); on = bcm->short_preamble; - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); if (on) strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING); @@ -681,11 +690,13 @@ static int bcm43xx_wx_set_swencryption(struct net_device *net_dev, on = *((int *)extra); - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); bcm->ieee->host_encrypt = !!on; bcm->ieee->host_decrypt = !!on; bcm->ieee->host_build_iv = !on; - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); return 0; } @@ -696,12 +707,11 @@ static int bcm43xx_wx_get_swencryption(struct net_device *net_dev, char *extra) { struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); - unsigned long flags; int on; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); on = bcm->ieee->host_encrypt; - bcm43xx_unlock_irqsafe(bcm, flags); + mutex_unlock(&bcm->mutex); if (on) strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING); @@ -764,11 +774,13 @@ static int bcm43xx_wx_sprom_read(struct net_device *net_dev, if (!sprom) goto out; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); err = -ENODEV; if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) err = bcm43xx_sprom_read(bcm, sprom); - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); if (!err) data->data.length = sprom2hex(sprom, extra); kfree(sprom); @@ -809,11 +821,15 @@ static int bcm43xx_wx_sprom_write(struct net_device *net_dev, if (err) goto out_kfree; - bcm43xx_lock_irqsafe(bcm, flags); + mutex_lock(&bcm->mutex); + spin_lock_irqsave(&bcm->irq_lock, flags); + spin_lock(&bcm->leds_lock); err = -ENODEV; if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) err = bcm43xx_sprom_write(bcm, sprom); - bcm43xx_unlock_irqsafe(bcm, flags); + spin_unlock(&bcm->leds_lock); + spin_unlock_irqrestore(&bcm->irq_lock, flags); + mutex_unlock(&bcm->mutex); out_kfree: kfree(sprom); out: @@ -827,6 +843,9 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); struct iw_statistics *wstats; + struct ieee80211_network *network = NULL; + static int tmp_level = 0; + unsigned long flags; wstats = &bcm->stats.wstats; if (!mac->associated) { @@ -844,16 +863,25 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d wstats->qual.level = 0; wstats->qual.noise = 0; wstats->qual.updated = 7; - wstats->qual.updated |= IW_QUAL_NOISE_INVALID | - IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; + wstats->qual.updated |= IW_QUAL_ALL_UPDATED; return wstats; } /* fill in the real statistics when iface associated */ - wstats->qual.qual = 100; // TODO: get the real signal quality - wstats->qual.level = 3 - bcm->stats.link_quality; + spin_lock_irqsave(&mac->ieee->lock, flags); + list_for_each_entry(network, &mac->ieee->network_list, list) { + if (!memcmp(mac->associnfo.bssid, network->bssid, ETH_ALEN)) { + if (!tmp_level) /* get initial value */ + tmp_level = network->stats.rssi; 
+ else /* smooth results */ + tmp_level = (7 * tmp_level + network->stats.rssi)/8; + break; + } + } + spin_unlock_irqrestore(&mac->ieee->lock, flags); + wstats->qual.level = tmp_level; + wstats->qual.qual = 100 + tmp_level - RX_POWER_MAX; // TODO: get the real signal quality wstats->qual.noise = bcm->stats.noise; - wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | - IW_QUAL_NOISE_UPDATED; + wstats->qual.updated = IW_QUAL_ALL_UPDATED; wstats->discard.code = bcm->ieee->ieee_stats.rx_discards_undecryptable; wstats->discard.retries = bcm->ieee->ieee_stats.tx_retry_limit_exceeded; wstats->discard.nwid = bcm->ieee->ieee_stats.tx_discards_wrong_sa; diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index b3300ffe4ee..758459e72f3 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -2667,7 +2667,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv) IPW_DEBUG_FW(">> :\n"); - //set the Stop and Abort bit + /* set the Stop and Abort bit */ control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); priv->sram_desc.last_cb_index = 0; @@ -3002,8 +3002,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) if (rc < 0) return rc; -// spin_lock_irqsave(&priv->lock, flags); - for (addr = IPW_SHARED_LOWER_BOUND; addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { ipw_write32(priv, addr, 0); @@ -3097,8 +3095,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) firmware have problem getting alive resp. */ ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); -// spin_unlock_irqrestore(&priv->lock, flags); - return rc; } @@ -6387,13 +6383,6 @@ static int ipw_wx_set_genie(struct net_device *dev, (wrqu->data.length && extra == NULL)) return -EINVAL; - //mutex_lock(&priv->mutex); - - //if (!ieee->wpa_enabled) { - // err = -EOPNOTSUPP; - // goto out; - //} - if (wrqu->data.length) { buf = kmalloc(wrqu->data.length, GFP_KERNEL); if (buf == NULL) { @@ -6413,7 +6402,6 @@ static int ipw_wx_set_genie(struct net_device *dev, ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); out: - //mutex_unlock(&priv->mutex); return err; } @@ -6426,13 +6414,6 @@ static int ipw_wx_get_genie(struct net_device *dev, struct ieee80211_device *ieee = priv->ieee; int err = 0; - //mutex_lock(&priv->mutex); - - //if (!ieee->wpa_enabled) { - // err = -EOPNOTSUPP; - // goto out; - //} - if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { wrqu->data.length = 0; goto out; @@ -6447,7 +6428,6 @@ static int ipw_wx_get_genie(struct net_device *dev, memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); out: - //mutex_unlock(&priv->mutex); return err; } @@ -6558,7 +6538,6 @@ static int ipw_wx_set_auth(struct net_device *dev, ieee->ieee802_1x = param->value; break; - //case IW_AUTH_ROAMING_CONTROL: case IW_AUTH_PRIVACY_INVOKED: ieee->privacy_invoked = param->value; break; @@ -6680,7 +6659,7 @@ static int ipw_wx_set_mlme(struct net_device *dev, switch (mlme->cmd) { case IW_MLME_DEAUTH: - // silently ignore + /* silently ignore */ break; case IW_MLME_DISASSOC: @@ -9766,7 +9745,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, return 0; } -#endif // CONFIG_IPW2200_MONITOR +#endif /* CONFIG_IPW2200_MONITOR */ static int ipw_wx_reset(struct net_device *dev, struct iw_request_info *info, @@ -10009,7 +9988,7 @@ static void init_sys_config(struct ipw_sys_config *sys_config) sys_config->dot11g_auto_detection = 0; sys_config->enable_cts_to_self = 0; 
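The signal-level smoothing added to bcm43xx_get_wireless_stats() above is an integer exponentially weighted moving average with weight 1/8: each report moves the stored level one eighth of the way toward the newest RSSI sample, so a single outlier barely moves the reading. A self-contained userspace sketch of the same filter follows; note that the kernel code keeps its state in a static local, which is shared by every interface the driver serves, so a per-device field would be cleaner.

#include <stdio.h>

static int smooth_rssi(int level, int sample)
{
	if (level == 0)		/* first sample seeds the average */
		return sample;
	return (7 * level + sample) / 8;	/* same 7/8 + 1/8 blend */
}

int main(void)
{
	int level = 0;
	int samples[] = { -40, -42, -80, -41, -43 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		level = smooth_rssi(level, samples[i]);
		printf("rssi %4d -> smoothed %4d\n", samples[i], level);
	}
	/* the lone -80 dBm outlier only drags the average to about -45 */
	return 0;
}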
sys_config->bt_coexist_collision_thr = 0; - sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 + sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */ sys_config->silence_threshold = 0x1e; } diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 989599ad33e..0c30fe7e8f7 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -35,10 +35,14 @@ #include <net/iw_handler.h> /* New driver API */ +#define KEY_SIZE_WEP104 13 /* 104/128-bit WEP keys */ +#define KEY_SIZE_WEP40 5 /* 40/64-bit WEP keys */ +/* KEY_SIZE_TKIP should match isl_oid.h, struct obj_key.key[] size */ +#define KEY_SIZE_TKIP 32 /* TKIP keys */ -static void prism54_wpa_ie_add(islpci_private *priv, u8 *bssid, +static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, u8 *wpa_ie, size_t wpa_ie_len); -static size_t prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie); +static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie); static int prism54_set_wpa(struct net_device *, struct iw_request_info *, __u32 *, char *); @@ -468,6 +472,9 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info, range->event_capa[1] = IW_EVENT_CAPA_K_1; range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM); + range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | + IW_ENC_CAPA_CIPHER_TKIP; + if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; @@ -567,6 +574,8 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev, struct iw_event iwe; /* Temporary buffer */ short cap; islpci_private *priv = netdev_priv(ndev); + u8 wpa_ie[MAX_WPA_IE_LEN]; + size_t wpa_ie_len; /* The first entry must be the MAC address */ memcpy(iwe.u.ap_addr.sa_data, bss->address, 6); @@ -627,27 +636,13 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev, current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); - if (priv->wpa) { - u8 wpa_ie[MAX_WPA_IE_LEN]; - char *buf, *p; - size_t wpa_ie_len; - int i; - - wpa_ie_len = prism54_wpa_ie_get(priv, bss->address, wpa_ie); - if (wpa_ie_len > 0 && - (buf = kmalloc(wpa_ie_len * 2 + 10, GFP_ATOMIC))) { - p = buf; - p += sprintf(p, "wpa_ie="); - for (i = 0; i < wpa_ie_len; i++) { - p += sprintf(p, "%02x", wpa_ie[i]); - } - memset(&iwe, 0, sizeof (iwe)); - iwe.cmd = IWEVCUSTOM; - iwe.u.data.length = strlen(buf); - current_ev = iwe_stream_add_point(current_ev, end_buf, - &iwe, buf); - kfree(buf); - } + /* Add WPA/RSN Information Element, if any */ + wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie); + if (wpa_ie_len > 0) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN); + current_ev = iwe_stream_add_point(current_ev, end_buf, + &iwe, wpa_ie); } return current_ev; } @@ -1051,12 +1046,24 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, current_index = r.u; /* Verify that the key is not marked as invalid */ if (!(dwrq->flags & IW_ENCODE_NOKEY)) { - key.length = dwrq->length > sizeof (key.key) ? 
- sizeof (key.key) : dwrq->length; - memcpy(key.key, extra, key.length); - if (key.length == 32) - /* we want WPA-PSK */ + if (dwrq->length > KEY_SIZE_TKIP) { + /* User-provided key data too big */ + return -EINVAL; + } + if (dwrq->length > KEY_SIZE_WEP104) { + /* WPA-PSK TKIP */ key.type = DOT11_PRIV_TKIP; + key.length = KEY_SIZE_TKIP; + } else if (dwrq->length > KEY_SIZE_WEP40) { + /* WEP 104/128 */ + key.length = KEY_SIZE_WEP104; + } else { + /* WEP 40/64 */ + key.length = KEY_SIZE_WEP40; + } + memset(key.key, 0, sizeof (key.key)); + memcpy(key.key, extra, dwrq->length); + if ((index < 0) || (index > 3)) /* no index provided use the current one */ index = current_index; @@ -1210,6 +1217,489 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info, } } +static int prism54_set_genie(struct net_device *ndev, + struct iw_request_info *info, + struct iw_point *data, char *extra) +{ + islpci_private *priv = netdev_priv(ndev); + int alen, ret = 0; + struct obj_attachment *attach; + + if (data->length > MAX_WPA_IE_LEN || + (data->length && extra == NULL)) + return -EINVAL; + + memcpy(priv->wpa_ie, extra, data->length); + priv->wpa_ie_len = data->length; + + alen = sizeof(*attach) + priv->wpa_ie_len; + attach = kzalloc(alen, GFP_KERNEL); + if (attach == NULL) + return -ENOMEM; + +#define WLAN_FC_TYPE_MGMT 0 +#define WLAN_FC_STYPE_ASSOC_REQ 0 +#define WLAN_FC_STYPE_REASSOC_REQ 2 + + /* Note: endianness is covered by mgt_set_varlen */ + attach->type = (WLAN_FC_TYPE_MGMT << 2) | + (WLAN_FC_STYPE_ASSOC_REQ << 4); + attach->id = -1; + attach->size = priv->wpa_ie_len; + memcpy(attach->data, extra, priv->wpa_ie_len); + + ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, + priv->wpa_ie_len); + if (ret == 0) { + attach->type = (WLAN_FC_TYPE_MGMT << 2) | + (WLAN_FC_STYPE_REASSOC_REQ << 4); + + ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, + priv->wpa_ie_len); + if (ret == 0) + printk(KERN_DEBUG "%s: WPA IE Attachment was set\n", + ndev->name); + } + + kfree(attach); + return ret; +} + + +static int prism54_get_genie(struct net_device *ndev, + struct iw_request_info *info, + struct iw_point *data, char *extra) +{ + islpci_private *priv = netdev_priv(ndev); + int len = priv->wpa_ie_len; + + if (len <= 0) { + data->length = 0; + return 0; + } + + if (data->length < len) + return -E2BIG; + + data->length = len; + memcpy(extra, priv->wpa_ie, len); + + return 0; +} + +static int prism54_set_auth(struct net_device *ndev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + islpci_private *priv = netdev_priv(ndev); + struct iw_param *param = &wrqu->param; + u32 mlmelevel = 0, authen = 0, dot1x = 0; + u32 exunencrypt = 0, privinvoked = 0, wpa = 0; + u32 old_wpa; + int ret = 0; + union oid_res_t r; + + if (islpci_get_state(priv) < PRV_STATE_INIT) + return 0; + + /* first get the flags */ + down_write(&priv->mib_sem); + wpa = old_wpa = priv->wpa; + up_write(&priv->mib_sem); + ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); + authen = r.u; + ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); + privinvoked = r.u; + ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); + exunencrypt = r.u; + ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r); + dot1x = r.u; + ret = mgt_get_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, NULL, &r); + mlmelevel = r.u; + + if (ret < 0) + goto out; + + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + case IW_AUTH_KEY_MGMT: + 
break; + + case IW_AUTH_WPA_ENABLED: + /* Do the same thing as IW_AUTH_WPA_VERSION */ + if (param->value) { + wpa = 1; + privinvoked = 1; /* For privacy invoked */ + exunencrypt = 1; /* Filter out all unencrypted frames */ + dot1x = 0x01; /* To enable eap filter */ + mlmelevel = DOT11_MLME_EXTENDED; + authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ + } else { + wpa = 0; + privinvoked = 0; + exunencrypt = 0; /* Do not filter un-encrypted data */ + dot1x = 0; + mlmelevel = DOT11_MLME_AUTO; + } + break; + + case IW_AUTH_WPA_VERSION: + if (param->value & IW_AUTH_WPA_VERSION_DISABLED) { + wpa = 0; + privinvoked = 0; + exunencrypt = 0; /* Do not filter un-encrypted data */ + dot1x = 0; + mlmelevel = DOT11_MLME_AUTO; + } else { + if (param->value & IW_AUTH_WPA_VERSION_WPA) + wpa = 1; + else if (param->value & IW_AUTH_WPA_VERSION_WPA2) + wpa = 2; + privinvoked = 1; /* For privacy invoked */ + exunencrypt = 1; /* Filter out all unencrypted frames */ + dot1x = 0x01; /* To enable eap filter */ + mlmelevel = DOT11_MLME_EXTENDED; + authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ + } + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + dot1x = param->value ? 1 : 0; + break; + + case IW_AUTH_PRIVACY_INVOKED: + privinvoked = param->value ? 1 : 0; + break; + + case IW_AUTH_DROP_UNENCRYPTED: + exunencrypt = param->value ? 1 : 0; + break; + + case IW_AUTH_80211_AUTH_ALG: + if (param->value & IW_AUTH_ALG_SHARED_KEY) { + /* Only WEP uses _SK and _BOTH */ + if (wpa > 0) { + ret = -EINVAL; + goto out; + } + authen = DOT11_AUTH_SK; + } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { + authen = DOT11_AUTH_OS; + } else { + ret = -EINVAL; + goto out; + } + break; + + default: + return -EOPNOTSUPP; + } + + /* Set all the values */ + down_write(&priv->mib_sem); + priv->wpa = wpa; + up_write(&priv->mib_sem); + mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); + mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &privinvoked); + mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt); + mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x); + mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlmelevel); + +out: + return ret; +} + +static int prism54_get_auth(struct net_device *ndev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + islpci_private *priv = netdev_priv(ndev); + struct iw_param *param = &wrqu->param; + u32 wpa = 0; + int ret = 0; + union oid_res_t r; + + if (islpci_get_state(priv) < PRV_STATE_INIT) + return 0; + + /* first get the flags */ + down_write(&priv->mib_sem); + wpa = priv->wpa; + up_write(&priv->mib_sem); + + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + case IW_AUTH_KEY_MGMT: + /* + * wpa_supplicant will control these internally + */ + ret = -EOPNOTSUPP; + break; + + case IW_AUTH_WPA_VERSION: + switch (wpa) { + case 1: + param->value = IW_AUTH_WPA_VERSION_WPA; + break; + case 2: + param->value = IW_AUTH_WPA_VERSION_WPA2; + break; + case 0: + default: + param->value = IW_AUTH_WPA_VERSION_DISABLED; + break; + } + break; + + case IW_AUTH_DROP_UNENCRYPTED: + ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); + if (ret >= 0) + param->value = r.u > 0 ?
1 : 0; + break; + + case IW_AUTH_80211_AUTH_ALG: + ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); + if (ret >= 0) { + switch (r.u) { + case DOT11_AUTH_OS: + param->value = IW_AUTH_ALG_OPEN_SYSTEM; + break; + case DOT11_AUTH_BOTH: + case DOT11_AUTH_SK: + param->value = IW_AUTH_ALG_SHARED_KEY; + break; + case DOT11_AUTH_NONE: + default: + param->value = 0; + break; + } + } + break; + + case IW_AUTH_WPA_ENABLED: + param->value = wpa > 0 ? 1 : 0; + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r); + if (ret >= 0) + param->value = r.u > 0 ? 1 : 0; + break; + + case IW_AUTH_PRIVACY_INVOKED: + ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); + if (ret >= 0) + param->value = r.u > 0 ? 1 : 0; + break; + + default: + return -EOPNOTSUPP; + } + return ret; +} + +static int prism54_set_encodeext(struct net_device *ndev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra) +{ + islpci_private *priv = netdev_priv(ndev); + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + int idx, alg = ext->alg, set_key = 1; + union oid_res_t r; + int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; + int ret = 0; + + if (islpci_get_state(priv) < PRV_STATE_INIT) + return 0; + + /* Determine and validate the key index */ + idx = (encoding->flags & IW_ENCODE_INDEX) - 1; + if (idx) { + if (idx < 0 || idx > 3) + return -EINVAL; + } else { + ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); + if (ret < 0) + goto out; + idx = r.u; + } + + if (encoding->flags & IW_ENCODE_DISABLED) + alg = IW_ENCODE_ALG_NONE; + + if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + /* Only set transmit key index here, actual + * key is set below if needed. + */ + ret = mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &idx); + set_key = ext->key_len > 0 ? 1 : 0; + } + + if (set_key) { + struct obj_key key = { DOT11_PRIV_WEP, 0, "" }; + switch (alg) { + case IW_ENCODE_ALG_NONE: + break; + case IW_ENCODE_ALG_WEP: + if (ext->key_len > KEY_SIZE_WEP104) { + ret = -EINVAL; + goto out; + } + if (ext->key_len > KEY_SIZE_WEP40) + key.length = KEY_SIZE_WEP104; + else + key.length = KEY_SIZE_WEP40; + break; + case IW_ENCODE_ALG_TKIP: + if (ext->key_len > KEY_SIZE_TKIP) { + ret = -EINVAL; + goto out; + } + key.type = DOT11_PRIV_TKIP; + key.length = KEY_SIZE_TKIP; + break; + default: + return -EINVAL; + } + + if (key.length) { + memset(key.key, 0, sizeof(key.key)); + memcpy(key.key, ext->key, ext->key_len); + ret = mgt_set_request(priv, DOT11_OID_DEFKEYX, idx, + &key); + if (ret < 0) + goto out; + } + } + + /* Read the flags */ + if (encoding->flags & IW_ENCODE_DISABLED) { + /* Encoding disabled, + * authen = DOT11_AUTH_OS; + * invoke = 0; + * exunencrypt = 0; */ + } + if (encoding->flags & IW_ENCODE_OPEN) { + /* Encode but accept non-encoded packets. No auth */ + invoke = 1; + } + if (encoding->flags & IW_ENCODE_RESTRICTED) { + /* Refuse non-encoded packets.
Auth */ + authen = DOT11_AUTH_BOTH; + invoke = 1; + exunencrypt = 1; + } + + /* do the change if requested */ + if (encoding->flags & IW_ENCODE_MODE) { + ret = mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, + &authen); + ret = mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, + &invoke); + ret = mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, + &exunencrypt); + } + +out: + return ret; +} + + +static int prism54_get_encodeext(struct net_device *ndev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra) +{ + islpci_private *priv = netdev_priv(ndev); + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + int idx, max_key_len; + union oid_res_t r; + int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0, wpa = 0; + int ret = 0; + + if (islpci_get_state(priv) < PRV_STATE_INIT) + return 0; + + /* first get the flags */ + ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); + authen = r.u; + ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); + invoke = r.u; + ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); + exunencrypt = r.u; + if (ret < 0) + goto out; + + max_key_len = encoding->length - sizeof(*ext); + if (max_key_len < 0) + return -EINVAL; + + idx = (encoding->flags & IW_ENCODE_INDEX) - 1; + if (idx) { + if (idx < 0 || idx > 3) + return -EINVAL; + } else { + ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); + if (ret < 0) + goto out; + idx = r.u; + } + + encoding->flags = idx + 1; + memset(ext, 0, sizeof(*ext)); + + switch (authen) { + case DOT11_AUTH_BOTH: + case DOT11_AUTH_SK: + wrqu->encoding.flags |= IW_ENCODE_RESTRICTED; + break; + case DOT11_AUTH_OS: + default: + wrqu->encoding.flags |= IW_ENCODE_OPEN; + break; + } + + down_write(&priv->mib_sem); + wpa = priv->wpa; + up_write(&priv->mib_sem); + + if (authen == DOT11_AUTH_OS && !exunencrypt && !invoke && !wpa) { + /* No encryption */ + ext->alg = IW_ENCODE_ALG_NONE; + ext->key_len = 0; + wrqu->encoding.flags |= IW_ENCODE_DISABLED; + } else { + struct obj_key *key; + + ret = mgt_get_request(priv, DOT11_OID_DEFKEYX, idx, NULL, &r); + if (ret < 0) + goto out; + key = r.ptr; + if (max_key_len < key->length) { + ret = -E2BIG; + goto out; + } + memcpy(ext->key, key->key, key->length); + ext->key_len = key->length; + + switch (key->type) { + case DOT11_PRIV_TKIP: + ext->alg = IW_ENCODE_ALG_TKIP; + break; + default: + case DOT11_PRIV_WEP: + ext->alg = IW_ENCODE_ALG_WEP; + break; + } + wrqu->encoding.flags |= IW_ENCODE_ENABLED; + } + +out: + return ret; +} + + static int prism54_reset(struct net_device *ndev, struct iw_request_info *info, __u32 * uwrq, char *extra) @@ -1591,8 +2081,8 @@ static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; #define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" static void -prism54_wpa_ie_add(islpci_private *priv, u8 *bssid, - u8 *wpa_ie, size_t wpa_ie_len) +prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, + u8 *wpa_ie, size_t wpa_ie_len) { struct list_head *ptr; struct islpci_bss_wpa_ie *bss = NULL; @@ -1658,7 +2148,7 @@ prism54_wpa_ie_add(islpci_private *priv, u8 *bssid, } static size_t -prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie) +prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie) { struct list_head *ptr; struct islpci_bss_wpa_ie *bss = NULL; @@ -1683,14 +2173,14 @@ prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie) } void -prism54_wpa_ie_init(islpci_private *priv) +prism54_wpa_bss_ie_init(islpci_private *priv) {
INIT_LIST_HEAD(&priv->bss_wpa_list); sema_init(&priv->wpa_sem, 1); } void -prism54_wpa_ie_clean(islpci_private *priv) +prism54_wpa_bss_ie_clean(islpci_private *priv) { struct list_head *ptr, *n; @@ -1722,7 +2212,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, } if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && memcmp(pos + 2, wpa_oid, 4) == 0) { - prism54_wpa_ie_add(priv, addr, pos, pos[1] + 2); + prism54_wpa_bss_ie_add(priv, addr, pos, pos[1] + 2); return; } pos += 2 + pos[1]; @@ -1879,7 +2369,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, send_formatted_event(priv, "Associate request (ex)", mlme, 1); if (priv->iw_mode != IW_MODE_MASTER - && mlmeex->state != DOT11_STATE_AUTHING) + && mlmeex->state != DOT11_STATE_ASSOCING) break; confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); @@ -1893,7 +2383,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, confirm->state = 0; /* not used */ confirm->code = 0; - wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie); + wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { printk(KERN_DEBUG "No WPA IE found from " @@ -1937,7 +2427,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, confirm->state = 0; /* not used */ confirm->code = 0; - wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie); + wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { printk(KERN_DEBUG "No WPA IE found from " @@ -2553,6 +3043,15 @@ static const iw_handler prism54_handler[] = { (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */ (iw_handler) NULL, /* SIOCSIWPOWER */ (iw_handler) NULL, /* SIOCGIWPOWER */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */ + (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */ + (iw_handler) prism54_set_auth, /* SIOCSIWAUTH */ + (iw_handler) prism54_get_auth, /* SIOCGIWAUTH */ + (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */ + (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */ + NULL, /* SIOCSIWPMKSA */ }; /* The low order bit identify a SET (0) or a GET (1) ioctl. 
*/ diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h index 46d5cde80c8..65f33acd0a4 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.h +++ b/drivers/net/wireless/prism54/isl_ioctl.h @@ -27,7 +27,7 @@ #include <net/iw_handler.h> /* New driver API */ -#define SUPPORTED_WIRELESS_EXT 16 +#define SUPPORTED_WIRELESS_EXT 19 void prism54_mib_init(islpci_private *); @@ -39,8 +39,8 @@ void prism54_acl_clean(struct islpci_acl *); void prism54_process_trap(void *); -void prism54_wpa_ie_init(islpci_private *priv); -void prism54_wpa_ie_clean(islpci_private *priv); +void prism54_wpa_bss_ie_init(islpci_private *priv); +void prism54_wpa_bss_ie_clean(islpci_private *priv); int prism54_set_mac_address(struct net_device *, void *); diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 5ddf2959903..ab3c5a27efd 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -715,7 +715,7 @@ islpci_alloc_memory(islpci_private *priv) } prism54_acl_init(&priv->acl); - prism54_wpa_ie_init(priv); + prism54_wpa_bss_ie_init(priv); if (mgt_init(priv)) goto out_free; @@ -774,7 +774,7 @@ islpci_free_memory(islpci_private *priv) /* Free the acces control list and the WPA list */ prism54_acl_clean(&priv->acl); - prism54_wpa_ie_clean(priv); + prism54_wpa_bss_ie_clean(priv); mgt_clean(priv); return 0; diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h index 07053165e4c..5049f37455b 100644 --- a/drivers/net/wireless/prism54/islpci_dev.h +++ b/drivers/net/wireless/prism54/islpci_dev.h @@ -179,6 +179,8 @@ typedef struct { struct list_head bss_wpa_list; int num_bss_wpa; struct semaphore wpa_sem; + u8 wpa_ie[MAX_WPA_IE_LEN]; + size_t wpa_ie_len; struct work_struct reset_task; int reset_task_pending; diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c index 9df232c2c86..440ef24b5fd 100644 --- a/drivers/net/wireless/zd1211rw/zd_netdev.c +++ b/drivers/net/wireless/zd1211rw/zd_netdev.c @@ -72,10 +72,18 @@ static int iw_get_name(struct net_device *netdev, struct iw_request_info *info, union iwreq_data *req, char *extra) { - /* FIXME: check whether 802.11a will also supported, add also - * zd1211B, if we support it. 
- */ - strlcpy(req->name, "802.11g zd1211", IFNAMSIZ); + /* FIXME: check whether 802.11a will also be supported */ + strlcpy(req->name, "IEEE 802.11b/g", IFNAMSIZ); + return 0; +} + +static int iw_get_nick(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *req, char *extra) +{ + strcpy(extra, "zd1211"); + req->data.length = strlen(extra) + 1; + req->data.flags = 1; return 0; } @@ -181,6 +189,7 @@ static int iw_get_encodeext(struct net_device *netdev, static const iw_handler zd_standard_iw_handlers[] = { WX(SIOCGIWNAME) = iw_get_name, + WX(SIOCGIWNICKN) = iw_get_nick, WX(SIOCSIWFREQ) = iw_set_freq, WX(SIOCGIWFREQ) = iw_get_freq, WX(SIOCSIWMODE) = iw_set_mode, diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 72f90525bf6..c68b9f8995c 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -39,9 +39,11 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 }, /* ZD1211B */ { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, {} }; diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index ecc42864b00..b174ebb277a 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -240,6 +240,11 @@ struct ieee80211_snap_hdr { #define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) #define WLAN_CAPABILITY_DSSS_OFDM (1<<13) +/* 802.11g ERP information element */ +#define WLAN_ERP_NON_ERP_PRESENT (1<<0) +#define WLAN_ERP_USE_PROTECTION (1<<1) +#define WLAN_ERP_BARKER_PREAMBLE (1<<2) + /* Status codes */ enum ieee80211_statuscode { WLAN_STATUS_SUCCESS = 0, @@ -747,6 +752,8 @@ struct ieee80211_txb { #define NETWORK_HAS_IBSS_DFS (1<<8) #define NETWORK_HAS_TPC_REPORT (1<<9) +#define NETWORK_HAS_ERP_VALUE (1<<10) + #define QOS_QUEUE_NUM 4 #define QOS_OUI_LEN 3 #define QOS_OUI_TYPE 2 @@ -1252,6 +1259,8 @@ extern int ieee80211_tx_frame(struct ieee80211_device *ieee, int total_len, int encrypt_mpdu); /* ieee80211_rx.c */ +extern void ieee80211_rx_any(struct ieee80211_device *ieee, + struct sk_buff *skb, struct ieee80211_rx_stats *stats); extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats); /* make sure to set stats->len */ diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h index 00ad810eb88..425b3a57ac7 100644 --- a/include/net/ieee80211softmac.h +++ b/include/net/ieee80211softmac.h @@ -86,9 +86,6 @@ struct ieee80211softmac_assoc_info { /* BSSID we're trying to associate to */ char bssid[ETH_ALEN]; - - /* Rates supported by the network */ - struct ieee80211softmac_ratesinfo supported_rates; /* some flags. * static_essid is valid if the essid is constant, @@ -103,6 +100,7 @@ struct ieee80211softmac_assoc_info { * bssfixed is used for SIOCSIWAP.
*/ u8 static_essid:1, + short_preamble_available:1, associating:1, assoc_wait:1, bssvalid:1, @@ -115,6 +113,19 @@ struct ieee80211softmac_assoc_info { struct work_struct timeout; }; +struct ieee80211softmac_bss_info { + /* Rates supported by the network */ + struct ieee80211softmac_ratesinfo supported_rates; + + /* This indicates whether frames can currently be transmitted with + * short preamble (only use this variable during TX at CCK rates) */ + u8 short_preamble:1; + + /* This indicates whether protection (e.g. self-CTS) should be used + * when transmitting with OFDM modulation */ + u8 use_protection:1; +}; + enum { IEEE80211SOFTMAC_AUTH_OPEN_REQUEST = 1, IEEE80211SOFTMAC_AUTH_OPEN_RESPONSE = 2, @@ -157,6 +168,10 @@ struct ieee80211softmac_txrates { #define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */ #define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST (1 << 3) /* mgt_mcast_rate */ +#define IEEE80211SOFTMAC_BSSINFOCHG_RATES (1 << 0) /* supported_rates */ +#define IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE (1 << 1) /* short_preamble */ +#define IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION (1 << 2) /* use_protection */ + struct ieee80211softmac_device { /* 802.11 structure for data stuff */ struct ieee80211_device *ieee; @@ -200,10 +215,16 @@ struct ieee80211softmac_device { * The driver just needs to read them. */ struct ieee80211softmac_txrates txrates; - /* If the driver needs to do stuff on TX rate changes, assign this callback. */ + + /* If the driver needs to do stuff on TX rate changes, assign this + * callback. See IEEE80211SOFTMAC_TXRATECHG for change flags. */ void (*txrates_change)(struct net_device *dev, - u32 changes, /* see IEEE80211SOFTMAC_TXRATECHG flags */ - const struct ieee80211softmac_txrates *rates_before_change); + u32 changes); + + /* If the driver needs to do stuff when BSS properties change, assign + * this callback. see IEEE80211SOFTMAC_BSSINFOCHG for change flags. */ + void (*bssinfo_change)(struct net_device *dev, + u32 changes); /* private stuff follows */ /* this lock protects this structure */ @@ -216,6 +237,7 @@ struct ieee80211softmac_device { struct ieee80211softmac_scaninfo *scaninfo; struct ieee80211softmac_assoc_info associnfo; + struct ieee80211softmac_bss_info bssinfo; struct list_head auth_queue; struct list_head events; @@ -257,6 +279,14 @@ extern void ieee80211softmac_fragment_lost(struct net_device *dev, * Note that the rates need to be sorted. */ extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates); +/* Finds the highest rate which is: + * 1. Present in ri (optionally a basic rate) + * 2. Supported by the device + * 3. Less than or equal to the user-defined rate + */ +extern u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac, + struct ieee80211softmac_ratesinfo *ri, int basic_only); + /* Helper function which advises you the rate at which a frame should be * transmitted at. */ static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac, @@ -279,6 +309,24 @@ static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device return txrates->mcast_rate; } +/* Helper function which advises you when it is safe to transmit with short + * preamble. + * You should only call this function when transmitting at CCK rates. */ +static inline int ieee80211softmac_short_preamble_ok(struct ieee80211softmac_device *mac, + int is_multicast, + int is_mgt) +{ + return (is_multicast && is_mgt) ? 
0 : mac->bssinfo.short_preamble; } + +/* Helper function which advises you whether protection (e.g. self-CTS) is + * needed. 1 = protection needed, 0 = no protection needed + * Only use this function when transmitting with OFDM modulation. */ +static inline int ieee80211softmac_protection_needed(struct ieee80211softmac_device *mac) +{ + return mac->bssinfo.use_protection; +} + /* Start the SoftMAC. Call this after you initialized the device * and it is ready to run. */ diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 72d4d4e04d4..d60358d702d 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -779,33 +779,44 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, return 0; } -/* Filter out unrelated packets, call ieee80211_rx[_mgt] */ -int ieee80211_rx_any(struct ieee80211_device *ieee, +/* Filter out unrelated packets, call ieee80211_rx[_mgt] + * This function takes over the skb, it should not be used again after calling + * this function. */ +void ieee80211_rx_any(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *stats) { struct ieee80211_hdr_4addr *hdr; int is_packet_for_us; u16 fc; - if (ieee->iw_mode == IW_MODE_MONITOR) - return ieee80211_rx(ieee, skb, stats) ? 0 : -EINVAL; + if (ieee->iw_mode == IW_MODE_MONITOR) { + if (!ieee80211_rx(ieee, skb, stats)) + dev_kfree_skb_irq(skb); + return; + } + + if (skb->len < sizeof(struct ieee80211_hdr)) + goto drop_free; hdr = (struct ieee80211_hdr_4addr *)skb->data; fc = le16_to_cpu(hdr->frame_ctl); if ((fc & IEEE80211_FCTL_VERS) != 0) - return -EINVAL; + goto drop_free; switch (fc & IEEE80211_FCTL_FTYPE) { case IEEE80211_FTYPE_MGMT: + if (skb->len < sizeof(struct ieee80211_hdr_3addr)) + goto drop_free; ieee80211_rx_mgt(ieee, hdr, stats); - return 0; + dev_kfree_skb_irq(skb); + return; case IEEE80211_FTYPE_DATA: break; case IEEE80211_FTYPE_CTL: - return 0; + dev_kfree_skb_irq(skb); + return; default: - return -EINVAL; + dev_kfree_skb_irq(skb); + return; } is_packet_for_us = 0; @@ -849,8 +860,14 @@ int ieee80211_rx_any(struct ieee80211_device *ieee, } - if (is_packet_for_us) - return (ieee80211_rx(ieee, skb, stats) ? 0 : -EINVAL); - return 0; + if (is_packet_for_us) { + if (!ieee80211_rx(ieee, skb, stats)) + dev_kfree_skb_irq(skb); + return; + } + +drop_free: + dev_kfree_skb_irq(skb); + ieee->stats.rx_dropped++; + return; } #define MGMT_FRAME_FIXED_PART_LENGTH 0x24 @@ -1166,6 +1183,7 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element case MFIE_TYPE_ERP_INFO: network->erp_value = info_element->data[0]; + network->flags |= NETWORK_HAS_ERP_VALUE; IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", network->erp_value); break; @@ -1729,5 +1747,6 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, } } +EXPORT_SYMBOL_GPL(ieee80211_rx_any); EXPORT_SYMBOL(ieee80211_rx_mgt); EXPORT_SYMBOL(ieee80211_rx); diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index 44215ce64d4..589f6d2c548 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c @@ -96,7 +96,7 @@ ieee80211softmac_disassoc(struct ieee80211softmac_device *mac) mac->associated = 0; mac->associnfo.bssvalid = 0; mac->associnfo.associating = 0; - ieee80211softmac_init_txrates(mac); + ieee80211softmac_init_bss(mac); ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL); spin_unlock_irqrestore(&mac->lock, flags); } @@ -334,11 +334,19 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac, struct ieee80211_assoc_response * resp, struct ieee80211softmac_network *net) { + u16 cap = le16_to_cpu(resp->capability); + u8 erp_value = net->erp_value; + mac->associnfo.associating = 0; - mac->associnfo.supported_rates = net->supported_rates; + mac->bssinfo.supported_rates = net->supported_rates; ieee80211softmac_recalc_txrates(mac); mac->associated = 1; + + mac->associnfo.short_preamble_available = + (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0; + ieee80211softmac_process_erp(mac, erp_value); + if (mac->set_bssid_filter) mac->set_bssid_filter(mac->dev, net->bssid); memcpy(mac->ieee->bssid, net->bssid, ETH_ALEN); @@ -351,9 +359,9 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac, int ieee80211softmac_handle_assoc_response(struct net_device * dev, struct ieee80211_assoc_response * resp, - struct ieee80211_network * _ieee80211_network_do_not_use) + struct ieee80211_network * _ieee80211_network) { - /* NOTE: the network parameter has to be ignored by + /* NOTE: the network parameter has to be mostly ignored by * this code because it is the ieee80211's pointer * to the struct, not ours (we made a copy) */ @@ -385,6 +393,11 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, /* now that we know it was for us, we can cancel the timeout */ cancel_delayed_work(&mac->associnfo.timeout); + /* if the association response included an ERP IE, update our saved + * copy */ + if (_ieee80211_network->flags & NETWORK_HAS_ERP_VALUE) + network->erp_value = _ieee80211_network->erp_value; + switch (status) { case 0: dprintk(KERN_INFO PFX "associated!\n"); diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c index 6ae5a1dc795..82bfddbf33a 100644 --- a/net/ieee80211/softmac/ieee80211softmac_io.c +++ b/net/ieee80211/softmac/ieee80211softmac_io.c @@ -467,3 +467,17 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac, kfree(pkt); return 0; } + +/* Beacon handling */ +int ieee80211softmac_handle_beacon(struct net_device *dev, + struct ieee80211_beacon *beacon, + struct ieee80211_network *network) +{ + struct ieee80211softmac_device *mac =
ieee80211_priv(dev);
+
+	if (mac->associated && memcmp(network->bssid, mac->associnfo.bssid, ETH_ALEN) == 0)
+		ieee80211softmac_process_erp(mac, network->erp_value);
+
+	return 0;
+}
+
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 4b2e57d1241..addea1cf73a 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -44,6 +44,7 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
 	softmac->ieee->handle_assoc_response = ieee80211softmac_handle_assoc_response;
 	softmac->ieee->handle_reassoc_request = ieee80211softmac_handle_reassoc_req;
 	softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc;
+	softmac->ieee->handle_beacon = ieee80211softmac_handle_beacon;
 	softmac->scaninfo = NULL;
 
 	softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
@@ -178,21 +179,14 @@ int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo
 	return 0;
 }
 
-/* Finds the highest rate which is:
- * 1. Present in ri (optionally a basic rate)
- * 2. Supported by the device
- * 3. Less than or equal to the user-defined rate
- */
-static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
+u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac,
 	struct ieee80211softmac_ratesinfo *ri, int basic_only)
 {
 	u8 user_rate = mac->txrates.user_rate;
 	int i;
 
-	if (ri->count == 0) {
-		dprintk(KERN_ERR PFX "empty ratesinfo?\n");
+	if (ri->count == 0)
 		return IEEE80211_CCK_RATE_1MB;
-	}
 
 	for (i = ri->count - 1; i >= 0; i--) {
 		u8 rate = ri->rates[i];
@@ -208,36 +202,61 @@ static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
 	/* If we haven't found a suitable rate by now, just trust the user */
 	return user_rate;
 }
+EXPORT_SYMBOL_GPL(ieee80211softmac_highest_supported_rate);
+
+void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
+	u8 erp_value)
+{
+	int use_protection;
+	int short_preamble;
+	u32 changes = 0;
+
+	/* Barker preamble mode */
+	short_preamble = ((erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0
+			  && mac->associnfo.short_preamble_available) ? 1 : 0;
+
+	/* Protection needed? */
+	use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
+
+	if (mac->bssinfo.short_preamble != short_preamble) {
+		changes |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;
+		mac->bssinfo.short_preamble = short_preamble;
+	}
+
+	if (mac->bssinfo.use_protection != use_protection) {
+		changes |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;
+		mac->bssinfo.use_protection = use_protection;
+	}
+
+	if (mac->bssinfo_change && changes)
+		mac->bssinfo_change(mac->dev, changes);
+}
 
 void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac)
 {
 	struct ieee80211softmac_txrates *txrates = &mac->txrates;
-	struct ieee80211softmac_txrates oldrates;
 	u32 change = 0;
 
-	if (mac->txrates_change)
-		oldrates = mac->txrates;
-
 	change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
-	txrates->default_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 0);
+	txrates->default_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 0);
 
 	change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
 	txrates->default_fallback = lower_rate(mac, txrates->default_rate);
 
 	change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
-	txrates->mcast_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 1);
+	txrates->mcast_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 1);
 
 	if (mac->txrates_change)
-		mac->txrates_change(mac->dev, change, &oldrates);
+		mac->txrates_change(mac->dev, change);
 
 }
 
-void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
+void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac)
 {
 	struct ieee80211_device *ieee = mac->ieee;
 	u32 change = 0;
 	struct ieee80211softmac_txrates *txrates = &mac->txrates;
-	struct ieee80211softmac_txrates oldrates;
+	struct ieee80211softmac_bss_info *bssinfo = &mac->bssinfo;
 
 	/* TODO: We need some kind of state machine to lower the default rates
 	 *       if we lose too many packets.
@@ -245,8 +264,6 @@ void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
 	/* Change the default txrate to the highest possible value.
 	 * The txrate machine will lower it, if it is too high.
 	 */
-	if (mac->txrates_change)
-		oldrates = mac->txrates;
 	/* FIXME: We don't correctly handle backing down to lower
 	 *        rates, so 802.11g devices start off at 11M for now. People
 	 *        can manually change it if they really need to, but 11M is
@@ -272,7 +289,23 @@ void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
 	change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST;
 
 	if (mac->txrates_change)
-		mac->txrates_change(mac->dev, change, &oldrates);
+		mac->txrates_change(mac->dev, change);
+
+	change = 0;
+
+	bssinfo->supported_rates.count = 0;
+	memset(bssinfo->supported_rates.rates, 0,
+		sizeof(bssinfo->supported_rates.rates));
+	change |= IEEE80211SOFTMAC_BSSINFOCHG_RATES;
+
+	bssinfo->short_preamble = 0;
+	change |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;
+
+	bssinfo->use_protection = 0;
+	change |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;
+
+	if (mac->bssinfo_change)
+		mac->bssinfo_change(mac->dev, change);
 
 	mac->running = 1;
 }
@@ -282,7 +315,7 @@ void ieee80211softmac_start(struct net_device *dev)
 	struct ieee80211softmac_device *mac = ieee80211_priv(dev);
 
 	ieee80211softmac_start_check_rates(mac);
-	ieee80211softmac_init_txrates(mac);
+	ieee80211softmac_init_bss(mac);
 }
 EXPORT_SYMBOL_GPL(ieee80211softmac_start);
@@ -335,7 +368,6 @@ u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rat
 static void ieee80211softmac_add_txrates_badness(struct ieee80211softmac_device *mac,
 						 int amount)
 {
-	struct ieee80211softmac_txrates oldrates;
 	u8 default_rate = mac->txrates.default_rate;
 	u8 default_fallback = mac->txrates.default_fallback;
 	u32 changes = 0;
@@ -348,8 +380,6 @@ printk("badness %d\n", mac->txrate_badness);
 	mac->txrate_badness += amount;
 	if (mac->txrate_badness <= -1000) {
 		/* Very small badness. Try a faster bitrate. */
-		if (mac->txrates_change)
-			memcpy(&oldrates, &mac->txrates, sizeof(oldrates));
 		default_rate = raise_rate(mac, default_rate);
 		changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
 		default_fallback = get_fallback_rate(mac, default_rate);
@@ -358,8 +388,6 @@ printk("badness %d\n", mac->txrate_badness);
 printk("Bitrate raised to %u\n", default_rate);
 	} else if (mac->txrate_badness >= 10000) {
 		/* Very high badness. Try a slower bitrate. */
-		if (mac->txrates_change)
-			memcpy(&oldrates, &mac->txrates, sizeof(oldrates));
 		default_rate = lower_rate(mac, default_rate);
 		changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
 		default_fallback = get_fallback_rate(mac, default_rate);
@@ -372,7 +400,7 @@ printk("Bitrate lowered to %u\n", default_rate);
 	mac->txrates.default_fallback = default_fallback;
 
 	if (changes && mac->txrates_change)
-		mac->txrates_change(mac->dev, changes, &oldrates);
+		mac->txrates_change(mac->dev, changes);
 }
 
 void ieee80211softmac_fragment_lost(struct net_device *dev,
@@ -416,7 +444,11 @@ ieee80211softmac_create_network(struct ieee80211softmac_device *mac,
 	memcpy(&softnet->supported_rates.rates[softnet->supported_rates.count], net->rates_ex, net->rates_ex_len);
 	softnet->supported_rates.count += net->rates_ex_len;
 	sort(softnet->supported_rates.rates, softnet->supported_rates.count, sizeof(softnet->supported_rates.rates[0]), rate_cmp, NULL);
-
+
+	/* we save the ERP value because it is needed at association time, and
+	 * many APs do not include an ERP IE in the association response. */
+	softnet->erp_value = net->erp_value;
+
 	softnet->capabilities = net->capability;
 	return softnet;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index fa1f8e3acfc..0642e090b8a 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -116,9 +116,11 @@ ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
 	struct ieee80211softmac_essid *essid);
 
 /* Rates related */
+void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
+	u8 erp_value);
 int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate);
 u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta);
-void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac);
+void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac);
 void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac);
 static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) {
 	return ieee80211softmac_lower_rate_delta(mac, rate, 1);
@@ -133,6 +135,9 @@ static inline u8 get_fallback_rate(struct ieee80211softmac_device *mac, u8 rate)
 
 /*** prototypes from _io.c */
 int ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac, void* ptrarg, u32 type, u32 arg);
+int ieee80211softmac_handle_beacon(struct net_device *dev,
+	struct ieee80211_beacon *beacon,
+	struct ieee80211_network *network);
 
 /*** prototypes from _auth.c */
 /* do these have to go into the public header? */
@@ -189,6 +194,7 @@ struct ieee80211softmac_network {
 		authenticated:1,
 		auth_desynced_once:1;
 
+	u8 erp_value;		/* Saved ERP value */
 	u16 capabilities;	/* Capabilities bitfield */
 	u8 challenge_len;	/* Auth Challenge length */
 	char *challenge;	/* Challenge Text */
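
The ERP handling added above fits in a single octet of beacon data: ieee80211softmac_process_erp() inspects only the Use_Protection and Barker_Preamble_Mode bits. The standalone program below is a sketch of that decision table outside the kernel; the bit positions follow the 802.11g ERP IE layout and are assumed, not quoted, to match the kernel's WLAN_ERP_* constants.

#include <stdio.h>

/* Assumed ERP IE bit layout (802.11g): bit 1 = Use_Protection,
 * bit 2 = Barker_Preamble_Mode. Check ieee80211.h before relying
 * on these exact values. */
#define WLAN_ERP_USE_PROTECTION  (1 << 1)
#define WLAN_ERP_BARKER_PREAMBLE (1 << 2)

int main(void)
{
	int short_preamble_available = 1;	/* pretend the hardware supports it */
	unsigned int erp;

	for (erp = 0; erp <= 0x7; erp++) {
		/* Same two tests as ieee80211softmac_process_erp() */
		int short_preamble = ((erp & WLAN_ERP_BARKER_PREAMBLE) == 0
				      && short_preamble_available) ? 1 : 0;
		int use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0;

		printf("erp=0x%02x -> short_preamble=%d use_protection=%d\n",
		       erp, short_preamble, use_protection);
	}
	return 0;
}

As in the kernel function, short preamble is selected only when the hardware reports the capability, which is what the associnfo.short_preamble_available test does above.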
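
Driver authors pick these updates up through the new bssinfo_change hook, which is invoked with the IEEE80211SOFTMAC_BSSINFOCHG_* bits set in the code above. A minimal, hypothetical driver-side handler is sketched below; the mydrv_* helpers are placeholders for real device programming and are not part of this patch or of any in-tree driver.

#include <net/ieee80211softmac.h>

/* Placeholder hardware hooks -- stand-ins for real register programming. */
static void mydrv_set_short_preamble(struct net_device *dev, int on) { /* ... */ }
static void mydrv_set_protection(struct net_device *dev, int on) { /* ... */ }

static void mydrv_bssinfo_change(struct net_device *dev, u32 changes)
{
	struct ieee80211softmac_device *mac = ieee80211_priv(dev);

	if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE)
		mydrv_set_short_preamble(dev, mac->bssinfo.short_preamble);

	if (changes & IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION)
		mydrv_set_protection(dev, mac->bssinfo.use_protection);
}

/* Registered once at setup, next to the other softmac callbacks:
 *	mac->bssinfo_change = mydrv_bssinfo_change;
 */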
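
The patch also changes the txrates_change callback signature throughout the module: the third oldrates argument is gone, so softmac no longer snapshots the previous rates before each update. A driver that needs the new values simply reads mac->txrates when notified. Another hypothetical sketch, using the same placeholder naming as above:

static void mydrv_set_rates(struct net_device *dev, u8 def, u8 mcast) { /* ... */ }

static void mydrv_txrates_change(struct net_device *dev, u32 changes)
{
	struct ieee80211softmac_device *mac = ieee80211_priv(dev);

	/* mac->txrates already holds the new values; the
	 * IEEE80211SOFTMAC_TXRATECHG_* bits only say which ones moved. */
	if (changes & (IEEE80211SOFTMAC_TXRATECHG_DEFAULT |
		       IEEE80211SOFTMAC_TXRATECHG_MCAST))
		mydrv_set_rates(dev, mac->txrates.default_rate,
				mac->txrates.mcast_rate);
}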