Diffstat (limited to 'drivers/net')
229 files changed, 7119 insertions, 2839 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index e6c545fe5f5..b9d097c9f6b 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -413,7 +413,7 @@ static int __devinit el3_pnp_probe(struct pnp_dev *pdev, { short i; int ioaddr, irq, if_port; - u16 phys_addr[3]; + __be16 phys_addr[3]; struct net_device *dev = NULL; int err; @@ -605,7 +605,7 @@ static int __init el3_mca_probe(struct device *device) short i; int ioaddr, irq, if_port; - u16 phys_addr[3]; + __be16 phys_addr[3]; struct net_device *dev = NULL; u_char pos4, pos5; struct mca_device *mdev = to_mca_device(device); @@ -635,14 +635,13 @@ static int __init el3_mca_probe(struct device *device) printk(KERN_DEBUG "3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); } EL3WINDOW(0); - for (i = 0; i < 3; i++) { - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - } + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); dev = alloc_etherdev(sizeof (struct el3_private)); if (dev == NULL) { - release_region(ioaddr, EL3_IO_EXTENT); - return -ENOMEM; + release_region(ioaddr, EL3_IO_EXTENT); + return -ENOMEM; } netdev_boot_setup_check(dev); @@ -668,7 +667,7 @@ static int __init el3_eisa_probe (struct device *device) { short i; int ioaddr, irq, if_port; - u16 phys_addr[3]; + __be16 phys_addr[3]; struct net_device *dev = NULL; struct eisa_device *edev; int err; @@ -1063,7 +1062,6 @@ el3_rx(struct net_device *dev) struct sk_buff *skb; skb = dev_alloc_skb(pkt_len+5); - dev->stats.rx_bytes += pkt_len; if (el3_debug > 4) printk("Receiving packet size %d status %4.4x.\n", pkt_len, rx_status); @@ -1078,6 +1076,7 @@ el3_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; + dev->stats.rx_bytes += pkt_len; dev->stats.rx_packets++; continue; } diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 6f8e7d4cf74..aabad8ce745 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -319,7 +319,7 @@ static struct vortex_chip_info { {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, {"3c980 Cyclone", - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c980C Python-T", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, @@ -600,7 +600,6 @@ struct vortex_private { struct sk_buff* tx_skbuff[TX_RING_SIZE]; unsigned int cur_rx, cur_tx; /* The next free ring entry */ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ - struct net_device_stats stats; /* Generic stats */ struct vortex_extra_stats xstats; /* NIC-specific extra stats */ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ @@ -1769,9 +1768,10 @@ vortex_timer(unsigned long data) case XCVR_MII: case XCVR_NWAY: { ok = 1; - spin_lock_bh(&vp->lock); + /* Interrupts are already disabled */ + spin_lock(&vp->lock); vortex_check_media(dev, 0); - spin_unlock_bh(&vp->lock); + spin_unlock(&vp->lock); } break; default: /* Other media types handled by Tx timeouts. 
*/ @@ -1875,7 +1875,7 @@ static void vortex_tx_timeout(struct net_device *dev) issue_and_wait(dev, TxReset); - vp->stats.tx_errors++; + dev->stats.tx_errors++; if (vp->full_bus_master_tx) { printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name); if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) @@ -1887,7 +1887,7 @@ static void vortex_tx_timeout(struct net_device *dev) iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); iowrite16(DownUnstall, ioaddr + EL3_CMD); } else { - vp->stats.tx_dropped++; + dev->stats.tx_dropped++; netif_wake_queue(dev); } @@ -1928,8 +1928,8 @@ vortex_error(struct net_device *dev, int status) } dump_tx_ring(dev); } - if (tx_status & 0x14) vp->stats.tx_fifo_errors++; - if (tx_status & 0x38) vp->stats.tx_aborted_errors++; + if (tx_status & 0x14) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; if (tx_status & 0x08) vp->xstats.tx_max_collisions++; iowrite8(0, ioaddr + TxStatus); if (tx_status & 0x30) { /* txJabber or txUnderrun */ @@ -2051,8 +2051,8 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vortex_debug > 2) printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n", dev->name, tx_status); - if (tx_status & 0x04) vp->stats.tx_fifo_errors++; - if (tx_status & 0x38) vp->stats.tx_aborted_errors++; + if (tx_status & 0x04) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; if (tx_status & 0x30) { issue_and_wait(dev, TxReset); } @@ -2350,7 +2350,7 @@ boomerang_interrupt(int irq, void *dev_id) } else { printk(KERN_DEBUG "boomerang_interrupt: no skb!\n"); } - /* vp->stats.tx_packets++; Counted below. */ + /* dev->stats.tx_packets++; Counted below. */ dirty_tx++; } vp->dirty_tx = dirty_tx; @@ -2409,12 +2409,12 @@ static int vortex_rx(struct net_device *dev) unsigned char rx_error = ioread8(ioaddr + RxErrors); if (vortex_debug > 2) printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); - vp->stats.rx_errors++; - if (rx_error & 0x01) vp->stats.rx_over_errors++; - if (rx_error & 0x02) vp->stats.rx_length_errors++; - if (rx_error & 0x04) vp->stats.rx_frame_errors++; - if (rx_error & 0x08) vp->stats.rx_crc_errors++; - if (rx_error & 0x10) vp->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; @@ -2446,7 +2446,7 @@ static int vortex_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - vp->stats.rx_packets++; + dev->stats.rx_packets++; /* Wait a limited time to go to next packet. */ for (i = 200; i >= 0; i--) if ( ! 
(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) @@ -2455,7 +2455,7 @@ static int vortex_rx(struct net_device *dev) } else if (vortex_debug > 0) printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " "size %d.\n", dev->name, pkt_len); - vp->stats.rx_dropped++; + dev->stats.rx_dropped++; } issue_and_wait(dev, RxDiscard); } @@ -2482,12 +2482,12 @@ boomerang_rx(struct net_device *dev) unsigned char rx_error = rx_status >> 16; if (vortex_debug > 2) printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); - vp->stats.rx_errors++; - if (rx_error & 0x01) vp->stats.rx_over_errors++; - if (rx_error & 0x02) vp->stats.rx_length_errors++; - if (rx_error & 0x04) vp->stats.rx_frame_errors++; - if (rx_error & 0x08) vp->stats.rx_crc_errors++; - if (rx_error & 0x10) vp->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; @@ -2529,7 +2529,7 @@ boomerang_rx(struct net_device *dev) } netif_rx(skb); dev->last_rx = jiffies; - vp->stats.rx_packets++; + dev->stats.rx_packets++; } entry = (++vp->cur_rx) % RX_RING_SIZE; } @@ -2591,7 +2591,7 @@ vortex_down(struct net_device *dev, int final_down) del_timer_sync(&vp->rx_oom_timer); del_timer_sync(&vp->timer); - /* Turn off statistics ASAP. We update vp->stats below. */ + /* Turn off statistics ASAP. We update dev->stats below. */ iowrite16(StatsDisable, ioaddr + EL3_CMD); /* Disable the receiver and transmitter. */ @@ -2728,7 +2728,7 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev) update_stats(ioaddr, dev); spin_unlock_irqrestore (&vp->lock, flags); } - return &vp->stats; + return &dev->stats; } /* Update statistics. @@ -2748,18 +2748,18 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ /* Switch to the stats window, and read everything. */ EL3WINDOW(6); - vp->stats.tx_carrier_errors += ioread8(ioaddr + 0); - vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); - vp->stats.tx_window_errors += ioread8(ioaddr + 4); - vp->stats.rx_fifo_errors += ioread8(ioaddr + 5); - vp->stats.tx_packets += ioread8(ioaddr + 6); - vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; + dev->stats.tx_carrier_errors += ioread8(ioaddr + 0); + dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); + dev->stats.tx_window_errors += ioread8(ioaddr + 4); + dev->stats.rx_fifo_errors += ioread8(ioaddr + 5); + dev->stats.tx_packets += ioread8(ioaddr + 6); + dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */ /* Don't bother with register 9, an extension of registers 6&7. If we do use the 6&7 values the atomic update assumption above is invalid. 
*/ - vp->stats.rx_bytes += ioread16(ioaddr + 10); - vp->stats.tx_bytes += ioread16(ioaddr + 12); + dev->stats.rx_bytes += ioread16(ioaddr + 10); + dev->stats.tx_bytes += ioread16(ioaddr + 12); /* Extra stats for get_ethtool_stats() */ vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); vp->xstats.tx_single_collisions += ioread8(ioaddr + 3); @@ -2767,14 +2767,14 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) EL3WINDOW(4); vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); - vp->stats.collisions = vp->xstats.tx_multiple_collisions + dev->stats.collisions = vp->xstats.tx_multiple_collisions + vp->xstats.tx_single_collisions + vp->xstats.tx_max_collisions; { u8 up = ioread8(ioaddr + 13); - vp->stats.rx_bytes += (up & 0x0f) << 16; - vp->stats.tx_bytes += (up & 0xf0) << 12; + dev->stats.rx_bytes += (up & 0x0f) << 16; + dev->stats.tx_bytes += (up & 0xf0) << 12; } EL3WINDOW(old_window >> 13); diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 750a46f4bc5..ad6b8a5b657 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c @@ -506,6 +506,7 @@ int lance_open (struct net_device *dev) return res; } +EXPORT_SYMBOL_GPL(lance_open); int lance_close (struct net_device *dev) { @@ -521,6 +522,7 @@ int lance_close (struct net_device *dev) return 0; } +EXPORT_SYMBOL_GPL(lance_close); void lance_tx_timeout(struct net_device *dev) { @@ -529,7 +531,7 @@ void lance_tx_timeout(struct net_device *dev) dev->trans_start = jiffies; netif_wake_queue (dev); } - +EXPORT_SYMBOL_GPL(lance_tx_timeout); int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) { @@ -586,6 +588,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) return 0; } +EXPORT_SYMBOL_GPL(lance_start_xmit); /* taken from the depca driver via a2065.c */ static void lance_load_multicast (struct net_device *dev) @@ -654,6 +657,7 @@ void lance_set_multicast (struct net_device *dev) if (!stopped) netif_start_queue (dev); } +EXPORT_SYMBOL_GPL(lance_set_multicast); #ifdef CONFIG_NET_POLL_CONTROLLER void lance_poll(struct net_device *dev) diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 2797da7eeee..da292e647eb 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -1162,6 +1162,7 @@ struct net_device * __init i82596_probe(int unit) memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */ dev->base_addr = MVME_I596_BASE; dev->irq = (unsigned) MVME16x_IRQ_I596; + goto found; } #endif #ifdef ENABLE_BVME6000_NET @@ -1176,6 +1177,7 @@ struct net_device * __init i82596_probe(int unit) rtc[3] = msr; dev->base_addr = BVME_I596_BASE; dev->irq = (unsigned) BVME_IRQ_I596; + goto found; } #endif #ifdef ENABLE_APRICOT @@ -1212,8 +1214,13 @@ struct net_device * __init i82596_probe(int unit) } dev->irq = 10; + goto found; } #endif + err = -ENODEV; + goto out; + +found: dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0); if (!dev->mem_start) { err = -ENOMEM; diff --git a/drivers/net/8390.c b/drivers/net/8390.c index a499e867f0f..dc5d2584bd0 100644 --- a/drivers/net/8390.c +++ b/drivers/net/8390.c @@ -34,7 +34,7 @@ struct net_device *__alloc_ei_netdev(int size) void NS8390_init(struct net_device *dev, int startp) { - return __NS8390_init(dev, startp); + __NS8390_init(dev, startp); } EXPORT_SYMBOL(ei_open); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index af46341827f..f4182cfffe9 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1273,20 +1273,6 @@ config PCNET32 To compile this driver as a module, choose M here. The module will be called pcnet32. 
-config PCNET32_NAPI - bool "Use RX polling (NAPI)" - depends on PCNET32 - help - NAPI is a new driver API designed to reduce CPU and interrupt load - when the driver is receiving lots of packets from the card. It is - still somewhat experimental and thus not yet enabled by default. - - If your estimated Rx load is 10kpps or more, or if the card will be - deployed on potentially unfriendly networks (e.g. in a firewall), - then say Y here. - - If in doubt, say N. - config AMD8111_ETH tristate "AMD 8111 (new PCI lance) support" depends on NET_PCI && PCI @@ -1367,7 +1353,7 @@ config APRICOT config B44 tristate "Broadcom 440x/47xx ethernet support" - depends on SSB_POSSIBLE + depends on SSB_POSSIBLE && HAS_DMA select SSB select MII help diff --git a/drivers/net/apne.c b/drivers/net/apne.c index 47a8275d396..867f6fff543 100644 --- a/drivers/net/apne.c +++ b/drivers/net/apne.c @@ -127,6 +127,9 @@ struct net_device * __init apne_probe(int unit) #endif int err; + if (!MACH_IS_AMIGA) + return ERR_PTR(-ENODEV); + if (apne_owned) return ERR_PTR(-ENODEV); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 82e9a5bd0dd..a0b4c851607 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -499,19 +499,13 @@ static void cops_reset(struct net_device *dev, int sleep) { outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */ inb(ioaddr+DAYNA_RESET); /* Clear the reset */ - if(sleep) - { - long snap=jiffies; - - /* Let card finish initializing, about 1/3 second */ - while (time_before(jiffies, snap + HZ/3)) - schedule(); - } - else - mdelay(333); + if (sleep) + msleep(333); + else + mdelay(333); } + netif_wake_queue(dev); - return; } static void cops_load (struct net_device *dev) diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 0afe522b8f7..3c798ae5c34 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1,7 +1,7 @@ /* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. @@ -36,7 +36,6 @@ * A very incomplete list of things that need to be dealt with: * * TODO: - * Wake on LAN. * Add more ethtool functions. * Fix abstruse irq enable/disable condition described here: * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 @@ -472,7 +471,6 @@ static int atl1_get_permanent_address(struct atl1_hw *hw) memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } - return 1; } /* see if SPI FLAGS exist ? */ @@ -638,25 +636,6 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) } /* - *TODO: do something or get rid of this - */ -#ifdef CONFIG_PM -static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) -{ -/* s32 ret_val; - * u16 phy_data; - */ - -/* - ret_val = atl1_write_phy_reg(hw, ...); - ret_val = atl1_write_phy_reg(hw, ...); - .... 
-*/ - return 0; -} -#endif - -/* * Resets the PHY and make all config validate * hw - Struct containing variables accessed by shared code * @@ -2027,6 +2006,7 @@ rrd_ok: /* Good Receive */ pci_unmap_page(adapter->pdev, buffer_info->dma, buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; skb = buffer_info->skb; length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); @@ -2139,7 +2119,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, return -1; } - if (skb->protocol == ntohs(ETH_P_IP)) { + if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); real_len = (((unsigned char *)iph - skb->data) + @@ -2784,64 +2764,92 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) struct atl1_hw *hw = &adapter->hw; u32 ctrl = 0; u32 wufc = adapter->wol; + u32 val; + int retval; + u16 speed; + u16 duplex; netif_device_detach(netdev); if (netif_running(netdev)) atl1_down(adapter); + retval = pci_save_state(pdev); + if (retval) + return retval; + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - if (ctrl & BMSR_LSTATUS) + val = ctrl & BMSR_LSTATUS; + if (val) wufc &= ~ATLX_WUFC_LNKC; - /* reduce speed to 10/100M */ - if (wufc) { - atl1_phy_enter_power_saving(hw); - /* if resume, let driver to re- setup link */ - hw->phy_configured = false; - atl1_set_mac_addr(hw); - atlx_set_multi(netdev); + if (val && wufc) { + val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (val) { + if (netif_msg_ifdown(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "error getting speed/duplex\n"); + goto disable_wol; + } ctrl = 0; - /* turn on magic packet wol */ - if (wufc & ATLX_WUFC_MAG) - ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; - /* turn on Link change WOL */ - if (wufc & ATLX_WUFC_LNKC) - ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + /* enable magic packet WOL */ + if (wufc & ATLX_WUFC_MAG) + ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + + /* configure the mac */ + ctrl = MAC_CTRL_RX_EN; + ctrl |= ((u32)((speed == SPEED_1000) ? 
MAC_CTRL_SPEED_1000 : + MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); + if (duplex == FULL_DUPLEX) + ctrl |= MAC_CTRL_DUPLX; + ctrl |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + if (adapter->vlgrp) + ctrl |= MAC_CTRL_RMV_VLAN; + if (wufc & ATLX_WUFC_MAG) + ctrl |= MAC_CTRL_BC_EN; + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); + ioread32(hw->hw_addr + REG_MAC_CTRL); - /* turn on all-multi mode if wake on multicast is enabled */ - ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); - ctrl &= ~MAC_CTRL_DBG; - ctrl &= ~MAC_CTRL_PROMIS_EN; - if (wufc & ATLX_WUFC_MC) - ctrl |= MAC_CTRL_MC_ALL_EN; - else - ctrl &= ~MAC_CTRL_MC_ALL_EN; + /* poke the PHY */ + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); - /* turn on broadcast mode if wake on-BC is enabled */ - if (wufc & ATLX_WUFC_BC) - ctrl |= MAC_CTRL_BC_EN; - else - ctrl &= ~MAC_CTRL_BC_EN; + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto exit; + } - /* enable RX */ - ctrl |= MAC_CTRL_RX_EN; - iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } else { - iowrite32(0, hw->hw_addr + REG_WOL_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); + if (!val && wufc) { + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + iowrite32(0, hw->hw_addr + REG_MAC_CTRL); + ioread32(hw->hw_addr + REG_MAC_CTRL); + hw->phy_configured = false; + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto exit; } - pci_save_state(pdev); +disable_wol: + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + hw->phy_configured = false; + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); +exit: + if (netif_running(netdev)) + pci_disable_msi(adapter->pdev); pci_disable_device(pdev); - - pci_set_power_state(pdev, PCI_D3hot); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } @@ -2855,20 +2863,26 @@ static int atl1_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - /* FIXME: check and handle */ err = pci_enable_device(pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "error enabling pci device\n"); + return err; + } + + pci_set_master(pdev); + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); - atl1_reset(adapter); + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; if (netif_running(netdev)) atl1_up(adapter); netif_device_attach(netdev); - atl1_via_workaround(adapter); - return 0; } #else @@ -2876,6 +2890,13 @@ static int atl1_resume(struct pci_dev *pdev) #define atl1_resume NULL #endif +static void atl1_shutdown(struct pci_dev *pdev) +{ +#ifdef CONFIG_PM + atl1_suspend(pdev, PMSG_SUSPEND); +#endif +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void atl1_poll_controller(struct net_device *netdev) { @@ -3122,7 +3143,8 @@ static struct pci_driver atl1_driver = { .probe = atl1_probe, .remove = __devexit_p(atl1_remove), .suspend = 
atl1_suspend, - .resume = atl1_resume + .resume = atl1_resume, + .shutdown = atl1_shutdown }; /* diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 51893d66eae..a5015b14a42 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -1,7 +1,7 @@ /* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c index f06b854e250..b3e7fcf0f6e 100644 --- a/drivers/net/atlx/atlx.c +++ b/drivers/net/atlx/atlx.c @@ -2,7 +2,7 @@ * * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h index 3be7c09734d..297a03da6b7 100644 --- a/drivers/net/atlx/atlx.h +++ b/drivers/net/atlx/atlx.h @@ -2,7 +2,7 @@ * * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver @@ -29,7 +29,7 @@ #include <linux/module.h> #include <linux/types.h> -#define ATLX_DRIVER_VERSION "2.1.1" +#define ATLX_DRIVER_VERSION "2.1.3" MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); MODULE_LICENSE("GPL"); @@ -460,6 +460,9 @@ MODULE_VERSION(ATLX_DRIVER_VERSION); #define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */ #define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + /* PCI Command Register Bit Definitions */ #define PCI_REG_COMMAND 0x04 /* PCI Command Register */ #define CMD_IO_SPACE 0x0001 diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 3634b5fd791..7023d77bf38 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev) */ static irqreturn_t au1000_interrupt(int irq, void *dev_id) { - struct net_device *dev = (struct net_device *) dev_id; - - if (dev == NULL) { - printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); - return IRQ_RETVAL(1); - } + struct net_device *dev = dev_id; /* Handle RX interrupts first to minimize chance of overrun */ diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 89c0018132e..41443435ab1 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -22,7 +22,6 @@ #include <linux/crc32.h> #include <linux/device.h> #include <linux/spinlock.h> -#include <linux/ethtool.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/netdevice.h> diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 15853be4680..367b6d46270 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -56,8 +56,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.7.4" -#define DRV_MODULE_RELDATE "February 18, 2008" +#define 
DRV_MODULE_VERSION "1.7.5" +#define DRV_MODULE_RELDATE "April 29, 2008" #define RUN_AT(x) (jiffies + (x)) @@ -1631,8 +1631,10 @@ bnx2_set_default_remote_link(struct bnx2 *bp) static void bnx2_set_default_link(struct bnx2 *bp) { - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - return bnx2_set_default_remote_link(bp); + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + bnx2_set_default_remote_link(bp); + return; + } bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; bp->req_line_speed = 0; @@ -1715,7 +1717,6 @@ bnx2_remote_phy_event(struct bnx2 *bp) break; } - spin_lock(&bp->phy_lock); bp->flow_ctrl = 0; if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { @@ -1737,7 +1738,6 @@ bnx2_remote_phy_event(struct bnx2 *bp) if (old_port != bp->phy_port) bnx2_set_default_link(bp); - spin_unlock(&bp->phy_lock); } if (bp->link_up != link_up) bnx2_report_link(bp); @@ -2222,6 +2222,11 @@ bnx2_init_5709_context(struct bnx2 *bp) for (i = 0; i < bp->ctx_pages; i++) { int j; + if (bp->ctx_blk[i]) + memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE); + else + return -ENOMEM; + REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, (bp->ctx_blk_mapping[i] & 0xffffffff) | BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); @@ -2445,14 +2450,15 @@ bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event) static void bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi) { - if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) { - spin_lock(&bp->phy_lock); + spin_lock(&bp->phy_lock); + + if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) bnx2_set_link(bp); - spin_unlock(&bp->phy_lock); - } if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT)) bnx2_set_remote_link(bp); + spin_unlock(&bp->phy_lock); + } static inline u16 @@ -3174,6 +3180,12 @@ load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len, int i; u32 val; + if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) { + val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]); + val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK; + val |= XI_RV2P_PROC2_BD_PAGE_SIZE; + rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val); + } for (i = 0; i < rv2p_code_len; i += 8) { REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code)); @@ -4215,13 +4227,6 @@ bnx2_init_remote_phy(struct bnx2 *bp) if (netif_running(bp->dev)) { u32 sig; - if (val & BNX2_LINK_STATUS_LINK_UP) { - bp->link_up = 1; - netif_carrier_on(bp->dev); - } else { - bp->link_up = 0; - netif_carrier_off(bp->dev); - } sig = BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_REMOTE_PHY_CAPABLE; bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig); @@ -4878,6 +4883,8 @@ bnx2_init_nic(struct bnx2 *bp) spin_lock_bh(&bp->phy_lock); bnx2_init_phy(bp); bnx2_set_link(bp); + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + bnx2_remote_phy_event(bp); spin_unlock_bh(&bp->phy_lock); return 0; } @@ -4920,7 +4927,7 @@ bnx2_test_registers(struct bnx2 *bp) { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 }, { 0x1000, 0, 0x00000000, 0x00000001 }, - { 0x1004, 0, 0x00000000, 0x000f0001 }, + { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 }, { 0x1408, 0, 0x01c00800, 0x00000000 }, { 0x149c, 0, 0x8000ffff, 0x00000000 }, @@ -5717,14 +5724,12 @@ bnx2_reset_task(struct work_struct *work) if (!netif_running(bp->dev)) return; - bp->in_reset_task = 1; bnx2_netif_stop(bp); bnx2_init_nic(bp); atomic_set(&bp->intr_sem, 1); bnx2_netif_start(bp); - bp->in_reset_task = 0; } static void @@ -5900,12 +5905,7 @@ bnx2_close(struct net_device *dev) struct bnx2 *bp = 
netdev_priv(dev); u32 reset_code; - /* Calling flush_scheduled_work() may deadlock because - * linkwatch_event() may be on the workqueue and it will try to get - * the rtnl_lock which we are holding. - */ - while (bp->in_reset_task) - msleep(1); + cancel_work_sync(&bp->reset_task); bnx2_disable_int_sync(bp); bnx2_napi_disable(bp); diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 1eaf5bb3d9c..2377cc13bf6 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -6656,7 +6656,6 @@ struct bnx2 { int current_interval; struct timer_list timer; struct work_struct reset_task; - int in_reset_task; /* Used to synchronize phy accesses. */ spinlock_t phy_lock; diff --git a/drivers/net/bnx2_fw2.h b/drivers/net/bnx2_fw2.h index e6ffa2769f3..ed0514cba0e 100644 --- a/drivers/net/bnx2_fw2.h +++ b/drivers/net/bnx2_fw2.h @@ -3173,251 +3173,267 @@ static struct fw_info bnx2_rxp_fw_09 = { }; static u8 bnx2_xi_rv2p_proc1[] = { - /* Date: 01/14/2008 15:44 */ - 0xc5, 0x56, 0xcd, 0x6b, 0x13, 0x51, 0x10, 0x9f, 0xdd, 0x7c, 0x6c, 0x9a, - 0x6c, 0xb2, 0xa1, 0x6a, 0x09, 0x35, 0xd2, 0x58, 0x7a, 0x30, 0x6d, 0xc4, - 0x56, 0x3d, 0x78, 0x28, 0x54, 0x7a, 0x11, 0xac, 0xa7, 0x1e, 0x44, 0xc4, - 0xcf, 0x20, 0x05, 0xf5, 0x8f, 0x70, 0x51, 0xab, 0x20, 0x78, 0x28, 0x68, - 0xb4, 0x7e, 0xa0, 0x27, 0x15, 0xf1, 0x90, 0x1c, 0x04, 0x05, 0x45, 0x50, - 0xf0, 0xa4, 0x37, 0x41, 0xbd, 0x54, 0xc5, 0x0f, 0xf0, 0xe2, 0x45, 0x8f, - 0xda, 0xf8, 0xde, 0xcc, 0xef, 0xd9, 0xdd, 0x4d, 0xd2, 0x14, 0x0f, 0x1a, - 0x68, 0x7f, 0xec, 0xdb, 0xdf, 0x9b, 0x37, 0xf3, 0x9b, 0x79, 0x33, 0x9b, - 0x27, 0x22, 0x9b, 0xfc, 0xc6, 0x80, 0x42, 0x72, 0xad, 0x58, 0x4a, 0x81, - 0x45, 0x74, 0xcf, 0x65, 0xf4, 0x37, 0x91, 0xfc, 0x46, 0x04, 0xfc, 0x91, - 0xbc, 0xfa, 0xff, 0x9d, 0x26, 0x4a, 0x1a, 0x63, 0x34, 0xb1, 0x5e, 0xe3, - 0x24, 0x3d, 0x29, 0x15, 0x14, 0xfe, 0x6a, 0x92, 0xaf, 0x9f, 0x87, 0xea, - 0x0f, 0x1a, 0x19, 0xb6, 0xfb, 0x0e, 0xfb, 0xdf, 0xc4, 0x04, 0xb7, 0x55, - 0x52, 0x62, 0x07, 0x48, 0x1b, 0xf3, 0x0c, 0xaf, 0xe6, 0xf4, 0x73, 0xd1, - 0xf2, 0x37, 0xe2, 0x7c, 0x5b, 0xd6, 0x17, 0xe6, 0x3c, 0xbd, 0x4e, 0xef, - 0x27, 0xf5, 0xb3, 0x97, 0x3e, 0xdd, 0x48, 0xb1, 0x5d, 0x79, 0xdf, 0x9b, - 0x3e, 0xcd, 0xfb, 0x5c, 0x4b, 0xec, 0xa9, 0x3f, 0xde, 0xbf, 0x55, 0xd9, - 0x81, 0xdf, 0x24, 0x76, 0x0e, 0x96, 0xf4, 0xfa, 0x76, 0xf0, 0xc6, 0xc1, - 0x2b, 0xb6, 0xf0, 0x16, 0xe6, 0x34, 0x3a, 0x54, 0xad, 0xe8, 0x78, 0x06, - 0x49, 0xe2, 0x49, 0xd0, 0x4c, 0xca, 0x15, 0x9d, 0x06, 0x84, 0xfd, 0x6e, - 0x58, 0xef, 0x57, 0xbe, 0x0d, 0x6b, 0xde, 0x82, 0x8a, 0xdb, 0xc4, 0x1b, - 0xe6, 0x39, 0x15, 0x63, 0x57, 0xf3, 0xde, 0x2a, 0x9e, 0x89, 0x2f, 0x18, - 0x57, 0x26, 0x10, 0x57, 0x24, 0xde, 0x96, 0xf8, 0x82, 0x7a, 0xa5, 0xda, - 0xf8, 0xaf, 0xcf, 0x51, 0xbe, 0xf0, 0x39, 0x49, 0xe8, 0x9c, 0x8c, 0xec, - 0x4b, 0x76, 0x88, 0xfb, 0x93, 0x35, 0xb3, 0x21, 0xec, 0x3f, 0x91, 0xb6, - 0xf7, 0x54, 0xf9, 0x8d, 0xf5, 0x72, 0x3b, 0x1d, 0x12, 0xd0, 0xe1, 0x31, - 0xe2, 0x9b, 0xa2, 0x21, 0xbb, 0xc0, 0xef, 0xe3, 0xbc, 0x7f, 0xad, 0xf2, - 0x47, 0xe3, 0x3a, 0xe0, 0x7a, 0xe0, 0x01, 0xe0, 0x7e, 0xe0, 0x1a, 0xe0, - 0x6a, 0xe0, 0x2a, 0x60, 0x2f, 0xf0, 0x32, 0x30, 0x0f, 0xf4, 0x80, 0x39, - 0xe0, 0x05, 0xa0, 0x0b, 0xcc, 0x00, 0x6b, 0xc0, 0xab, 0xc0, 0x14, 0xf0, - 0x28, 0xf0, 0x21, 0xf0, 0x31, 0xf0, 0x0b, 0xf0, 0x1c, 0xd0, 0xb1, 0x60, - 0x0f, 0xa8, 0x7e, 0x3e, 0xee, 0x47, 0x48, 0xa7, 0xeb, 0xa8, 0x7f, 0xad, - 0x33, 0xde, 0x97, 0x0d, 0x0f, 0xf9, 0x65, 0x9d, 0x2e, 0x83, 0xd7, 0x5b, - 0xbf, 0x19, 0xb9, 0x27, 0xa5, 0xae, 0xf7, 0x23, 0x9a, 0x37, 0x8f, 0xe3, - 0x39, 0xb4, 0xc3, 0xe3, 0x73, 0x72, 0x49, 0x59, 0x37, 0x6e, 0xed, 0xf1, - 
0x04, 0x8f, 0xa4, 0x05, 0x3f, 0xa7, 0x7b, 0xd4, 0xff, 0x66, 0x73, 0x26, - 0x23, 0xcf, 0x87, 0xb3, 0x46, 0x67, 0x63, 0xc7, 0xf8, 0xd3, 0xcd, 0x8f, - 0x4e, 0xe7, 0x19, 0xbf, 0xba, 0x9d, 0x2b, 0x58, 0xb5, 0xc3, 0xf1, 0x5f, - 0x19, 0x15, 0x8c, 0x8f, 0x31, 0x54, 0xdc, 0x64, 0x5c, 0xe3, 0x56, 0xf7, - 0xb9, 0x39, 0x47, 0xa3, 0x5b, 0xa8, 0xf1, 0x7d, 0x89, 0x53, 0x2d, 0xa9, - 0xed, 0xfe, 0x6c, 0x9e, 0x17, 0x5e, 0xff, 0xe1, 0x97, 0x8c, 0x85, 0x2b, - 0x2f, 0x84, 0xff, 0xba, 0xe4, 0x32, 0xee, 0x1e, 0xa1, 0xc8, 0xcf, 0xbc, - 0x97, 0xfb, 0xe8, 0xb3, 0xdf, 0x3f, 0x2c, 0xbf, 0x61, 0xce, 0xc1, 0xbe, - 0xe3, 0x26, 0x8f, 0x79, 0xf6, 0x73, 0x90, 0xe4, 0x79, 0xba, 0x2c, 0xef, - 0xa7, 0xcb, 0xb8, 0xcf, 0x83, 0xe1, 0x7a, 0x90, 0x7b, 0x11, 0x43, 0xbe, - 0xf7, 0xe2, 0x5e, 0x44, 0xef, 0x71, 0xaa, 0x7e, 0x73, 0x2e, 0x58, 0x2f, - 0x05, 0xaa, 0x8e, 0xc1, 0x9f, 0x96, 0x3c, 0x9b, 0xbe, 0x6c, 0xea, 0x9d, - 0x97, 0xeb, 0x7e, 0x2c, 0xa4, 0xdf, 0x76, 0xaa, 0x04, 0xf3, 0x64, 0xb5, - 0xa9, 0x97, 0x6e, 0xe7, 0x84, 0xec, 0xe5, 0x54, 0x06, 0xa8, 0xb5, 0x8e, - 0x1d, 0xc4, 0x35, 0x81, 0x3a, 0x5e, 0xdb, 0x52, 0xc7, 0xa6, 0xdf, 0x4b, - 0x3d, 0x77, 0xea, 0x5f, 0x7f, 0xdf, 0xa7, 0x85, 0xe7, 0x07, 0xea, 0xd3, - 0xf4, 0x43, 0xe8, 0xe4, 0x30, 0xaf, 0xb8, 0x70, 0x5f, 0xf2, 0x26, 0xfd, - 0x5c, 0x15, 0xa3, 0x1f, 0xf6, 0xd3, 0x31, 0xf1, 0x0d, 0x04, 0xfb, 0xe7, - 0x50, 0x87, 0x7c, 0x05, 0xfb, 0x6e, 0x54, 0x97, 0x70, 0xdd, 0x4b, 0xfe, - 0xd3, 0xd0, 0xa9, 0xbf, 0x4b, 0x5f, 0xe8, 0x01, 0x6f, 0xcd, 0x32, 0x3c, - 0xb1, 0x3b, 0x59, 0x0e, 0xf6, 0x11, 0xaf, 0x89, 0xfe, 0x87, 0x7d, 0x7d, - 0xf5, 0x47, 0x1d, 0xf2, 0x30, 0xfe, 0x7f, 0xf3, 0x80, 0xf9, 0x52, 0xb4, - 0x24, 0x0f, 0x09, 0x5a, 0x99, 0xbe, 0x84, 0xf8, 0xa9, 0x83, 0xbe, 0x49, - 0xe8, 0xf0, 0x6d, 0x71, 0x79, 0x7d, 0x33, 0xe0, 0x7d, 0x0d, 0xf0, 0xb8, - 0x2e, 0xc6, 0xe5, 0xfe, 0x39, 0xd5, 0x2f, 0x11, 0xdd, 0xc6, 0x2a, 0xba, - 0xaf, 0x9c, 0xa0, 0x06, 0xe2, 0x7a, 0x1b, 0x8a, 0x2f, 0xab, 0xfc, 0x93, - 0xef, 0x84, 0x3b, 0x0d, 0xa3, 0x83, 0xbc, 0x2e, 0x55, 0x04, 0x6f, 0x33, - 0x3f, 0x1f, 0xd0, 0x23, 0xac, 0x9b, 0xe8, 0x91, 0xa7, 0x5b, 0x7f, 0xfa, - 0x8d, 0xc7, 0xf6, 0x46, 0xd1, 0xaf, 0x0f, 0xa1, 0x6f, 0x7e, 0x48, 0x4b, - 0x5f, 0xae, 0x4e, 0x71, 0xff, 0xa4, 0x3e, 0xf4, 0xcf, 0x6a, 0x56, 0x9e, - 0xfb, 0xb3, 0xf2, 0x1d, 0x36, 0xea, 0xb8, 0xcc, 0xeb, 0xcf, 0x0a, 0xf6, - 0x65, 0xf4, 0xbe, 0x02, 0x7d, 0xdc, 0xc5, 0xf4, 0xca, 0xbc, 0x2b, 0x7d, - 0x74, 0xfe, 0x05, 0xfa, 0xba, 0x67, 0x74, 0x42, 0xbc, 0x5b, 0xf4, 0x7a, - 0x1f, 0x7f, 0xf2, 0x2c, 0xe9, 0xab, 0x38, 0xc3, 0xe2, 0xdf, 0x0d, 0x78, - 0x5f, 0x32, 0xfb, 0x06, 0xb4, 0x9e, 0x4f, 0x16, 0xcd, 0xdc, 0x18, 0xdc, - 0xa1, 0xfd, 0xf1, 0x28, 0xe7, 0x48, 0x3e, 0x05, 0x15, 0xcf, 0x76, 0xf4, - 0xb6, 0xe2, 0xac, 0x2d, 0xcf, 0xb3, 0x27, 0xd9, 0xcc, 0xae, 0x59, 0xb3, - 0x3e, 0xc9, 0x05, 0x3a, 0x7d, 0xf7, 0x19, 0xaf, 0xe7, 0x1a, 0x31, 0x59, - 0x77, 0xa6, 0x8c, 0x1e, 0x1e, 0xc7, 0x57, 0x13, 0x3d, 0xf6, 0x5d, 0x14, - 0xdc, 0x4b, 0x3b, 0x19, 0xd3, 0x35, 0x57, 0xe6, 0xca, 0xbc, 0x9b, 0x62, - 0x24, 0xd6, 0xc3, 0xde, 0x2c, 0xf3, 0x21, 0x81, 0xbe, 0xde, 0x13, 0xc8, - 0x53, 0x74, 0xde, 0xae, 0x34, 0x5f, 0xc1, 0x39, 0x60, 0xe6, 0x43, 0xb4, - 0xdf, 0x67, 0x51, 0x67, 0xd7, 0xba, 0xd4, 0xa3, 0xe9, 0x9f, 0x97, 0x16, - 0xe5, 0x1e, 0xb4, 0x9b, 0xb3, 0x1a, 0x73, 0x1d, 0xbe, 0x0f, 0x8a, 0xa8, - 0x3f, 0x33, 0x0f, 0xdb, 0x7d, 0x07, 0x08, 0x7f, 0x65, 0xf3, 0x3f, 0xdf, - 0x61, 0xfe, 0xff, 0xb3, 0x39, 0x5f, 0x58, 0xca, 0xa3, 0xa9, 0xd3, 0x60, - 0x1e, 0x83, 0xf5, 0x1a, 0x9d, 0xc3, 0xcb, 0xcd, 0xdf, 0x1c, 0x74, 0x3e, - 0x06, 0x9d, 0xe3, 0x94, 0x88, 0xb1, 0x30, 0x6e, 0xfc, 0x14, 0xdb, 0xb5, - 
0x67, 0x6d, 0xa6, 0xbb, 0x89, 0x33, 0x96, 0xc6, 0x9c, 0x7b, 0x46, 0x78, - 0x71, 0x59, 0x2f, 0x18, 0x3c, 0x7b, 0x4a, 0xbe, 0xfb, 0x6c, 0xfa, 0x0d, - 0x6d, 0x29, 0x98, 0xe1, 0x30, 0x0d, 0x00, 0x00, 0x00 }; + /* Date: 04/25/2008 22:02 */ + 0xbd, 0x56, 0x4f, 0x68, 0x1c, 0x55, 0x18, 0xff, 0x76, 0x76, 0x77, 0x66, + 0x33, 0x3b, 0xbb, 0xb3, 0xd8, 0x34, 0x4c, 0xb7, 0x2b, 0x59, 0x83, 0x97, + 0xdd, 0x6c, 0x69, 0xa2, 0x15, 0x04, 0x53, 0x5a, 0x72, 0x09, 0xd8, 0x9e, + 0x02, 0xb5, 0x52, 0x84, 0xb6, 0x8b, 0xf4, 0x52, 0x5a, 0x28, 0x78, 0x11, + 0x84, 0x0e, 0x6d, 0x93, 0x82, 0xe8, 0x61, 0xc1, 0x06, 0x12, 0x44, 0xa3, + 0x07, 0x95, 0x60, 0x61, 0x07, 0x3c, 0x78, 0x10, 0x14, 0x15, 0x11, 0x6c, + 0x0f, 0x85, 0x88, 0xf6, 0xd2, 0x54, 0x4b, 0x0b, 0x1e, 0x5b, 0x3c, 0xd6, + 0x8c, 0xef, 0xfb, 0xf3, 0x92, 0x99, 0x97, 0x9d, 0x24, 0xa7, 0x2e, 0xb4, + 0x3f, 0xbe, 0x37, 0xdf, 0xbf, 0xf7, 0xfd, 0xf9, 0xbd, 0xd4, 0x00, 0xc0, + 0x82, 0x30, 0x1a, 0x55, 0x08, 0x65, 0x2b, 0x5f, 0x52, 0x90, 0x03, 0xf8, + 0x1a, 0xf8, 0x57, 0xf4, 0x48, 0x0e, 0x0f, 0x8a, 0x3c, 0xce, 0x10, 0x8e, + 0xd7, 0xd4, 0xff, 0x17, 0xe0, 0x48, 0x13, 0x31, 0x0f, 0x47, 0x5e, 0x40, + 0x3c, 0x0c, 0xdf, 0x37, 0x03, 0x85, 0xff, 0xc5, 0x10, 0xa2, 0x3c, 0xdc, + 0xff, 0x36, 0x2a, 0x93, 0xff, 0x35, 0xb1, 0xff, 0x33, 0xcf, 0xf8, 0x6a, + 0xa7, 0xc4, 0x7e, 0x04, 0xe1, 0x40, 0x8d, 0x60, 0xb5, 0x87, 0xf2, 0x89, + 0x13, 0x60, 0xa3, 0x9f, 0x4f, 0x94, 0x02, 0xca, 0x8d, 0x5c, 0x78, 0x40, + 0xf2, 0xb2, 0x58, 0xef, 0x5e, 0xcf, 0xc7, 0x73, 0xb8, 0x3f, 0x8d, 0xf2, + 0x3e, 0xf7, 0x5a, 0x0f, 0x31, 0x80, 0x73, 0x25, 0x8f, 0xef, 0x33, 0xca, + 0x6e, 0xd7, 0xda, 0x68, 0xa7, 0x74, 0xdb, 0xe2, 0xb7, 0x88, 0x7e, 0xff, + 0x89, 0xd9, 0x2f, 0xfa, 0x4b, 0xfa, 0x69, 0x28, 0x3f, 0x78, 0x6e, 0x4b, + 0x5e, 0xb6, 0x91, 0x97, 0xad, 0xf2, 0x90, 0x3a, 0x80, 0xce, 0x03, 0x71, + 0xaf, 0x8a, 0x8b, 0x7e, 0x1f, 0xcb, 0xbd, 0x01, 0x4e, 0x37, 0xc5, 0x7f, + 0x84, 0xe8, 0xe5, 0xd8, 0x9f, 0xfa, 0x27, 0xf7, 0xd8, 0xea, 0x47, 0xd7, + 0x29, 0x9d, 0xbf, 0xd3, 0xd1, 0xdf, 0x75, 0x3f, 0x30, 0xce, 0x1d, 0x15, + 0x27, 0xa9, 0x0f, 0x3b, 0xe8, 0xff, 0xa6, 0xf4, 0xd3, 0x7e, 0xf9, 0xfc, + 0xd7, 0xcd, 0xf3, 0xd6, 0xa0, 0xba, 0x15, 0x8d, 0xba, 0xfd, 0x28, 0x75, + 0x9b, 0x81, 0x17, 0xad, 0x80, 0xf4, 0x0a, 0x80, 0xb8, 0x5f, 0x25, 0x80, + 0xf8, 0xbc, 0xe0, 0x45, 0xc1, 0xcf, 0x04, 0x97, 0x05, 0xf7, 0x0a, 0x0e, + 0x0b, 0xee, 0x11, 0x7c, 0x4e, 0xf0, 0x6f, 0xc1, 0x9a, 0xa0, 0x2f, 0x58, + 0x15, 0xbc, 0x27, 0xe8, 0x09, 0x96, 0x0d, 0x7f, 0x75, 0xc1, 0x92, 0x60, + 0x24, 0xf8, 0x9a, 0x61, 0xef, 0xe6, 0x18, 0x57, 0x45, 0x3e, 0x28, 0xf2, + 0x49, 0x91, 0xb1, 0xa0, 0x32, 0xf7, 0xa9, 0x7a, 0x7d, 0xbe, 0xd1, 0xdf, + 0xd5, 0x9e, 0x7c, 0x6f, 0x69, 0xbd, 0x12, 0xd5, 0x0f, 0xda, 0x49, 0xfd, + 0x8f, 0xb7, 0xd1, 0x67, 0xb5, 0xe9, 0xd6, 0x20, 0xbb, 0x1b, 0x31, 0xe7, + 0xf1, 0x91, 0xd8, 0x07, 0xfd, 0xef, 0x32, 0xf6, 0x68, 0xaa, 0x63, 0xce, + 0xd7, 0xa0, 0x3d, 0x7a, 0x45, 0xf6, 0xe8, 0xd0, 0x96, 0xf9, 0xe5, 0x39, + 0x3d, 0x2a, 0xf6, 0x53, 0x32, 0x9f, 0x8d, 0x0c, 0xbd, 0x30, 0xb1, 0xaf, + 0x14, 0x2f, 0x63, 0x1f, 0x6e, 0xe6, 0xba, 0x1d, 0x8c, 0x5b, 0x94, 0xb8, + 0x59, 0xf9, 0xa1, 0xbd, 0xcc, 0x6f, 0x4b, 0xcf, 0x71, 0x7a, 0x7e, 0x79, + 0x0e, 0x6d, 0x63, 0x0e, 0x2f, 0xed, 0xd0, 0x87, 0xb2, 0x51, 0xcf, 0xf3, + 0x4a, 0x9f, 0x45, 0xcb, 0x62, 0x5c, 0x62, 0xec, 0x78, 0x76, 0x01, 0xf1, + 0x90, 0xf7, 0x0b, 0xfb, 0x1b, 0xa5, 0x7b, 0x78, 0xc1, 0x02, 0xed, 0x6d, + 0x01, 0x16, 0xec, 0x21, 0x85, 0x4f, 0xe3, 0x0f, 0x59, 0xaf, 0x5e, 0xbc, + 0x4d, 0x18, 0x2c, 0xdd, 0x62, 0xfd, 0x3f, 0x9a, 0x9c, 0xf7, 0x1b, 0xe3, + 0x60, 0xfc, 0xf4, 0x77, 0xd9, 0x77, 0x1f, 0xe5, 0x7f, 0x73, 
0x61, 0xa4, + 0xe3, 0x88, 0xdd, 0x79, 0xbd, 0x47, 0xfc, 0xbb, 0x62, 0xd7, 0xa8, 0x6e, + 0xef, 0x47, 0x24, 0x0e, 0x7b, 0xf3, 0xcc, 0xaf, 0x1f, 0x44, 0xfa, 0x3e, + 0xc2, 0x2b, 0x6d, 0xb6, 0xab, 0x50, 0x9c, 0x3d, 0xfd, 0x65, 0x63, 0x3e, + 0x9a, 0xbb, 0xe2, 0xd7, 0x27, 0xf1, 0x26, 0xbf, 0x26, 0xef, 0xaf, 0xf9, + 0xb5, 0x04, 0x67, 0x66, 0x7c, 0x8a, 0x57, 0xb5, 0xd9, 0xcd, 0x9b, 0x3e, + 0xe3, 0xdb, 0x2e, 0xe3, 0x43, 0x17, 0xeb, 0x13, 0xc7, 0xe7, 0xca, 0x2c, + 0x9f, 0xad, 0xe8, 0xbd, 0xd6, 0xf6, 0x3a, 0xaf, 0xed, 0xf2, 0xc1, 0xf8, + 0x3a, 0x8e, 0xce, 0x43, 0xc7, 0x4b, 0xcf, 0x43, 0x76, 0x5c, 0xc6, 0xae, + 0x95, 0xae, 0xc3, 0xd2, 0x04, 0x63, 0x61, 0x12, 0xf3, 0xfa, 0x21, 0xde, + 0xd8, 0xeb, 0x56, 0x8d, 0xf4, 0xc6, 0x80, 0xe5, 0x59, 0x99, 0xbf, 0x59, + 0xda, 0x47, 0xc5, 0x37, 0x16, 0x62, 0x1d, 0x42, 0x7a, 0x6f, 0x2c, 0xf7, + 0x67, 0x9a, 0x87, 0xbc, 0x9c, 0xab, 0xfa, 0x8f, 0xa5, 0xf7, 0x78, 0x8d, + 0xe7, 0xad, 0x94, 0x9e, 0xd3, 0x46, 0x3c, 0x78, 0xfe, 0xdd, 0xfe, 0x72, + 0x6f, 0x50, 0x3f, 0x74, 0x7e, 0x01, 0x74, 0x27, 0xb3, 0xde, 0x09, 0xfd, + 0x3e, 0x6b, 0x9e, 0xa4, 0xe3, 0x7e, 0x98, 0x4f, 0xdd, 0xfb, 0x28, 0x74, + 0x06, 0xf9, 0xff, 0x46, 0xbf, 0x7b, 0x03, 0xf6, 0x76, 0xa7, 0xb8, 0x29, + 0xff, 0x55, 0xb5, 0x39, 0xb0, 0x75, 0xef, 0x1c, 0x63, 0x4f, 0x9f, 0xae, + 0xf3, 0x9e, 0x36, 0xb6, 0xcc, 0xa7, 0xe6, 0xaf, 0xe6, 0xb6, 0xfc, 0xf5, + 0xac, 0xf8, 0xca, 0x02, 0xe6, 0x2b, 0x7c, 0x4f, 0xd2, 0x79, 0x3a, 0xfa, + 0x9e, 0x06, 0x2f, 0xf1, 0xfd, 0xee, 0xaf, 0xef, 0x8e, 0xdf, 0x92, 0x75, + 0x1a, 0xc4, 0x6f, 0xae, 0xc1, 0x57, 0xbf, 0xaf, 0x6f, 0xf2, 0x1b, 0x7e, + 0x5f, 0x59, 0xe1, 0xfe, 0xbd, 0x97, 0x98, 0xdf, 0x64, 0xdd, 0x87, 0xa4, + 0xee, 0x4a, 0x8f, 0xec, 0x6f, 0x1b, 0xf6, 0xba, 0xff, 0xef, 0x08, 0x6f, + 0x5a, 0x53, 0x3c, 0x7f, 0x4e, 0xf7, 0x91, 0xd1, 0x97, 0xc9, 0x0e, 0xee, + 0xd5, 0x65, 0x88, 0xa4, 0x6e, 0x77, 0x53, 0xf5, 0xab, 0x08, 0x4f, 0x38, + 0xf0, 0x55, 0xa4, 0xeb, 0xac, 0xfb, 0xc8, 0xf8, 0x25, 0xe9, 0xd7, 0x76, + 0xa8, 0x77, 0x0d, 0xbe, 0xd8, 0xe0, 0x41, 0x9f, 0xfc, 0x4d, 0x08, 0xaf, + 0x9c, 0x91, 0xfd, 0xfe, 0xcb, 0x65, 0xfe, 0xe8, 0x1e, 0xa3, 0x3d, 0x87, + 0x11, 0xd9, 0xf3, 0x6e, 0x85, 0xe5, 0x7a, 0x85, 0x79, 0x71, 0xc2, 0xf1, + 0x48, 0xaf, 0x5e, 0x61, 0x1c, 0x29, 0xa3, 0x5d, 0x00, 0x0f, 0x8e, 0x93, + 0x7a, 0x67, 0xd1, 0x63, 0x7e, 0x5f, 0xbc, 0x25, 0xfc, 0xe3, 0xeb, 0xfa, + 0xc9, 0x7d, 0x5f, 0xc6, 0xf3, 0x11, 0xb5, 0xcf, 0xc9, 0x7e, 0x28, 0x9d, + 0x36, 0xe7, 0xf7, 0xa9, 0x64, 0xdf, 0xf4, 0x93, 0xf5, 0xd6, 0xf3, 0xbd, + 0x9c, 0xd1, 0xa7, 0x99, 0x58, 0xf3, 0xdf, 0xd8, 0x0c, 0xe6, 0xeb, 0x43, + 0xd5, 0xe1, 0xf9, 0x60, 0x54, 0x7e, 0x2c, 0x07, 0xcd, 0x1a, 0x73, 0xc2, + 0x27, 0x73, 0x57, 0xc8, 0xcd, 0xf1, 0x39, 0x7d, 0x3e, 0x4d, 0x0b, 0x32, + 0xbb, 0xf2, 0x13, 0x9d, 0x57, 0xa3, 0x3c, 0x9f, 0x3b, 0xc7, 0x74, 0xbd, + 0x7c, 0xba, 0xff, 0x02, 0xd7, 0xeb, 0xad, 0x1b, 0x8c, 0xa7, 0xe0, 0x75, + 0x42, 0x77, 0xc1, 0x63, 0x7e, 0x5c, 0xf4, 0x4a, 0x84, 0x40, 0xf5, 0xb2, + 0x5e, 0xe2, 0x77, 0xad, 0x28, 0xef, 0xd1, 0x50, 0xa2, 0x8f, 0xe6, 0xfb, + 0xb1, 0xdb, 0x7e, 0x26, 0xf9, 0x54, 0xbf, 0x6b, 0x39, 0xe3, 0xef, 0xc8, + 0x8a, 0x31, 0x9f, 0xef, 0x66, 0xcc, 0x67, 0x33, 0x63, 0xbe, 0x4d, 0x5e, + 0xb9, 0x24, 0x7b, 0x57, 0x80, 0x62, 0x9e, 0x1e, 0x26, 0xaf, 0x70, 0x95, + 0xfa, 0x6b, 0xcd, 0xf1, 0xbb, 0xee, 0x15, 0xe7, 0x73, 0x54, 0x37, 0x6f, + 0x9e, 0xf5, 0x0a, 0x7c, 0x1e, 0x68, 0xbc, 0x7e, 0x95, 0xdf, 0x4f, 0x0b, + 0xfe, 0x07, 0x89, 0x6e, 0x1e, 0x13, 0x00, 0x0d, 0x00, 0x00, 0x00 }; static u8 bnx2_xi_rv2p_proc2[] = { - /* Date: 01/14/2008 15:44 */ - 0xad, 0x58, 0x5d, 0x6c, 0xd3, 0x55, 0x14, 0xbf, 0xfd, 0x58, 0xdb, 0x75, - 
0xff, 0xb6, 0x63, 0x9b, 0xdd, 0xa7, 0x6e, 0x6e, 0x61, 0x6c, 0xd8, 0xcd, - 0xd1, 0x8d, 0x4f, 0x4d, 0x5c, 0x86, 0x19, 0x20, 0x26, 0x8c, 0x61, 0xd4, - 0x37, 0xd8, 0x90, 0xb2, 0xb2, 0x8d, 0x2c, 0x8c, 0xf0, 0xc0, 0x8b, 0x0d, - 0xd3, 0xf1, 0xd2, 0x07, 0x47, 0xb2, 0x0d, 0x8d, 0xc1, 0x45, 0x7d, 0x40, - 0x9f, 0xec, 0x83, 0x52, 0x30, 0xc6, 0xc4, 0xe8, 0x42, 0xf0, 0x01, 0x48, - 0x30, 0xc6, 0x68, 0x48, 0x08, 0xea, 0x32, 0x10, 0x75, 0x0c, 0xfb, 0x64, - 0x98, 0xf7, 0x9e, 0xdf, 0xb9, 0xff, 0xfe, 0xff, 0x5d, 0x27, 0x18, 0xec, - 0x43, 0x4f, 0xef, 0xbd, 0xe7, 0x9e, 0x7b, 0x3e, 0x7e, 0xe7, 0x9c, 0x7b, - 0x5b, 0x2c, 0x84, 0x70, 0x8a, 0x44, 0xaa, 0x56, 0x52, 0x61, 0x38, 0x5c, - 0x02, 0x9f, 0xb5, 0xc5, 0x44, 0xae, 0xa5, 0x7c, 0xf2, 0xbb, 0x40, 0xbc, - 0xe4, 0xac, 0xa0, 0xb1, 0x5b, 0x28, 0x1a, 0x12, 0x22, 0x61, 0xa5, 0xa5, - 0x4c, 0xaf, 0x32, 0xfd, 0x9d, 0xe9, 0xe3, 0x0e, 0xd0, 0x2b, 0x3c, 0xde, - 0xc2, 0xe3, 0x6b, 0x3c, 0xfe, 0x91, 0xe9, 0x46, 0x9e, 0xdf, 0xcc, 0x34, - 0xc9, 0x74, 0x3b, 0xaf, 0xa7, 0x99, 0xca, 0x4f, 0xc2, 0x90, 0x5f, 0x72, - 0xb9, 0x59, 0xeb, 0x69, 0x60, 0xba, 0x19, 0xfa, 0xee, 0xa9, 0x53, 0x7c, - 0xf3, 0x4b, 0x59, 0x3e, 0xcc, 0x5f, 0x9f, 0x00, 0xad, 0xc5, 0xae, 0x8f, - 0x13, 0x4f, 0xeb, 0xfd, 0x20, 0x7d, 0x01, 0xd0, 0x7e, 0xb6, 0xbf, 0x33, - 0x42, 0x24, 0xb9, 0xdf, 0x89, 0x71, 0x77, 0xa3, 0xf2, 0x43, 0x89, 0x70, - 0x3b, 0x95, 0x9c, 0x56, 0x9f, 0xe7, 0x3c, 0xe6, 0x5f, 0x0d, 0x81, 0xbe, - 0xe6, 0x07, 0xfd, 0xc5, 0x5f, 0x28, 0xbf, 0x97, 0x96, 0x62, 0x45, 0x2c, - 0xdf, 0x60, 0xb5, 0x8b, 0xb0, 0x7f, 0xd6, 0x80, 0x1e, 0x2f, 0xd7, 0x41, - 0xbf, 0xef, 0x9f, 0x52, 0xf3, 0x2e, 0x91, 0x60, 0x39, 0x42, 0x68, 0x3d, - 0x79, 0x7d, 0x10, 0xfb, 0x56, 0xad, 0xc1, 0xea, 0x5b, 0x31, 0x8c, 0xab, - 0x3f, 0x28, 0xa6, 0xb8, 0x9c, 0x4e, 0x69, 0xfe, 0x7c, 0x72, 0xdd, 0x52, - 0x2e, 0xe4, 0x8b, 0x7a, 0x1f, 0x29, 0x93, 0x88, 0x80, 0x8a, 0x96, 0xdc, - 0x73, 0x20, 0x7f, 0x6a, 0xb5, 0x9a, 0x77, 0x8a, 0x5e, 0x97, 0x9a, 0xf7, - 0x88, 0xde, 0xb8, 0xf6, 0x2f, 0xd6, 0x63, 0x1e, 0x22, 0x15, 0x7d, 0xe3, - 0xca, 0xce, 0x90, 0xd8, 0xe7, 0x0c, 0x11, 0x3f, 0xfc, 0xe2, 0xf2, 0x19, - 0x9f, 0x81, 0xff, 0xcb, 0x5a, 0x83, 0x6c, 0x89, 0xb5, 0x63, 0x5f, 0x59, - 0x14, 0x74, 0x32, 0x5a, 0xa0, 0x48, 0x24, 0x36, 0x4a, 0xc3, 0xd6, 0x9b, - 0xeb, 0x7c, 0xc4, 0x97, 0x68, 0xd1, 0xf1, 0xd3, 0xf1, 0x52, 0x71, 0xfc, - 0x44, 0xc6, 0x91, 0xdd, 0xd2, 0x00, 0xbf, 0xfe, 0xba, 0x5a, 0xf1, 0x4b, - 0xe7, 0xd6, 0xe3, 0x9c, 0xac, 0x7e, 0xd6, 0xf8, 0x7f, 0xf4, 0x1f, 0xe2, - 0xaf, 0xe4, 0x75, 0xb2, 0x5f, 0xea, 0xa4, 0x5f, 0x14, 0xad, 0x71, 0x24, - 0x5a, 0xec, 0xf1, 0xb8, 0x3e, 0x11, 0xa2, 0xdf, 0xb7, 0xba, 0x8a, 0xc9, - 0xaf, 0xbb, 0x30, 0x7f, 0xaa, 0xfb, 0x1c, 0xe2, 0xb1, 0x83, 0xec, 0x17, - 0xfe, 0x37, 0x3e, 0xc5, 0xae, 0xbe, 0x80, 0x1a, 0xbf, 0xd2, 0x11, 0xbb, - 0x80, 0xf5, 0x82, 0x31, 0xf8, 0x75, 0x17, 0x4b, 0xdd, 0xe1, 0x72, 0x28, - 0x92, 0xf4, 0x8c, 0xd1, 0xd0, 0x98, 0xa5, 0x75, 0x43, 0x9c, 0x4c, 0x61, - 0xfd, 0x70, 0x91, 0x1a, 0xef, 0x8a, 0xcc, 0x63, 0x1c, 0x89, 0x8f, 0xf3, - 0x46, 0x27, 0xfc, 0x70, 0xcb, 0x09, 0x79, 0x0c, 0x2f, 0xbf, 0x9b, 0xe2, - 0xe0, 0x10, 0x46, 0x37, 0xe8, 0x9b, 0xb4, 0xfe, 0xb7, 0x23, 0x49, 0x76, - 0x77, 0x07, 0xdd, 0xe7, 0xc0, 0xc8, 0xb8, 0x36, 0x71, 0xab, 0x71, 0xff, - 0xb0, 0xf8, 0x1d, 0x37, 0x34, 0x5e, 0xd9, 0xff, 0xec, 0xdf, 0xf7, 0x44, - 0x2e, 0x4e, 0x41, 0xbb, 0x1b, 0x41, 0x3d, 0x0d, 0xb9, 0x78, 0xd5, 0xf8, - 0xb4, 0xfb, 0x99, 0xe3, 0x63, 0xc1, 0x0b, 0x11, 0x89, 0x13, 0x1b, 0x6e, - 0x18, 0xa7, 0x95, 0xd2, 0x5f, 0x3a, 0xfe, 0x4a, 0x90, 0x57, 0x0c, 0xb2, - 0xbc, 0x38, 0xdb, 0x35, 0xc4, 0x76, 0xdd, 0xf1, 0x6b, 0xbf, 0x6a, 0x7b, - 
0x40, 0x4f, 0xda, 0xec, 0x71, 0x48, 0x3c, 0xd9, 0x71, 0xc8, 0xfa, 0x24, - 0xbf, 0xa9, 0xc7, 0x8f, 0xea, 0x06, 0x50, 0xd3, 0xce, 0x46, 0xc5, 0xe7, - 0x89, 0x4e, 0xa7, 0xec, 0x38, 0xd4, 0xf9, 0xb8, 0xa7, 0x4e, 0xcb, 0x57, - 0xb8, 0xcc, 0x48, 0x5c, 0x22, 0x6e, 0xa7, 0x53, 0xd6, 0xfc, 0xac, 0xca, - 0x93, 0x9f, 0xf6, 0xbc, 0xd0, 0x7e, 0x39, 0x1c, 0xa0, 0x02, 0xd5, 0x71, - 0x79, 0xce, 0x7e, 0x1e, 0xf0, 0xed, 0x35, 0xf1, 0x53, 0xb6, 0x81, 0xfd, - 0xc7, 0x34, 0xbc, 0x51, 0xc9, 0xeb, 0x61, 0xf9, 0x6d, 0x2c, 0xdf, 0xb0, - 0xe4, 0x9d, 0xd2, 0xaf, 0xcb, 0xcc, 0x37, 0x1d, 0xb7, 0x6c, 0xde, 0x69, - 0xff, 0xd1, 0xf9, 0x91, 0xcb, 0x73, 0x6a, 0x7f, 0xf5, 0x03, 0xf2, 0x70, - 0x93, 0x29, 0xef, 0x3b, 0x33, 0xdf, 0xd4, 0x7a, 0x91, 0x78, 0x8e, 0x87, - 0xf6, 0x7a, 0xf2, 0xa7, 0xac, 0x27, 0x64, 0x87, 0xcf, 0x38, 0xc7, 0xf5, - 0x63, 0x54, 0x9d, 0x53, 0xc1, 0x7a, 0x57, 0xb0, 0xde, 0xb2, 0x5f, 0xb5, - 0x70, 0x9d, 0xd9, 0x6b, 0xad, 0x17, 0x6b, 0x2d, 0x79, 0xaf, 0xc6, 0x4d, - 0x4b, 0xcb, 0xfb, 0x85, 0xcd, 0x9f, 0x09, 0x41, 0xfe, 0xf7, 0x72, 0x7c, - 0x3c, 0x79, 0xfa, 0x8b, 0xe6, 0x07, 0xbe, 0xb6, 0x11, 0xbf, 0xcf, 0xc4, - 0xbf, 0xdd, 0xde, 0xaa, 0x3c, 0x75, 0x27, 0xd7, 0x7e, 0xf8, 0xb3, 0xcf, - 0x19, 0x20, 0xbe, 0x1b, 0x23, 0x6a, 0xdf, 0x49, 0x87, 0xf6, 0x53, 0x27, - 0xea, 0x90, 0x03, 0xf6, 0xd6, 0xb0, 0xbd, 0x72, 0xb9, 0x85, 0xf0, 0xef, - 0xbb, 0x31, 0x62, 0xb5, 0xd7, 0xf8, 0x97, 0xf3, 0xec, 0xb8, 0x19, 0xe1, - 0x3e, 0xd6, 0x8f, 0xbc, 0xf0, 0xed, 0xff, 0x5c, 0xeb, 0xc3, 0xe7, 0x86, - 0xf4, 0xf9, 0x4a, 0x5e, 0xb5, 0x98, 0x1b, 0x55, 0xfb, 0x1f, 0x13, 0x0c, - 0x33, 0x31, 0xdc, 0x84, 0xfa, 0x77, 0xe7, 0x00, 0xf4, 0x1f, 0x6e, 0xd4, - 0x7d, 0x1c, 0x38, 0x16, 0x5c, 0xff, 0xbf, 0x9e, 0xc8, 0xe7, 0x97, 0x41, - 0x07, 0xf8, 0xca, 0xd8, 0xae, 0x62, 0xb6, 0x2b, 0x22, 0x72, 0xeb, 0xec, - 0x5e, 0xca, 0x97, 0x4e, 0xe6, 0x7b, 0x56, 0xd7, 0xe3, 0x65, 0x7c, 0xb0, - 0xbf, 0x80, 0xcf, 0xcf, 0xe7, 0xaf, 0x7c, 0x72, 0xd3, 0x8c, 0xa3, 0x01, - 0xe6, 0x73, 0xe7, 0xa9, 0xf3, 0x18, 0x65, 0xd6, 0x50, 0x9d, 0x3f, 0x73, - 0x3c, 0xad, 0xf8, 0x02, 0x26, 0xce, 0xed, 0x76, 0xfd, 0x74, 0xff, 0xd1, - 0xfd, 0xaf, 0xf8, 0xc2, 0xe2, 0x60, 0x70, 0x25, 0x3f, 0xbb, 0xd5, 0xf4, - 0xcc, 0x42, 0x5a, 0xc7, 0xc9, 0x20, 0x3b, 0xe7, 0x46, 0xd5, 0xf9, 0x1f, - 0xe6, 0xf8, 0xdf, 0x69, 0xf1, 0x3f, 0xf8, 0x9f, 0x88, 0x3c, 0xaa, 0xdf, - 0xf3, 0xf5, 0xe5, 0x2f, 0xee, 0x2f, 0xcf, 0x13, 0x35, 0x7f, 0xe1, 0xa1, - 0xfd, 0xb1, 0xbb, 0xdd, 0x6a, 0x7f, 0x83, 0x98, 0x4d, 0x21, 0xbf, 0x7a, - 0x18, 0x87, 0xfb, 0xb8, 0x5e, 0xdf, 0xf0, 0xab, 0x09, 0x9f, 0xe8, 0xdf, - 0x49, 0xfe, 0x10, 0xe1, 0x22, 0xf8, 0xa7, 0xff, 0x45, 0xed, 0x4f, 0xcc, - 0x57, 0x51, 0xbf, 0x75, 0x89, 0x1e, 0xaf, 0x41, 0xfc, 0x55, 0x01, 0xd0, - 0x30, 0xd7, 0xf9, 0x59, 0xb3, 0x8f, 0x81, 0x9e, 0xf6, 0xe8, 0xba, 0x8c, - 0x7e, 0xfe, 0x95, 0x47, 0x31, 0xc8, 0x20, 0x35, 0xa3, 0x3e, 0x77, 0x35, - 0x1a, 0xb4, 0xde, 0xdb, 0x0c, 0x3c, 0x89, 0x7a, 0xdd, 0xe7, 0xf0, 0xe1, - 0x3e, 0x50, 0x95, 0xed, 0x77, 0xd6, 0x7e, 0x58, 0x68, 0xe9, 0x07, 0xfa, - 0x3c, 0xed, 0x47, 0x2d, 0x97, 0x86, 0xb2, 0xaf, 0x58, 0xfb, 0xa1, 0xee, - 0x13, 0x8b, 0xdc, 0x27, 0x4a, 0xc5, 0xc5, 0x14, 0xec, 0x9a, 0x4d, 0xe5, - 0xe2, 0x4f, 0x9f, 0xa7, 0xe5, 0x41, 0x6f, 0x6d, 0x47, 0x56, 0x3e, 0xce, - 0x3f, 0xc0, 0x7a, 0xfe, 0x4c, 0xf7, 0xd8, 0x30, 0xdb, 0xa3, 0xe4, 0x62, - 0x7e, 0x3b, 0xf7, 0xe7, 0x84, 0x39, 0xb6, 0xf7, 0xd5, 0x1e, 0xd2, 0xab, - 0x84, 0xf1, 0x16, 0xb6, 0xe4, 0x03, 0xf8, 0xcb, 0xda, 0x40, 0x27, 0xdb, - 0x74, 0x1c, 0x74, 0xbc, 0x74, 0x7c, 0x10, 0xc7, 0xf0, 0x3a, 0x62, 0xeb, - 0xe8, 0x5f, 0x47, 0x7d, 0xa4, 0xad, 0x7f, 0x41, 0xe3, 0x0f, 0xfb, 0x77, - 
0x47, 0x14, 0xff, 0xeb, 0xe2, 0x2a, 0xe1, 0x50, 0x88, 0x1f, 0x98, 0x66, - 0xfb, 0x15, 0x07, 0xc0, 0xcc, 0x57, 0x8e, 0x5f, 0x01, 0x4f, 0xb7, 0xeb, - 0x7a, 0xae, 0xe3, 0x65, 0xcd, 0xd7, 0xd8, 0x32, 0xdc, 0x66, 0xeb, 0xb2, - 0xb6, 0x53, 0xf1, 0x47, 0x18, 0x8f, 0x3e, 0xd1, 0xb5, 0x0d, 0xf7, 0xdc, - 0xa0, 0x17, 0x75, 0x3f, 0xe8, 0xb5, 0xc6, 0x4b, 0xe2, 0xa2, 0xd0, 0xab, - 0x86, 0x35, 0x25, 0x85, 0x64, 0xcf, 0xa9, 0x4b, 0xdf, 0xd2, 0xf2, 0xfb, - 0xd3, 0x45, 0x98, 0x2f, 0xdf, 0x19, 0x22, 0x7f, 0x4c, 0x01, 0xcf, 0xef, - 0x4e, 0x82, 0xbe, 0x23, 0x5e, 0xc0, 0xfe, 0x92, 0x13, 0x74, 0x0f, 0xf4, - 0x95, 0x33, 0x3e, 0x2b, 0x50, 0x27, 0x92, 0xd3, 0x74, 0x2f, 0x59, 0x5a, - 0x12, 0x01, 0x45, 0x3d, 0x66, 0xbf, 0x01, 0x3e, 0xdd, 0x96, 0x38, 0x3f, - 0x08, 0xaf, 0x74, 0xaf, 0x94, 0x78, 0xc4, 0x76, 0xc6, 0xad, 0x2f, 0x17, - 0xb7, 0xda, 0x1f, 0x15, 0xce, 0xbc, 0x38, 0xdd, 0x60, 0xc7, 0xa9, 0x87, - 0x71, 0x7a, 0xcf, 0xec, 0xef, 0xcb, 0xe5, 0xa2, 0xcf, 0x5f, 0xfc, 0xdf, - 0x70, 0x0b, 0xba, 0xbd, 0x41, 0x9d, 0x5f, 0xbe, 0xac, 0x1e, 0xd7, 0xda, - 0xe2, 0xdc, 0x7c, 0x5f, 0xeb, 0x75, 0xc2, 0x63, 0x5d, 0x6f, 0x31, 0xfb, - 0xd9, 0x11, 0x7e, 0xe7, 0x65, 0x0c, 0xfa, 0x11, 0xbd, 0x93, 0xa4, 0xa1, - 0x51, 0x79, 0x56, 0xf1, 0x35, 0x45, 0x8f, 0x70, 0xbd, 0xbd, 0xe4, 0x42, - 0xbd, 0x19, 0x38, 0x80, 0xf1, 0x65, 0xae, 0x1f, 0x77, 0xd7, 0x50, 0x5d, - 0x8e, 0x1e, 0x39, 0xaf, 0xe5, 0x91, 0x1c, 0x23, 0xc3, 0x75, 0xfd, 0x79, - 0x17, 0xd7, 0x5b, 0xf2, 0x9b, 0x3b, 0xfa, 0x07, 0xdd, 0x67, 0xdc, 0xa2, - 0xeb, 0x49, 0x45, 0x2b, 0x65, 0xfd, 0xe6, 0xf3, 0x9f, 0x01, 0xed, 0xf5, - 0x82, 0x8a, 0x66, 0x7b, 0x3c, 0x84, 0x69, 0x17, 0x46, 0x9e, 0x7a, 0x96, - 0xd3, 0x87, 0xb1, 0x97, 0xef, 0x65, 0xd3, 0xec, 0xa7, 0x20, 0xf9, 0xa3, - 0x58, 0xda, 0xa9, 0x68, 0x28, 0x3a, 0x9a, 0x86, 0xfe, 0x43, 0x5b, 0x61, - 0xdf, 0x22, 0xdb, 0xcd, 0x34, 0xf8, 0xf6, 0x18, 0xe1, 0x2f, 0x38, 0x8e, - 0x77, 0x48, 0xd0, 0x33, 0x06, 0x3b, 0x86, 0x32, 0x18, 0x2f, 0x6e, 0x06, - 0xfd, 0x6b, 0x0b, 0xf6, 0x1d, 0x3d, 0xce, 0xfe, 0xd8, 0x9a, 0x7f, 0xdf, - 0xc0, 0x3d, 0xf0, 0x0d, 0x37, 0xa9, 0xf3, 0x07, 0x67, 0xf8, 0xfd, 0x22, - 0xe2, 0x2e, 0x35, 0x8e, 0x1b, 0x19, 0x1e, 0x1f, 0xe2, 0xfa, 0x7e, 0x9b, - 0xdf, 0x1b, 0x43, 0x39, 0xef, 0x8d, 0x79, 0xdc, 0x33, 0x67, 0x32, 0x49, - 0xe0, 0x22, 0x51, 0x98, 0xfb, 0x5e, 0x55, 0xe3, 0x9a, 0x68, 0x39, 0xc7, - 0xa9, 0x6c, 0x3d, 0xe8, 0xe4, 0x7a, 0xbc, 0x13, 0x86, 0x8e, 0xb1, 0x5f, - 0x3a, 0x28, 0x4e, 0xad, 0x0b, 0xe9, 0x95, 0xde, 0xc9, 0xe0, 0x9b, 0xe2, - 0x73, 0xc3, 0xdc, 0x4f, 0xc2, 0xc8, 0x3f, 0x51, 0x91, 0xe4, 0x77, 0xcb, - 0x04, 0xee, 0x9d, 0x53, 0x06, 0x68, 0x38, 0xa0, 0xf5, 0x45, 0x3e, 0x26, - 0x52, 0xc8, 0x3b, 0xac, 0x3b, 0x2c, 0xeb, 0x7c, 0x1f, 0x59, 0xf6, 0xce, - 0x51, 0x74, 0xd1, 0x51, 0xea, 0xd0, 0x76, 0x62, 0xb5, 0x3f, 0xa0, 0xf0, - 0x7b, 0xd3, 0xcc, 0xab, 0x79, 0xf2, 0x5b, 0xf5, 0x4c, 0x86, 0xf4, 0xaf, - 0x12, 0xa5, 0xd4, 0x87, 0x2b, 0x83, 0x0b, 0xf0, 0x63, 0x74, 0x9a, 0xfd, - 0x3f, 0xb2, 0x09, 0xf4, 0x18, 0xe3, 0x4f, 0xe3, 0xea, 0xca, 0x46, 0x83, - 0xf6, 0xcd, 0x8d, 0xe2, 0x1c, 0x7d, 0x8f, 0xc8, 0x7d, 0x8f, 0x6b, 0x3c, - 0x56, 0xb6, 0xa3, 0x90, 0x0e, 0x1c, 0x55, 0xe7, 0x04, 0x24, 0x8e, 0x94, - 0xfe, 0xd2, 0x27, 0x9c, 0x8f, 0x76, 0x9c, 0x2a, 0x1c, 0xeb, 0xfc, 0xb0, - 0xe2, 0x3b, 0x37, 0xdf, 0xb3, 0x78, 0x0d, 0xd2, 0xbd, 0x5c, 0x16, 0xb9, - 0x04, 0xbf, 0x9b, 0xf8, 0x7e, 0xb0, 0x52, 0xfc, 0xde, 0x46, 0xfc, 0xa2, - 0xac, 0xb7, 0x11, 0x1f, 0x43, 0x5f, 0x1c, 0x67, 0x1c, 0xcd, 0x37, 0xf1, - 0xfd, 0x87, 0xf5, 0xfb, 0x8d, 0xdf, 0x67, 0xc0, 0x9b, 0xd7, 0x88, 0xa5, - 0x19, 0x5f, 0x8c, 0xfb, 0x43, 0x6c, 0xf7, 0x6d, 0xd8, 0x6d, 0x68, 0xbb, - 
0xe3, 0xa6, 0xdd, 0xfa, 0x7e, 0x65, 0x95, 0x53, 0x22, 0x71, 0xab, 0xe8, - 0x2a, 0xe3, 0x0a, 0xd5, 0xb3, 0x02, 0xb6, 0x53, 0xf2, 0xb5, 0x2b, 0x7b, - 0x82, 0x6c, 0x4f, 0x40, 0x1c, 0x6c, 0xb5, 0xee, 0x2b, 0xe2, 0x7d, 0x7e, - 0xb9, 0x0f, 0xf3, 0xa8, 0x0b, 0xc6, 0x0a, 0xfe, 0x54, 0x7e, 0xd3, 0x72, - 0x73, 0xf3, 0xde, 0xea, 0x3f, 0xba, 0x91, 0xd2, 0x07, 0xf5, 0x4d, 0xc6, - 0xa9, 0x05, 0xff, 0x1f, 0xe8, 0xfa, 0x76, 0x97, 0xee, 0xaf, 0xfe, 0x33, - 0xc3, 0xa8, 0x4f, 0x67, 0x86, 0xcf, 0xf2, 0xbb, 0x83, 0xfd, 0xd2, 0x43, - 0xff, 0x5b, 0xc8, 0xd8, 0xd5, 0xdb, 0xeb, 0x9b, 0x5d, 0x8f, 0x6a, 0x8b, - 0x1e, 0xfa, 0xdc, 0x7f, 0x00, 0x5a, 0x33, 0xe6, 0xc0, 0x30, 0x14, 0x00, - 0x00, 0x00 }; + /* Date: 04/25/2008 22:02 */ +#define XI_RV2P_PROC2_MAX_BD_PAGE_LOC 5 +#define XI_RV2P_PROC2_BD_PAGE_SIZE_MSK 0xffff +#define XI_RV2P_PROC2_BD_PAGE_SIZE ((PAGE_SIZE / 16) - 1) + 0xad, 0x58, 0x5b, 0x6c, 0x54, 0x55, 0x14, 0x3d, 0xf3, 0xe8, 0xcc, 0xed, + 0xcc, 0x9d, 0x99, 0xd2, 0xd6, 0xe9, 0x8b, 0x48, 0x69, 0xa5, 0x74, 0x70, + 0x0a, 0x65, 0x5a, 0x1e, 0x3e, 0x12, 0x49, 0xd1, 0x02, 0x3e, 0x42, 0xa9, + 0x86, 0x98, 0x18, 0x03, 0x9d, 0x4a, 0xe9, 0x40, 0x4b, 0x2a, 0x25, 0x7c, + 0xf0, 0xe3, 0x84, 0x62, 0xf9, 0x99, 0x44, 0x4b, 0x80, 0x16, 0x63, 0x48, + 0x23, 0x3f, 0xc4, 0xbf, 0x26, 0x28, 0x45, 0x3f, 0x4c, 0x88, 0x36, 0x04, + 0x3e, 0xc0, 0x44, 0x63, 0xfc, 0x21, 0x12, 0xc4, 0x5a, 0xa0, 0xc1, 0x82, + 0x36, 0xc6, 0x48, 0xeb, 0x3d, 0x7b, 0xed, 0x73, 0xe7, 0xde, 0xe9, 0x2d, + 0x8f, 0x48, 0x3f, 0x58, 0x9c, 0x73, 0xf7, 0x39, 0x67, 0xef, 0xb5, 0x1f, + 0x67, 0x9f, 0x29, 0x10, 0x42, 0x78, 0x45, 0x7a, 0x64, 0x91, 0x81, 0x22, + 0xe8, 0xf6, 0x68, 0x06, 0xcc, 0x0a, 0x91, 0x57, 0x2a, 0xc7, 0xc2, 0x2d, + 0xf8, 0x6f, 0x59, 0x01, 0xc1, 0x0f, 0x23, 0xf2, 0xbb, 0x5f, 0xbc, 0xe5, + 0xc6, 0x77, 0xaf, 0x90, 0x18, 0x11, 0x22, 0x2d, 0xb1, 0x80, 0x31, 0xc6, + 0xe8, 0x72, 0x01, 0x4b, 0x18, 0x5f, 0x61, 0x14, 0x8c, 0xba, 0x1b, 0xe8, + 0x66, 0xf4, 0xaa, 0x79, 0x5e, 0xaf, 0xf1, 0x7c, 0x3b, 0xe3, 0x76, 0x9e, + 0xff, 0xdd, 0x40, 0xa5, 0x97, 0x1c, 0x4f, 0xce, 0x8a, 0xb4, 0x8e, 0x6d, + 0x63, 0x6a, 0x5e, 0x27, 0x48, 0xc7, 0xa0, 0xf7, 0x9b, 0x95, 0xb4, 0xce, + 0x41, 0x4e, 0xce, 0xdf, 0x98, 0x55, 0xfb, 0x1d, 0xf4, 0xc8, 0xf1, 0x2f, + 0xc6, 0xd8, 0x25, 0x87, 0xc5, 0x51, 0x6c, 0x53, 0x5c, 0x9a, 0x91, 0xfb, + 0x78, 0xc4, 0xd0, 0x80, 0x46, 0xac, 0x1c, 0xd7, 0x31, 0x4e, 0x13, 0x1f, + 0x2e, 0x63, 0xcc, 0xfb, 0x31, 0x96, 0x85, 0x70, 0xee, 0xd6, 0x4a, 0x9c, + 0xf7, 0xd3, 0xb3, 0x90, 0x4b, 0x47, 0x14, 0xa1, 0xf8, 0xbe, 0x48, 0xf0, + 0xf7, 0xdd, 0x72, 0x3c, 0xe5, 0x2a, 0x72, 0x81, 0x57, 0xbf, 0x50, 0x7a, + 0x42, 0xee, 0xea, 0x40, 0xae, 0x3c, 0x0d, 0x3f, 0x4f, 0xaf, 0x50, 0x76, + 0x00, 0xda, 0x42, 0xc0, 0xa4, 0x07, 0xb8, 0x2e, 0x4e, 0x90, 0x69, 0x67, + 0x47, 0x36, 0xd7, 0x48, 0x3d, 0x0a, 0x85, 0xd7, 0x2d, 0xf7, 0x5b, 0xa1, + 0xf9, 0xce, 0x61, 0xfe, 0x6d, 0xd6, 0xeb, 0xbd, 0x00, 0xf0, 0x46, 0x20, + 0xdf, 0xf8, 0x77, 0x76, 0xb6, 0x23, 0x68, 0xb7, 0x2b, 0x1d, 0xc4, 0xfa, + 0x31, 0xdd, 0xc9, 0x3e, 0xcf, 0x03, 0xec, 0xc3, 0xba, 0x05, 0x4b, 0xf1, + 0xf5, 0xe3, 0x0e, 0x8c, 0x2b, 0x4e, 0x49, 0x39, 0x9f, 0x18, 0x1a, 0x71, + 0xe2, 0x23, 0x77, 0x7f, 0x23, 0x4e, 0x23, 0x38, 0x47, 0x54, 0x69, 0xa4, + 0x54, 0x3a, 0x0e, 0x14, 0x75, 0x8e, 0xfc, 0x88, 0xe3, 0x4b, 0xe4, 0xbc, + 0x5b, 0xb4, 0x7a, 0x74, 0x3a, 0xa7, 0x35, 0xa5, 0xe2, 0x02, 0xdf, 0x3b, + 0x7c, 0x04, 0xa5, 0x6d, 0xfd, 0xd2, 0xde, 0x88, 0xd8, 0xee, 0x8e, 0x90, + 0x3c, 0xf8, 0xf1, 0x68, 0xfa, 0x97, 0x90, 0xff, 0x66, 0x91, 0x4e, 0x36, + 0x75, 0x34, 0x60, 0x5d, 0x71, 0x02, 0x78, 0x2c, 0x91, 0x27, 0x21, 0xde, + 0xd1, 0x4b, 0xc3, 0xe5, 0xd7, 
0x57, 0x6a, 0x24, 0x97, 0xae, 0x53, 0x71, + 0xa7, 0xfc, 0x27, 0xfd, 0x3a, 0x64, 0xc6, 0x99, 0xa8, 0x06, 0xbf, 0xbf, + 0x2d, 0x91, 0xf2, 0x06, 0xc9, 0x55, 0x38, 0x27, 0xab, 0x9f, 0x35, 0x6e, + 0x07, 0x2c, 0x71, 0xfb, 0x78, 0xf1, 0xb0, 0x8e, 0xf8, 0x59, 0xc7, 0x3c, + 0x55, 0x1a, 0x3c, 0x49, 0x5c, 0xe8, 0x4a, 0xd7, 0xd9, 0xfd, 0x74, 0x75, + 0x20, 0x42, 0xff, 0xbf, 0xd9, 0x54, 0x40, 0x3c, 0x6f, 0xc6, 0xfc, 0x91, + 0xe6, 0xb3, 0xf0, 0xd3, 0x26, 0xe2, 0x43, 0x04, 0x0e, 0x7d, 0x81, 0x55, + 0x6d, 0x14, 0xdf, 0xdd, 0x8d, 0x1d, 0x5f, 0x61, 0x9c, 0xf4, 0xc8, 0xf1, + 0x6e, 0xbd, 0x7d, 0x14, 0xf2, 0x79, 0x7d, 0xe0, 0x7d, 0x33, 0x9f, 0xb2, + 0xc9, 0x43, 0xf9, 0x94, 0xf1, 0xf5, 0xd1, 0x50, 0x1f, 0xa3, 0xef, 0x05, + 0xe2, 0xf0, 0x08, 0xbe, 0xef, 0x09, 0x4a, 0x3b, 0xdf, 0x37, 0xf3, 0x2e, + 0xe5, 0xc3, 0xfa, 0xcc, 0x00, 0xf2, 0x66, 0xe2, 0x8c, 0x1c, 0x6f, 0x8b, + 0x4f, 0x40, 0x3e, 0x9e, 0xea, 0xe7, 0x8d, 0xdd, 0xe0, 0xf1, 0xa6, 0x1b, + 0xf2, 0x1c, 0xa6, 0x01, 0x2f, 0xf9, 0xd1, 0x25, 0xf4, 0x66, 0xe0, 0x87, + 0xf4, 0xfd, 0x5f, 0x57, 0x86, 0x78, 0xdb, 0x1a, 0xf6, 0x9e, 0x55, 0xfc, + 0x30, 0x46, 0x94, 0x5d, 0xc0, 0x47, 0xcd, 0x83, 0xfe, 0x79, 0xf3, 0x80, + 0xfd, 0x19, 0x7b, 0x58, 0x1e, 0x00, 0x9b, 0x6b, 0x80, 0xbe, 0x6a, 0x29, + 0x9f, 0xff, 0x18, 0xf9, 0xc0, 0x7a, 0xcd, 0xc9, 0x37, 0xcc, 0x66, 0xe3, + 0x94, 0xc0, 0x88, 0x4f, 0x5b, 0xbc, 0x72, 0x7e, 0x2c, 0x36, 0xfc, 0xa0, + 0xe2, 0x4e, 0x6e, 0xe8, 0x17, 0xbb, 0x79, 0xdf, 0x14, 0xf3, 0xd1, 0xc5, + 0x7c, 0x4c, 0x32, 0xee, 0x09, 0x2a, 0x1e, 0x80, 0x87, 0x75, 0x9c, 0xdb, + 0x9a, 0x92, 0x7e, 0x5c, 0xeb, 0x50, 0x3f, 0x55, 0x9d, 0x84, 0x9f, 0x86, + 0xd8, 0xaf, 0x27, 0xcc, 0x7a, 0xa9, 0xf8, 0x9d, 0xaf, 0x6e, 0xda, 0xf3, + 0x29, 0xc7, 0xce, 0xcc, 0x77, 0x55, 0x58, 0x5e, 0x58, 0x0d, 0x34, 0xf9, + 0xad, 0xa1, 0x3c, 0x0e, 0x97, 0x8c, 0x2a, 0xfb, 0xa4, 0x7e, 0xb7, 0x55, + 0x1e, 0x86, 0x07, 0x0f, 0x12, 0x06, 0x0a, 0x4f, 0xc9, 0x73, 0xca, 0x1d, + 0xea, 0x8a, 0x3d, 0x9f, 0x73, 0xf9, 0xdd, 0x13, 0xa2, 0x42, 0xdb, 0x78, + 0x79, 0xdc, 0x9e, 0xb7, 0xc8, 0x4f, 0xbf, 0x19, 0xdf, 0xc5, 0xab, 0xd9, + 0x0f, 0x8c, 0xd1, 0x35, 0x72, 0xdf, 0x16, 0x3e, 0xa7, 0x9e, 0xcf, 0xd1, + 0x2d, 0x75, 0x43, 0xea, 0x59, 0x68, 0xd6, 0x0b, 0x15, 0x1f, 0xd9, 0xba, + 0xa1, 0xfc, 0x40, 0xe7, 0xc7, 0x2f, 0x8f, 0xcb, 0xf5, 0x15, 0x0f, 0xa9, + 0x23, 0x9a, 0xb9, 0xdf, 0x8f, 0x66, 0x9d, 0x90, 0xdf, 0x83, 0xe2, 0x25, + 0x1e, 0xda, 0xeb, 0xe1, 0x1f, 0x46, 0x3d, 0x24, 0x3b, 0x34, 0xfd, 0x2c, + 0xd7, 0xbf, 0x5e, 0x79, 0x4e, 0x29, 0xeb, 0x5d, 0xca, 0x7a, 0x1b, 0xd7, + 0x73, 0x1d, 0xd7, 0xc9, 0x6d, 0xd6, 0x7a, 0xf7, 0xcf, 0x4c, 0xb6, 0x6e, + 0xc9, 0xf1, 0xdf, 0x33, 0x73, 0xef, 0x5f, 0x47, 0x5e, 0xd3, 0x22, 0x82, + 0x3e, 0x23, 0x1d, 0x51, 0x7d, 0x41, 0x6e, 0xfd, 0xcb, 0xb5, 0x03, 0xbc, + 0xb4, 0xb9, 0x43, 0x24, 0x77, 0xad, 0xc7, 0xde, 0x27, 0xa0, 0x0e, 0x56, + 0xba, 0xa0, 0xf7, 0x42, 0xd6, 0xdb, 0xd8, 0xaf, 0x8e, 0xf2, 0x41, 0xbb, + 0xd6, 0x63, 0xd5, 0xfb, 0xfa, 0xcc, 0xfc, 0xe7, 0x39, 0xc7, 0x41, 0x0f, + 0xdf, 0xaf, 0x49, 0xc4, 0xb1, 0xd6, 0xfe, 0xb5, 0xd2, 0x8b, 0xcf, 0x8f, + 0x28, 0x3d, 0x74, 0xf2, 0xd3, 0x78, 0xaf, 0xdc, 0xa7, 0x48, 0x70, 0xd8, + 0x88, 0xee, 0x5a, 0xd4, 0xdf, 0xc9, 0x1d, 0xb0, 0xa3, 0xbb, 0x46, 0xea, + 0x61, 0x78, 0x25, 0x8d, 0x3c, 0x17, 0x7c, 0x1f, 0x7d, 0x3b, 0xe0, 0xe4, + 0xd7, 0x4b, 0x2c, 0x57, 0xcc, 0xf6, 0x15, 0xb0, 0x7d, 0x71, 0x91, 0x5b, + 0xe7, 0xb7, 0x55, 0xd2, 0x7d, 0xc0, 0x72, 0x2f, 0xaa, 0xfb, 0xc0, 0x90, + 0x93, 0xfb, 0xe4, 0xf1, 0x3e, 0x4e, 0x3c, 0x39, 0xed, 0x33, 0xca, 0x71, + 0xd0, 0xc9, 0x72, 0x5e, 0x87, 0x7b, 0x05, 0xa3, 0xe9, 0xa5, 0x74, 0xaf, + 0x9c, 0x3c, 0x40, 0xf7, 0x43, 
0xc8, 0x8c, 0x53, 0xbb, 0x1d, 0x67, 0x9e, + 0x20, 0xef, 0x52, 0x3e, 0x2a, 0x76, 0x86, 0xe7, 0xe3, 0xd7, 0x2b, 0xa7, + 0x87, 0xa7, 0x46, 0x95, 0x7f, 0x74, 0xb2, 0x77, 0xbc, 0x57, 0xf5, 0xb1, + 0x56, 0xde, 0xdd, 0x16, 0xde, 0x21, 0xff, 0x74, 0xfc, 0xff, 0xf2, 0xed, + 0xd4, 0x1f, 0x1c, 0x9d, 0x31, 0xfb, 0x50, 0x9f, 0x53, 0xfd, 0x5f, 0x61, + 0xc6, 0xcb, 0x5e, 0xee, 0xef, 0xa6, 0x75, 0xfa, 0x4f, 0x62, 0x32, 0x43, + 0x43, 0xbd, 0xec, 0xb4, 0x94, 0x5b, 0x96, 0xd8, 0xcb, 0x76, 0x5d, 0xf4, + 0xc0, 0xee, 0xce, 0x1d, 0x18, 0x5f, 0xe6, 0x7a, 0x7d, 0x97, 0xeb, 0xe3, + 0x16, 0x0d, 0x38, 0x59, 0x4b, 0x7c, 0x24, 0xf6, 0x9e, 0x53, 0xfb, 0xd3, + 0xbe, 0xfa, 0x34, 0xf3, 0xf9, 0xb2, 0x87, 0xed, 0xac, 0x22, 0x3f, 0x26, + 0xee, 0x50, 0x3d, 0xf0, 0x8a, 0xa6, 0xc5, 0x12, 0xcb, 0x0c, 0xde, 0x58, + 0x9f, 0x17, 0x80, 0xad, 0x7e, 0xa6, 0x21, 0x96, 0xeb, 0x67, 0x4c, 0xfb, + 0xaa, 0x78, 0x7d, 0x1b, 0xc6, 0x7e, 0xae, 0x67, 0x83, 0xac, 0xd7, 0xd1, + 0x5a, 0x60, 0x38, 0x86, 0x3e, 0x61, 0x9c, 0xee, 0x85, 0x48, 0xa2, 0x77, + 0x14, 0xf6, 0x74, 0xad, 0x87, 0xbd, 0xf7, 0x98, 0x07, 0xc6, 0xf0, 0x89, + 0x3e, 0xba, 0x77, 0xc2, 0xfd, 0xe8, 0x33, 0xc2, 0xbe, 0x3e, 0xd8, 0xd1, + 0x35, 0x8d, 0xf1, 0xbd, 0xe7, 0x80, 0x7f, 0x3d, 0x8f, 0x75, 0xfb, 0x0e, + 0x30, 0x3f, 0xeb, 0x9d, 0xd7, 0x75, 0xfe, 0x09, 0xb9, 0xee, 0x5a, 0x79, + 0xfe, 0xbb, 0xc3, 0xdc, 0x7f, 0x88, 0x14, 0xf5, 0x3b, 0xef, 0xe8, 0xd3, + 0x3c, 0xde, 0xc5, 0xf7, 0xe2, 0x2d, 0xee, 0x17, 0xba, 0x72, 0xfa, 0x85, + 0x09, 0xd4, 0xe9, 0xe1, 0xe9, 0x8c, 0x9c, 0x30, 0xea, 0x65, 0xbe, 0x93, + 0x7f, 0xf5, 0x44, 0x09, 0xfb, 0xad, 0x78, 0x15, 0xf0, 0xd8, 0x2a, 0xdc, + 0xd7, 0x5d, 0xfb, 0x99, 0x9f, 0x46, 0xf2, 0xd3, 0xf2, 0xa9, 0xd1, 0xdc, + 0xf5, 0x32, 0x7e, 0x3a, 0x8d, 0xf8, 0x51, 0xe7, 0x40, 0x3e, 0x19, 0x92, + 0xf3, 0xf7, 0xcc, 0xba, 0x3f, 0x41, 0x7a, 0x57, 0x0c, 0x4f, 0xd3, 0xfa, + 0x72, 0x51, 0x44, 0xf1, 0x57, 0x16, 0x9e, 0x82, 0x1d, 0x89, 0x41, 0xb6, + 0xbf, 0x67, 0x2d, 0x70, 0x3f, 0xfb, 0x5f, 0xf9, 0xf5, 0xca, 0x1a, 0x9d, + 0xd6, 0x8d, 0xf7, 0xe2, 0x1c, 0x95, 0x3f, 0xb9, 0xfd, 0xb0, 0x8a, 0x87, + 0xb2, 0x06, 0x1a, 0x8b, 0xce, 0x7d, 0xf2, 0x9c, 0x90, 0xe1, 0x47, 0x19, + 0x47, 0x06, 0x47, 0x7c, 0x5f, 0xdb, 0xe3, 0x44, 0xc6, 0x91, 0x8a, 0x57, + 0x6b, 0x7c, 0x59, 0xe3, 0xc7, 0x1e, 0x37, 0x61, 0xba, 0x57, 0x8c, 0x22, + 0x90, 0xa6, 0x77, 0x44, 0x62, 0x70, 0xe0, 0xc1, 0xfc, 0x9d, 0x00, 0x7f, + 0x09, 0xd6, 0x5b, 0x4f, 0x51, 0x1f, 0xfa, 0x94, 0xe8, 0x67, 0x3f, 0x4e, + 0xd4, 0x72, 0xde, 0x57, 0xc1, 0x8f, 0x3d, 0xcf, 0x40, 0x9f, 0x1e, 0xce, + 0x9f, 0xdb, 0xdc, 0x5f, 0xc0, 0xff, 0x7e, 0xbd, 0x63, 0x94, 0xfd, 0xcd, + 0x71, 0xb8, 0x8b, 0x79, 0xb8, 0x05, 0x1e, 0x74, 0xc5, 0x43, 0xca, 0xe4, + 0x41, 0xd5, 0x19, 0xeb, 0x3e, 0x85, 0x46, 0x1c, 0x49, 0x5c, 0xa0, 0x5f, + 0xa1, 0x3e, 0x2c, 0x8f, 0xed, 0x36, 0xe4, 0x1a, 0xa4, 0x7d, 0x61, 0xb6, + 0x2f, 0x24, 0x76, 0x2e, 0xb7, 0xae, 0x0b, 0xf2, 0xba, 0x80, 0xb1, 0x0e, + 0xf3, 0xc8, 0x53, 0x7d, 0x1e, 0x7e, 0x25, 0x8f, 0x6a, 0xdf, 0xdc, 0x7c, + 0xb4, 0xf2, 0x49, 0x15, 0x9a, 0xfe, 0x50, 0x87, 0x0c, 0xbf, 0x51, 0xbd, + 0xd2, 0xcd, 0xfa, 0x73, 0x97, 0xea, 0x79, 0xe0, 0x64, 0x37, 0xea, 0xc5, + 0xc9, 0xee, 0xd3, 0x7c, 0xff, 0x32, 0x2f, 0x2d, 0xf4, 0x6e, 0x30, 0xb8, + 0xab, 0xb2, 0xd7, 0x1f, 0xbb, 0x1e, 0x15, 0x16, 0x3d, 0xd4, 0xb9, 0x0f, + 0xeb, 0x0f, 0xd0, 0x9f, 0x6e, 0xa0, 0xfe, 0x40, 0x33, 0xfb, 0x6c, 0xfb, + 0x3d, 0x72, 0xeb, 0xfe, 0xe3, 0xde, 0x23, 0x5b, 0x1a, 0xac, 0xe7, 0xc5, + 0xc4, 0xd8, 0x08, 0xce, 0x69, 0xe1, 0x7b, 0x7b, 0x3b, 0xe7, 0xf7, 0xb5, + 0x40, 0x84, 0xce, 0x4d, 0xbe, 0x46, 0xf6, 0x8a, 0x68, 0x10, 0xf6, 0x25, + 0x5f, 0xc7, 0xf7, 0x64, 0x08, 
0xf3, 0xe5, 0x21, 0xfc, 0x3e, 0xd0, 0xe2, + 0xd7, 0x49, 0xbe, 0x3c, 0x04, 0x8c, 0x72, 0x5d, 0x18, 0x33, 0xdf, 0x11, + 0xc0, 0x21, 0xdf, 0x7c, 0xef, 0x08, 0xbc, 0xc7, 0xce, 0xfb, 0x50, 0x37, + 0x44, 0x0c, 0xfd, 0x73, 0x53, 0x8d, 0x4e, 0xdf, 0x5b, 0x63, 0xb8, 0x9f, + 0x51, 0x97, 0xe7, 0xda, 0x55, 0x81, 0x78, 0x2d, 0xcf, 0xbe, 0x37, 0xac, + 0xef, 0x12, 0x3d, 0x31, 0x68, 0xf6, 0xfd, 0x76, 0x3d, 0x90, 0x67, 0x72, + 0x7f, 0x1a, 0x1a, 0xfd, 0xf5, 0xa3, 0xbc, 0x47, 0x3c, 0xdc, 0xaf, 0xdd, + 0x99, 0x45, 0xbc, 0x16, 0x89, 0x0b, 0x23, 0xe0, 0x61, 0x6c, 0xc4, 0x29, + 0x8f, 0xa5, 0x1e, 0xea, 0x1c, 0xd8, 0xa5, 0xec, 0xcc, 0x9e, 0x0b, 0xbd, + 0x76, 0xb0, 0xfe, 0xbf, 0xd2, 0xef, 0x17, 0x51, 0xb6, 0x57, 0xee, 0x8b, + 0xf9, 0x8d, 0xf4, 0x6e, 0xca, 0x13, 0x69, 0x73, 0x6c, 0x7f, 0xcf, 0xb4, + 0x90, 0x5e, 0x85, 0x7c, 0xaf, 0x47, 0x2d, 0xfd, 0x07, 0xe4, 0x8b, 0xeb, + 0x81, 0xc7, 0xea, 0x95, 0xdf, 0x94, 0x7f, 0x95, 0x3f, 0xe1, 0xf7, 0xe8, + 0x4a, 0x12, 0x6b, 0x4c, 0xae, 0xa4, 0x84, 0xaf, 0x4f, 0x4e, 0xa9, 0x7b, + 0x1e, 0xeb, 0xb7, 0xc4, 0xa5, 0xfc, 0x07, 0xe2, 0xfb, 0x38, 0x2e, 0xd6, + 0x9f, 0x19, 0xb3, 0xfd, 0xbd, 0xe0, 0x3f, 0xeb, 0x3b, 0xc1, 0x23, 0xce, + 0xe7, 0xf1, 0x74, 0x83, 0xea, 0x7b, 0xe7, 0x7b, 0xef, 0x48, 0x3b, 0x3e, + 0xba, 0x9f, 0xdb, 0x47, 0x67, 0xfb, 0x60, 0x65, 0xaf, 0x5c, 0x57, 0xcf, + 0x71, 0xac, 0x89, 0xa6, 0x0d, 0xf8, 0x7d, 0x23, 0xec, 0x47, 0xde, 0x84, + 0xfd, 0x4e, 0xef, 0x52, 0x23, 0x8e, 0xf2, 0xa9, 0x60, 0x2c, 0x2c, 0xcc, + 0x27, 0xfb, 0x8e, 0x5c, 0xbc, 0x44, 0x62, 0x9f, 0x0d, 0x06, 0x31, 0x5f, + 0xd2, 0x84, 0x63, 0xbc, 0x14, 0xf7, 0x1e, 0x71, 0x08, 0x79, 0xf1, 0xe9, + 0x31, 0xe0, 0x27, 0xe2, 0x55, 0xec, 0x53, 0x78, 0x90, 0xee, 0x53, 0xad, + 0x04, 0x34, 0x67, 0x06, 0x39, 0xde, 0x4b, 0xdd, 0xf4, 0xfb, 0xe2, 0xac, + 0x08, 0x49, 0xf4, 0x99, 0xf9, 0x8b, 0xb8, 0xf6, 0x5a, 0xfc, 0xff, 0xa8, + 0x71, 0x4e, 0xf5, 0xc9, 0x88, 0x63, 0x6c, 0xc3, 0xf1, 0xae, 0xe5, 0xc6, + 0xbb, 0xe2, 0xa9, 0xd4, 0xed, 0x18, 0xdf, 0xab, 0xe7, 0xc6, 0xb7, 0xd2, + 0x4f, 0xf2, 0x7d, 0xd7, 0xbc, 0x3f, 0xe7, 0xee, 0x8f, 0xf7, 0xd3, 0x85, + 0x27, 0x16, 0xdf, 0xc0, 0x8d, 0xd5, 0xf2, 0xfc, 0x92, 0x39, 0x7d, 0x72, + 0x6e, 0xfe, 0xa1, 0xde, 0xbd, 0x61, 0xc4, 0xc3, 0x7f, 0x69, 0x4a, 0x77, + 0x8f, 0xc8, 0x15, 0x00, 0x00, 0x00 }; static u8 bnx2_TPAT_b09FwText[] = { 0xbd, 0x58, 0x5d, 0x6c, 0x1c, 0xd5, 0x15, 0x3e, 0x73, 0x67, 0xd6, 0x3b, diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c index 7bdb5af3595..70cba64732c 100644 --- a/drivers/net/bnx2x.c +++ b/drivers/net/bnx2x.c @@ -6,7 +6,8 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Written by: Eliezer Tamir <eliezert@broadcom.com> + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman * Slowpath rework by Vladislav Zolotarov @@ -74,7 +75,7 @@ static char version[] __devinitdata = "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver " DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; -MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>"); +MODULE_AUTHOR("Eliezer Tamir"); MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h index 4f0c0d31e7c..8e68d06510a 100644 --- a/drivers/net/bnx2x.h +++ b/drivers/net/bnx2x.h @@ -6,7 +6,8 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation. 
* - * Written by: Eliezer Tamir <eliezert@broadcom.com> + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver */ diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h index dcaecc53bdb..370686eef97 100644 --- a/drivers/net/bnx2x_init.h +++ b/drivers/net/bnx2x_init.h @@ -6,7 +6,8 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Written by: Eliezer Tamir <eliezert@broadcom.com> + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Eliezer Tamir */ #ifndef BNX2X_INIT_H diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6425603bc37..50a40e43315 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1425,13 +1425,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = netdev_set_master(slave_dev, bond_dev); if (res) { dprintk("Error %d calling netdev_set_master\n", res); - goto err_close; + goto err_restore_mac; } /* open the slave since the application closed it */ res = dev_open(slave_dev); if (res) { dprintk("Openning slave %s failed\n", slave_dev->name); - goto err_restore_mac; + goto err_unset_master; } new_slave->dev = slave_dev; @@ -1444,7 +1444,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) */ res = bond_alb_init_slave(bond, new_slave); if (res) { - goto err_unset_master; + goto err_close; } } @@ -1619,7 +1619,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) - goto err_unset_master; + goto err_close; printk(KERN_INFO DRV_NAME ": %s: enslaving %s as a%s interface with a%s link.\n", @@ -1631,12 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* Undo stages on error */ -err_unset_master: - netdev_set_master(slave_dev, NULL); - err_close: dev_close(slave_dev); +err_unset_master: + netdev_set_master(slave_dev, NULL); + err_restore_mac: if (!bond->params.fail_over_mac) { memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); @@ -4936,7 +4936,9 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond if (res < 0) { rtnl_lock(); down_write(&bonding_rwsem); - goto out_bond; + bond_deinit(bond_dev); + unregister_netdevice(bond_dev); + goto out_rtnl; } return 0; @@ -4990,9 +4992,10 @@ err: destroy_workqueue(bond->wq); } + bond_destroy_sysfs(); + rtnl_lock(); bond_free_all(); - bond_destroy_sysfs(); rtnl_unlock(); out: return res; @@ -5004,9 +5007,10 @@ static void __exit bonding_exit(void) unregister_netdevice_notifier(&bond_netdev_notifier); unregister_inetaddr_notifier(&bond_inetaddr_notifier); + bond_destroy_sysfs(); + rtnl_lock(); bond_free_all(); - bond_destroy_sysfs(); rtnl_unlock(); } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 979c2d05ff9..08f3d396bcd 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -146,29 +146,29 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t ": Unable remove bond %s due to open references.\n", ifname); res = -EPERM; - goto out; + goto out_unlock; } printk(KERN_INFO DRV_NAME ": %s is being deleted...\n", bond->dev->name); bond_destroy(bond); - up_write(&bonding_rwsem); - rtnl_unlock(); - goto out; + goto out_unlock; } printk(KERN_ERR DRV_NAME ": unable to delete non-existent bond 
%s\n", ifname); res = -ENODEV; - up_write(&bonding_rwsem); - rtnl_unlock(); - goto out; + goto out_unlock; } err_no_cmd: printk(KERN_ERR DRV_NAME ": no command found in bonding_masters. Use +ifname or -ifname.\n"); - res = -EPERM; + return -EPERM; + +out_unlock: + up_write(&bonding_rwsem); + rtnl_unlock(); /* Always return either count or an error. If you return 0, you'll * get called forever, which is bad. @@ -1437,8 +1437,16 @@ int bond_create_sysfs(void) * configure multiple bonding devices. */ if (ret == -EEXIST) { - netdev_class = NULL; - return 0; + /* Is someone being kinky and naming a device bonding_master? */ + if (__dev_get_by_name(&init_net, + class_attr_bonding_masters.attr.name)) + printk(KERN_ERR + "network device named %s already exists in sysfs", + class_attr_bonding_masters.attr.name); + else { + netdev_class = NULL; + return 0; + } } return ret; diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 93e13636f8d..83768df2780 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -142,8 +142,8 @@ #define DRV_MODULE_NAME "cassini" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.5" -#define DRV_MODULE_RELDATE "4 Jan 2008" +#define DRV_MODULE_VERSION "1.6" +#define DRV_MODULE_RELDATE "21 May 2008" #define CAS_DEF_MSG_ENABLE \ (NETIF_MSG_DRV | \ @@ -2136,9 +2136,12 @@ end_copy_pkt: if (addr) cas_page_unmap(addr); } - skb->csum = csum_unfold(~csum); - skb->ip_summed = CHECKSUM_COMPLETE; skb->protocol = eth_type_trans(skb, cp->dev); + if (skb->protocol == htons(ETH_P_IP)) { + skb->csum = csum_unfold(~csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } else + skb->ip_summed = CHECKSUM_NONE; return len; } diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 2b5740b3d18..7f3f62e1b11 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -38,6 +38,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <asm/gpio.h> +#include <asm/atomic.h> MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); @@ -187,6 +188,7 @@ struct cpmac_desc { #define CPMAC_EOQ 0x1000 struct sk_buff *skb; struct cpmac_desc *next; + struct cpmac_desc *prev; dma_addr_t mapping; dma_addr_t data_mapping; }; @@ -208,6 +210,7 @@ struct cpmac_priv { struct work_struct reset_work; struct platform_device *pdev; struct napi_struct napi; + atomic_t reset_pending; }; static irqreturn_t cpmac_irq(int, void *); @@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) printk("\n"); } +static void cpmac_dump_all_desc(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + struct cpmac_desc *dump = priv->rx_head; + do { + cpmac_dump_desc(dev, dump); + dump = dump->next; + } while (dump != priv->rx_head); +} + static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) { int i; @@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, static int cpmac_poll(struct napi_struct *napi, int budget) { struct sk_buff *skb; - struct cpmac_desc *desc; - int received = 0; + struct cpmac_desc *desc, *restart; struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); + int received = 0, processed = 0; spin_lock(&priv->rx_lock); if (unlikely(!priv->rx_head)) { if (netif_msg_rx_err(priv) && net_ratelimit()) printk(KERN_WARNING "%s: rx: polling, but no queue\n", priv->dev->name); + spin_unlock(&priv->rx_lock); netif_rx_complete(priv->dev, napi); return 0; } desc = priv->rx_head; + restart = NULL; while 
(((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { + processed++; + + if ((desc->dataflags & CPMAC_EOQ) != 0) { + /* The last update to eoq->hw_next didn't happen + * soon enough, and the receiver stopped here. + *Remember this descriptor so we can restart + * the receiver after freeing some space. + */ + if (unlikely(restart)) { + if (netif_msg_rx_err(priv)) + printk(KERN_ERR "%s: poll found a" + " duplicate EOQ: %p and %p\n", + priv->dev->name, restart, desc); + goto fatal_error; + } + + restart = desc->next; + } + skb = cpmac_rx_one(priv, desc); if (likely(skb)) { netif_receive_skb(skb); @@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget) desc = desc->next; } + if (desc != priv->rx_head) { + /* We freed some buffers, but not the whole ring, + * add what we did free to the rx list */ + desc->prev->hw_next = (u32)0; + priv->rx_head->prev->hw_next = priv->rx_head->mapping; + } + + /* Optimization: If we did not actually process an EOQ (perhaps because + * of quota limits), check to see if the tail of the queue has EOQ set. + * We should immediately restart in that case so that the receiver can + * restart and run in parallel with more packet processing. + * This lets us handle slightly larger bursts before running + * out of ring space (assuming dev->weight < ring_size) */ + + if (!restart && + (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) + == CPMAC_EOQ && + (priv->rx_head->dataflags & CPMAC_OWN) != 0) { + /* reset EOQ so the poll loop (above) doesn't try to + * restart this when it eventually gets to this descriptor. + */ + priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; + restart = priv->rx_head; + } + + if (restart) { + priv->dev->stats.rx_errors++; + priv->dev->stats.rx_fifo_errors++; + if (netif_msg_rx_err(priv) && net_ratelimit()) + printk(KERN_WARNING "%s: rx dma ring overrun\n", + priv->dev->name); + + if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { + if (netif_msg_drv(priv)) + printk(KERN_ERR "%s: cpmac_poll is trying to " + "restart rx from a descriptor that's " + "not free: %p\n", + priv->dev->name, restart); + goto fatal_error; + } + + cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); + } + priv->rx_head = desc; spin_unlock(&priv->rx_lock); if (unlikely(netif_msg_rx_status(priv))) printk(KERN_DEBUG "%s: poll processed %d packets\n", priv->dev->name, received); - if (desc->dataflags & CPMAC_OWN) { + if (processed == 0) { + /* we ran out of packets to read, + * revert to interrupt-driven mode */ netif_rx_complete(priv->dev, napi); - cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); return 0; } return 1; + +fatal_error: + /* Something went horribly wrong. + * Reset hardware to try to recover rather than wedging. */ + + if (netif_msg_drv(priv)) { + printk(KERN_ERR "%s: cpmac_poll is confused. 
" + "Resetting hardware\n", priv->dev->name); + cpmac_dump_all_desc(priv->dev); + printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", + priv->dev->name, + cpmac_read(priv->regs, CPMAC_RX_PTR(0)), + cpmac_read(priv->regs, CPMAC_RX_ACK(0))); + } + + spin_unlock(&priv->rx_lock); + netif_rx_complete(priv->dev, napi); + netif_stop_queue(priv->dev); + napi_disable(&priv->napi); + + atomic_inc(&priv->reset_pending); + cpmac_hw_stop(priv->dev); + if (!schedule_work(&priv->reset_work)) + atomic_dec(&priv->reset_pending); + return 0; + } static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) struct cpmac_desc *desc; struct cpmac_priv *priv = netdev_priv(dev); + if (unlikely(atomic_read(&priv->reset_pending))) + return NETDEV_TX_BUSY; + if (unlikely(skb_padto(skb, ETH_ZLEN))) return NETDEV_TX_OK; @@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev) desc->dataflags = CPMAC_OWN; dev->stats.rx_dropped++; } + desc->hw_next = desc->next->mapping; desc = desc->next; } + priv->rx_head->prev->hw_next = 0; } static void cpmac_clear_tx(struct net_device *dev) @@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev) priv->desc_ring[i].dataflags = 0; if (priv->desc_ring[i].skb) { dev_kfree_skb_any(priv->desc_ring[i].skb); - if (netif_subqueue_stopped(dev, i)) - netif_wake_subqueue(dev, i); + priv->desc_ring[i].skb = NULL; } } } static void cpmac_hw_error(struct work_struct *work) { + int i; struct cpmac_priv *priv = container_of(work, struct cpmac_priv, reset_work); @@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work) spin_unlock(&priv->rx_lock); cpmac_clear_tx(priv->dev); cpmac_hw_start(priv->dev); - napi_enable(&priv->napi); - netif_start_queue(priv->dev); + barrier(); + atomic_dec(&priv->reset_pending); + + for (i = 0; i < CPMAC_QUEUES; i++) + netif_wake_subqueue(priv->dev, i); + netif_wake_queue(priv->dev); + cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); +} + +static void cpmac_check_status(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); + int rx_channel = (macstatus >> 8) & 7; + int rx_code = (macstatus >> 12) & 15; + int tx_channel = (macstatus >> 16) & 7; + int tx_code = (macstatus >> 20) & 15; + + if (rx_code || tx_code) { + if (netif_msg_drv(priv) && net_ratelimit()) { + /* Can't find any documentation on what these + *error codes actually are. So just log them and hope.. 
+ */ + if (rx_code) + printk(KERN_WARNING "%s: host error %d on rx " + "channel %d (macstatus %08x), resetting\n", + dev->name, rx_code, rx_channel, macstatus); + if (tx_code) + printk(KERN_WARNING "%s: host error %d on tx " + "channel %d (macstatus %08x), resetting\n", + dev->name, tx_code, tx_channel, macstatus); + } + + netif_stop_queue(dev); + cpmac_hw_stop(dev); + if (schedule_work(&priv->reset_work)) + atomic_inc(&priv->reset_pending); + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_regs(dev); + } + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); } static irqreturn_t cpmac_irq(int irq, void *dev_id) @@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); - if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { - if (netif_msg_drv(priv) && net_ratelimit()) - printk(KERN_ERR "%s: hw error, resetting...\n", - dev->name); - netif_stop_queue(dev); - napi_disable(&priv->napi); - cpmac_hw_stop(dev); - schedule_work(&priv->reset_work); - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_regs(dev); - } + if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) + cpmac_check_status(dev); return IRQ_HANDLED; } static void cpmac_tx_timeout(struct net_device *dev) { - struct cpmac_priv *priv = netdev_priv(dev); int i; + struct cpmac_priv *priv = netdev_priv(dev); spin_lock(&priv->lock); dev->stats.tx_errors++; spin_unlock(&priv->lock); if (netif_msg_tx_err(priv) && net_ratelimit()) printk(KERN_WARNING "%s: transmit timeout\n", dev->name); - /* - * FIXME: waking up random queue is not the best thing to - * do... on the other hand why we got here at all? - */ -#ifdef CONFIG_NETDEVICES_MULTIQUEUE + + atomic_inc(&priv->reset_pending); + barrier(); + cpmac_clear_tx(dev); + barrier(); + atomic_dec(&priv->reset_pending); + + netif_wake_queue(priv->dev); for (i = 0; i < CPMAC_QUEUES; i++) - if (priv->desc_ring[i].skb) { - priv->desc_ring[i].dataflags = 0; - dev_kfree_skb_any(priv->desc_ring[i].skb); - netif_wake_subqueue(dev, i); - break; - } -#else - priv->desc_ring[0].dataflags = 0; - if (priv->desc_ring[0].skb) - dev_kfree_skb_any(priv->desc_ring[0].skb); - netif_wake_queue(dev); -#endif + netif_wake_subqueue(dev, i); } static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev) desc->buflen = CPMAC_SKB_SIZE; desc->dataflags = CPMAC_OWN; desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; + desc->next->prev = desc; desc->hw_next = (u32)desc->next->mapping; } + priv->rx_head->prev->hw_next = (u32)0; + if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev))) { if (netif_msg_drv(priv)) @@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev) goto fail_irq; } + atomic_set(&priv->reset_pending, 0); INIT_WORK(&priv->reset_work, cpmac_hw_error); cpmac_hw_start(dev); @@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev) if (phy_id == PHY_MAX_ADDR) { if (external_switch || dumb_switch) { - struct fixed_phy_status status = {}; - - /* - * FIXME: this should be in the platform code! - * Since there is not platform code at all (that is, - * no mainline users of that driver), place it here - * for now. 
- */ - phy_id = 0; - status.link = 1; - status.duplex = 1; - status.speed = 100; - fixed_phy_add(PHY_POLL, phy_id, &status); + mdio_bus_id = 0; /* fixed phys bus */ + phy_id = pdev->id; } else { - printk(KERN_ERR "cpmac: no PHY present\n"); + dev_err(&pdev->dev, "no PHY present\n"); return -ENODEV; } } @@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(debug_level, 0xff); memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); - snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); - - priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id, + &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phy)) { if (netif_msg_drv(priv)) printk(KERN_ERR "%s: Could not attach to PHY\n", diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 348371fda59..fba87abe78e 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -1394,7 +1394,11 @@ net_open(struct net_device *dev) #endif if (!result) { printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); - release_irq: +release_dma: +#if ALLOW_DMA + free_dma(dev->dma); +#endif +release_irq: #if ALLOW_DMA release_dma_buff(lp); #endif @@ -1442,12 +1446,12 @@ net_open(struct net_device *dev) if ((result = detect_bnc(dev)) != DETECTED_NONE) break; printk(KERN_ERR "%s: no media detected\n", dev->name); - goto release_irq; + goto release_dma; } switch(result) { case DETECTED_NONE: printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name); - goto release_irq; + goto release_dma; case DETECTED_RJ45H: printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); break; diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 4fdb13f8447..acebe431d06 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -71,6 +71,7 @@ enum { /* adapter flags */ USING_MSIX = (1 << 2), QUEUES_BOUND = (1 << 3), TP_PARITY_INIT = (1 << 4), + NAPI_INIT = (1 << 5), }; struct fl_pg_chunk { diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h index 91ee7277b81..579bee42a5c 100644 --- a/drivers/net/cxgb3/common.h +++ b/drivers/net/cxgb3/common.h @@ -698,6 +698,7 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index); void early_hw_init(struct adapter *adapter, const struct adapter_info *ai); int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, int reset); +int t3_replay_prep_adapter(struct adapter *adapter); void t3_led_ready(struct adapter *adapter); void t3_fatal_err(struct adapter *adapter); void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 05e5f59e87f..3a312721679 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -421,6 +421,13 @@ static void init_napi(struct adapter *adap) netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, 64); } + + /* + * netif_napi_add() can be called only once per napi_struct because it + * adds each new napi_struct to a list. Be careful not to call it a + * second time, e.g., during EEH recovery, by making a note of it. 
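The NAPI_INIT flag noted in this comment pairs with t3_reset_qset() in the sge.c hunk further down, so that an EEH reincarnation of a queue set never calls netif_napi_add() a second time: the reset clears the qset's members individually and leaves the embedded napi_struct untouched. A self-contained sketch of that preserve-one-member idea, with stand-in types rather than the driver's own:

#include <stdio.h>
#include <string.h>

struct napi_like { int registered; };       /* stand-in for napi_struct      */
struct qset_like {                          /* stand-in for struct sge_qset  */
	int rx_count, tx_count;
	struct napi_like napi;              /* must survive the reset        */
};

static void reset_qset_like(struct qset_like *q)
{
	struct napi_like saved = q->napi;   /* preserve the NAPI state       */
	memset(q, 0, sizeof(*q));           /* wipe everything else          */
	q->napi = saved;                    /* put the NAPI state back       */
}

int main(void)
{
	struct qset_like q = { .rx_count = 5, .tx_count = 9, .napi = { .registered = 1 } };
	reset_qset_like(&q);
	printf("rx=%d tx=%d napi.registered=%d\n", q.rx_count, q.tx_count, q.napi.registered);
	return 0;
}

The driver avoids the temporary copy by clearing the individual members instead, but the observable result for the NAPI state is the same.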
+ */ + adap->flags |= NAPI_INIT; } /* @@ -896,7 +903,8 @@ static int cxgb_up(struct adapter *adap) goto out; setup_rss(adap); - init_napi(adap); + if (!(adap->flags & NAPI_INIT)) + init_napi(adap); adap->flags |= FULL_INIT_DONE; } @@ -999,7 +1007,7 @@ static int offload_open(struct net_device *dev) return 0; if (!adap_up && (err = cxgb_up(adapter)) < 0) - return err; + goto out; t3_tp_set_offload_mode(adapter, 1); tdev->lldev = adapter->port[0]; @@ -1061,10 +1069,8 @@ static int cxgb_open(struct net_device *dev) int other_ports = adapter->open_device_map & PORT_MASK; int err; - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { - quiesce_rx(adapter); + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) return err; - } set_bit(pi->port_id, &adapter->open_device_map); if (is_offload(adapter) && !ofld_disable) { @@ -1894,11 +1900,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) u8 *fw_data; struct ch_mem_range t; - if (!capable(CAP_NET_ADMIN)) + if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; - + /* Check t.len sanity ? */ fw_data = kmalloc(t.len, GFP_KERNEL); if (!fw_data) return -ENOMEM; @@ -2424,14 +2430,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) offload_close(&adapter->tdev); - /* Free sge resources */ - t3_free_sge_resources(adapter); - adapter->flags &= ~FULL_INIT_DONE; pci_disable_device(pdev); - /* Request a slot slot reset. */ + /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } @@ -2448,13 +2451,20 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev) if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; + goto err; } pci_set_master(pdev); + pci_restore_state(pdev); - t3_prep_adapter(adapter, adapter->params.info, 1); + /* Free sge resources */ + t3_free_sge_resources(adapter); + + if (t3_replay_prep_adapter(adapter)) + goto err; return PCI_ERS_RESULT_RECOVERED; +err: + return PCI_ERS_RESULT_DISCONNECT; } /** @@ -2483,13 +2493,6 @@ static void t3_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); } } - - if (is_offload(adapter)) { - __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); - if (offload_open(adapter->port[0])) - printk(KERN_WARNING - "Could not bring back offload capabilities\n"); - } } static struct pci_error_handlers t3_err_handler = { @@ -2608,6 +2611,7 @@ static int __devinit init_one(struct pci_dev *pdev, } pci_set_master(pdev); + pci_save_state(pdev); mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index 02dbbb30092..56717887934 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -444,6 +444,14 @@ #define A_PCIE_CFG 0x88 +#define S_ENABLELINKDWNDRST 21 +#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST) +#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U) + +#define S_ENABLELINKDOWNRST 20 +#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST) +#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U) + #define S_PCIE_CLIDECEN 16 #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN) #define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U) diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 98a6bbd11d4..796eb305cdc 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -539,6 +539,31 @@ static void 
*alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, } /** + * t3_reset_qset - reset a sge qset + * @q: the queue set + * + * Reset the qset structure. + * the NAPI structure is preserved in the event of + * the qset's reincarnation, for example during EEH recovery. + */ +static void t3_reset_qset(struct sge_qset *q) +{ + if (q->adap && + !(q->adap->flags & NAPI_INIT)) { + memset(q, 0, sizeof(*q)); + return; + } + + q->adap = NULL; + memset(&q->rspq, 0, sizeof(q->rspq)); + memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); + memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); + q->txq_stopped = 0; + memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer)); +} + + +/** * free_qset - free the resources of an SGE queue set * @adapter: the adapter owning the queue set * @q: the queue set @@ -594,7 +619,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) q->rspq.desc, q->rspq.phys_addr); } - memset(q, 0, sizeof(*q)); + t3_reset_qset(q); } /** @@ -1365,7 +1390,7 @@ static void restart_ctrlq(unsigned long data) */ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) { - int ret; + int ret; local_bh_disable(); ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); local_bh_enable(); diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index a99496a431c..d405a932c73 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -3264,6 +3264,7 @@ static void config_pcie(struct adapter *adap) t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff); t3_set_reg_field(adap, A_PCIE_CFG, 0, + F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST | F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN); } @@ -3655,3 +3656,30 @@ void t3_led_ready(struct adapter *adapter) t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, F_GPIO0_OUT_VAL); } + +int t3_replay_prep_adapter(struct adapter *adapter) +{ + const struct adapter_info *ai = adapter->params.info; + unsigned int i, j = 0; + int ret; + + early_hw_init(adapter, ai); + ret = init_parity(adapter); + if (ret) + return ret; + + for_each_port(adapter, i) { + struct port_info *p = adap2pinfo(adapter, i); + while (!adapter->params.vpd.port_type[j]) + ++j; + + p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, + ai->mdio_ops); + + p->phy.ops->power_down(&p->phy, 1); + ++j; + } + +return 0; +} + diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index e6fe2614ea6..864295e081b 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -117,6 +117,9 @@ typedef struct board_info { struct mutex addr_lock; /* phy and eeprom access lock */ + struct delayed_work phy_poll; + struct net_device *ndev; + spinlock_t lock; struct mii_if_info mii; @@ -297,6 +300,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width) } } +static void dm9000_schedule_poll(board_info_t *db) +{ + schedule_delayed_work(&db->phy_poll, HZ * 2); +} /* Our watchdog timed out. 
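The dm9000 hunks around here add a two-second PHY poll built on a delayed work item. Condensed for readability, the lifecycle they implement looks like the sketch below; the struct is abridged from the hunks and this is not a drop-in.

#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/mii.h>

struct dm9000_poll_sketch {                 /* abridged stand-in for board_info_t */
	struct delayed_work phy_poll;
	struct net_device *ndev;
	struct mii_if_info mii;
	int msg_enable;                     /* consumed by netif_msg_link()       */
};

static void dm9000_poll_work_sketch(struct work_struct *w)
{
	struct delayed_work *dw = container_of(w, struct delayed_work, work);
	struct dm9000_poll_sketch *db = container_of(dw, struct dm9000_poll_sketch, phy_poll);

	mii_check_media(&db->mii, netif_msg_link(db), 0);   /* re-check link state    */
	if (netif_running(db->ndev))                        /* keep polling while up  */
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}
/* probe: INIT_DELAYED_WORK(&db->phy_poll, ...);
 * open:  schedule_delayed_work(&db->phy_poll, HZ * 2);
 * stop:  cancel_delayed_work_sync(&db->phy_poll), so no poll runs after shutdown. */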
Called by the networking layer */ static void dm9000_timeout(struct net_device *dev) @@ -465,6 +472,17 @@ static const struct ethtool_ops dm9000_ethtool_ops = { .set_eeprom = dm9000_set_eeprom, }; +static void +dm9000_poll_work(struct work_struct *w) +{ + struct delayed_work *dw = container_of(w, struct delayed_work, work); + board_info_t *db = container_of(dw, board_info_t, phy_poll); + + mii_check_media(&db->mii, netif_msg_link(db), 0); + + if (netif_running(db->ndev)) + dm9000_schedule_poll(db); +} /* dm9000_release_board * @@ -503,7 +521,7 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db) /* * Search DM9000 board, allocate space and register it */ -static int +static int __devinit dm9000_probe(struct platform_device *pdev) { struct dm9000_plat_data *pdata = pdev->dev.platform_data; @@ -525,17 +543,21 @@ dm9000_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); - dev_dbg(&pdev->dev, "dm9000_probe()"); + dev_dbg(&pdev->dev, "dm9000_probe()\n"); /* setup board info structure */ db = (struct board_info *) ndev->priv; memset(db, 0, sizeof (*db)); db->dev = &pdev->dev; + db->ndev = ndev; spin_lock_init(&db->lock); mutex_init(&db->addr_lock); + INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work); + + if (pdev->num_resources < 2) { ret = -ENODEV; goto out; @@ -761,6 +783,8 @@ dm9000_open(struct net_device *dev) mii_check_media(&db->mii, netif_msg_link(db), 1); netif_start_queue(dev); + + dm9000_schedule_poll(db); return 0; } @@ -879,6 +903,8 @@ dm9000_stop(struct net_device *ndev) if (netif_msg_ifdown(db)) dev_dbg(db->dev, "shutting down %s\n", ndev->name); + cancel_delayed_work_sync(&db->phy_poll); + netif_stop_queue(ndev); netif_carrier_off(ndev); @@ -1288,6 +1314,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) spin_unlock_irqrestore(&db->lock,flags); mutex_unlock(&db->addr_lock); + + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); return ret; } @@ -1301,6 +1329,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value) unsigned long flags; unsigned long reg_save; + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); mutex_lock(&db->addr_lock); spin_lock_irqsave(&db->lock,flags); @@ -1372,7 +1401,7 @@ dm9000_drv_resume(struct platform_device *dev) return 0; } -static int +static int __devexit dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); @@ -1393,7 +1422,7 @@ static struct platform_driver dm9000_driver = { .owner = THIS_MODULE, }, .probe = dm9000_probe, - .remove = dm9000_drv_remove, + .remove = __devexit_p(dm9000_drv_remove), .suspend = dm9000_drv_suspend, .resume = dm9000_drv_resume, }; diff --git a/drivers/net/e100.c b/drivers/net/e100.c index f3cba5e24ec..1037b133231 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1803,6 +1803,8 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) if (rx->prev->skb) { struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_TODEVICE); } return 0; diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index 701531e72e7..a3f6a9c72ec 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -347,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, u32 data) else netdev->features &= ~NETIF_F_TSO; - if (data) + if (data && (adapter->hw.mac_type > 
e1000_82547_rev_2)) netdev->features |= NETIF_F_TSO6; else netdev->features &= ~NETIF_F_TSO6; diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 2a53875cddb..f823b8ba578 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -648,6 +648,8 @@ #define IFE_E_PHY_ID 0x02A80330 #define IFE_PLUS_E_PHY_ID 0x02A80320 #define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ @@ -701,6 +703,14 @@ #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) + /* * Bits... * 15-5: page diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 38bfd0d261f..d3bc6f8101f 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -127,7 +127,7 @@ struct e1000_buffer { /* arrays of page information for packet split */ struct e1000_ps_page *ps_pages; }; - + struct page *page; }; struct e1000_ring { @@ -304,6 +304,7 @@ struct e1000_info { #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) #define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_IS_ICH (1 << 9) #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) #define FLAG_IS_QUAD_PORT_A (1 << 12) #define FLAG_IS_QUAD_PORT (1 << 13) @@ -386,6 +387,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state); extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); @@ -443,6 +445,9 @@ extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); +extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index ce045acce63..a14561f40db 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -494,8 +494,12 @@ static int e1000_get_eeprom(struct net_device *netdev, for (i = 0; i < last_word - first_word + 1; i++) { ret_val = e1000_read_nvm(hw, first_word + i, 1, &eeprom_buff[i]); - if (ret_val) + if (ret_val) { + /* a read error occurred, throw away the + * result */ + memset(eeprom_buff, 0xff, sizeof(eeprom_buff)); break; + } } } @@ -803,8 +807,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) /* restore previous status */ ew32(STATUS, before); - if ((mac->type != e1000_ich8lan) && 
- (mac->type != e1000_ich9lan)) { + if (!(adapter->flags & FLAG_IS_ICH)) { REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); @@ -824,15 +827,13 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); - before = (((mac->type == e1000_ich8lan) || - (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE); + before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); - if ((mac->type != e1000_ich8lan) && - (mac->type != e1000_ich9lan)) + if (!(adapter->flags & FLAG_IS_ICH)) REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); @@ -911,9 +912,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Test each interrupt */ for (i = 0; i < 10; i++) { - - if (((adapter->hw.mac.type == e1000_ich8lan) || - (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) + if ((adapter->flags & FLAG_IS_ICH) && (i == 8)) continue; /* Interrupt to test */ @@ -1184,6 +1183,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; u32 stat_reg = 0; + u16 phy_reg = 0; hw->mac.autoneg = 0; @@ -1211,6 +1211,28 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_SPD_100 |/* Force Speed to 100 */ E1000_CTRL_FD); /* Force Duplex to FULL */ break; + case e1000_phy_bm: + /* Set Default MAC Interface speed to 1GB */ + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); + phy_reg &= ~0x0007; + phy_reg |= 0x006; + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); + /* Assert SW reset for above settings to take effect */ + e1000e_commit_phy(hw); + mdelay(1); + /* Force Full Duplex */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); + /* Set Link Up (in force link) */ + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); + /* Force Link */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); + /* Set Early Link Enable */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); + /* fall through */ default: /* force 1000, set loopback */ e1e_wphy(hw, PHY_CONTROL, 0x4140); @@ -1224,8 +1246,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ E1000_CTRL_FD); /* Force Duplex to FULL */ - if ((adapter->hw.mac.type == e1000_ich8lan) || - (adapter->hw.mac.type == e1000_ich9lan)) + if (adapter->flags & FLAG_IS_ICH) ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ } diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index a930e6d9cf0..74f263acb17 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -216,6 +216,21 @@ enum e1e_registers { #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ #define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +#define BM_WUC_PAGE 800 +#define 
BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE 769 +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) + +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 #define IGP01E1000_PHY_POLARITY_MASK 0x0078 @@ -331,10 +346,16 @@ enum e1e_registers { #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 #define E1000_DEV_ID_ICH8_IGP_M 0x104D #define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB #define E1000_DEV_ID_ICH9_IGP_C 0x294C #define E1000_DEV_ID_ICH9_IFE 0x10C0 #define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 #define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE #define E1000_FUNC_1 1 @@ -378,6 +399,7 @@ enum e1000_phy_type { e1000_phy_gg82563, e1000_phy_igp_3, e1000_phy_ife, + e1000_phy_bm, }; enum e1000_bus_width { diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 768485dbb2c..9e38452a738 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -38,6 +38,12 @@ * 82566DM Gigabit Network Connection * 82566MC Gigabit Network Connection * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82562GT-3 10/100 Network Connection */ #include <linux/netdevice.h> @@ -198,6 +204,19 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->addr = 1; phy->reset_delay_us = 100; + /* + * We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; + hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) + return ret_val; + } + phy->id = 0; while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && (i++ < 100)) { @@ -219,6 +238,13 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->type = e1000_phy_ife; phy->autoneg_mask = E1000_ALL_NOT_GIG; break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; + hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; + hw->phy.ops.commit_phy = e1000e_phy_sw_reset; + break; default: return -E1000_ERR_PHY; break; @@ -664,6 +690,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) return e1000_get_phy_info_ife_ich8lan(hw); break; case e1000_phy_igp_3: + case e1000_phy_bm: return e1000e_get_phy_info_igp(hw); break; default: @@ -728,7 +755,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) s32 ret_val = 0; u16 data; - if (phy->type != e1000_phy_igp_3) + if (phy->type == e1000_phy_ife) return ret_val; phy_ctrl = er32(PHY_CTRL); @@ -1918,8 +1945,35 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) ret_val = e1000e_copper_link_setup_igp(hw); if (ret_val) return ret_val; + } else if (hw->phy.type == e1000_phy_bm) { + ret_val = e1000e_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; } + if (hw->phy.type == 
e1000_phy_ife) { + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); + if (ret_val) + return ret_val; + } return e1000e_setup_copper_link(hw); } @@ -2127,6 +2181,31 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) } /** + * e1000e_disable_gig_wol_ich8lan - disable gig during WoL + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation + * to a lower speed. + * + * Should only be called for ICH9 devices. + **/ +void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) +{ + u32 phy_ctrl; + + if (hw->mac.type == e1000_ich9lan) { + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_GBE_DISABLE; + ew32(PHY_CTRL, phy_ctrl); + } + + return; +} + +/** * e1000_cleanup_led_ich8lan - Restore the default LED operation * @hw: pointer to the HW structure * @@ -2247,6 +2326,7 @@ static struct e1000_nvm_operations ich8_nvm_ops = { struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL + | FLAG_IS_ICH | FLAG_RX_CSUM_ENABLED | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT @@ -2262,6 +2342,7 @@ struct e1000_info e1000_ich8_info = { struct e1000_info e1000_ich9_info = { .mac = e1000_ich9lan, .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_RX_CSUM_ENABLED | FLAG_HAS_CTRLEXT_ON_LOAD diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 8991ab8911e..648a87bbf46 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -43,10 +43,11 @@ #include <linux/if_vlan.h> #include <linux/cpu.h> #include <linux/smp.h> +#include <linux/pm_qos_params.h> #include "e1000.h" -#define DRV_VERSION "0.2.1" +#define DRV_VERSION "0.3.3.3-k2" char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -341,6 +342,89 @@ no_buffers: } /** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ + +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - + 16 /* for skb_reserve */ - + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = netdev_alloc_skb(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +check_page: + /* allocate a new page 
if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) + buffer_info->dma = pci_map_page(pdev, + buffer_info->page, 0, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** * e1000_clean_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure * @@ -783,6 +867,186 @@ next_desc: } /** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ + +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + ++i; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window + * too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + +#define rxtop rx_ring->rx_skb_top + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = 
skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use the page, so don't erase + * buffer_info->page */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + ndev_err(netdev, "pskb_may_pull failed.\n"); + dev_kfree_skb(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb, status, + rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + adapter->net_stats.rx_bytes += total_rx_bytes; + adapter->net_stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** * e1000_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure **/ @@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) pci_unmap_single(pdev, buffer_info->dma, adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) + pci_unmap_page(pdev, buffer_info->dma, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); else if (adapter->clean_rx == e1000_clean_rx_irq_ps) pci_unmap_single(pdev, buffer_info->dma, adapter->rx_ps_bsize0, @@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) buffer_info->dma = 0; } + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; @@ -1755,10 +2028,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) * a lot of memory, since we allocate 3 pages at all times * per packet. 
*/ - adapter->rx_ps_pages = 0; pages = PAGE_USE_COUNT(adapter->netdev->mtu); - if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && + (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; if (adapter->rx_ps_pages) { /* Configure extra packet-split registers */ @@ -1819,9 +2094,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) sizeof(union e1000_rx_desc_packet_split); adapter->clean_rx = e1000_clean_rx_irq_ps; adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; } else { - rdlen = rx_ring->count * - sizeof(struct e1000_rx_desc); + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } @@ -1885,8 +2163,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ if ((adapter->flags & FLAG_HAS_ERT) && - (adapter->netdev->mtu > ETH_DATA_LEN)) - ew32(ERT, E1000_ERT_2048); + (adapter->netdev->mtu > ETH_DATA_LEN)) { + u32 rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl | 0x3); + ew32(ERT, E1000_ERT_2048 | (1 << 13)); + /* + * With jumbo frames and early-receive enabled, excessive + * C4->C2 latencies result in dropped transactions. + */ + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, + e1000e_driver_name, 55); + } else { + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, + e1000e_driver_name, + PM_QOS_DEFAULT_VALUE); + } /* Enable Receives */ ew32(RCTL, rctl); @@ -2155,6 +2446,14 @@ void e1000e_reset(struct e1000_adapter *adapter) /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); + + /* + * For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) + e1000_get_hw_control(adapter); + ew32(WUC, 0); if (mac->ops.init_hw(hw)) @@ -2236,7 +2535,8 @@ void e1000e_down(struct e1000_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; - e1000e_reset(adapter); + if (!pci_channel_offline(adapter->pdev)) + e1000e_reset(adapter); e1000_clean_tx_ring(adapter); e1000_clean_rx_ring(adapter); @@ -3469,6 +3769,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs */ if (max_frame <= 256) @@ -3626,6 +3928,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) ew32(CTRL_EXT, ctrl_ext); } + if (adapter->flags & FLAG_IS_ICH) + e1000e_disable_gig_wol_ich8lan(&adapter->hw); + /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); @@ -3897,8 +4202,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, struct e1000_adapter *adapter; struct e1000_hw *hw; const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; - unsigned long mmio_start, mmio_len; - unsigned long flash_start, flash_len; + resource_size_t mmio_start, mmio_len; + resource_size_t flash_start, flash_len; static int cards_found; int i, err, pci_using_dac; @@ -4292,6 +4597,13 @@ static struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, { } /* terminate list */ }; @@ -4326,7 +4638,9 @@ static int __init e1000_init_module(void) printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", e1000e_driver_name); ret = pci_register_driver(&e1000_driver); - + pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, + PM_QOS_DEFAULT_VALUE); + return ret; } module_init(e1000_init_module); @@ -4340,6 +4654,7 @@ module_init(e1000_init_module); static void __exit e1000_exit_module(void) { pci_unregister_driver(&e1000_driver); + pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); } module_exit(e1000_exit_module); diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index e102332a6be..b133dcf0e95 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -34,6 +34,9 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = @@ -465,6 +468,10 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) if (phy->disable_polarity_correction == 1) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; @@ -1776,6 +1783,10 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case IFE_C_E_PHY_ID: phy_type = e1000_phy_ife; break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; default: phy_type = e1000_phy_unknown; break; @@ -1784,6 +1795,273 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) } /** + * 
e1000e_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000e_determine_phy_address(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_PHY_TYPE; + u32 phy_addr= 0; + u32 i = 0; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + do { + for (phy_addr = 0; phy_addr < 4; phy_addr++) { + hw->phy.addr = phy_addr; + e1000e_get_phy_id(hw); + phy_type = e1000e_get_phy_type_from_id(hw->phy.id); + + /* + * If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) { + ret_val = 0; + break; + } + } + i++; + } while ((ret_val != 0) && (i < 100)); + + return ret_val; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. + **/ +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000e_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select = 0; + u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false); + goto out; + } + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) { + hw->phy.ops.release_phy(hw); + goto out; + } + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
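The paged BM access implemented above reduces to a little address arithmetic: the register offset carries both a page number and a register-within-page, and the page-select value is written through register 31 (shifted) when PHY address 1 is used, or through register 22 (unshifted) for addresses 2 and 3. A minimal stand-alone sketch of that split, assuming 32 registers per page, i.e. IGP_PAGE_SHIFT of 5 and MAX_PHY_REG_ADDRESS of 0x1F (values not shown in this hunk):

#include <stdio.h>

/* Assumed constants, consistent with the "PHY expects (page x 32)" comment. */
#define IGP_PAGE_SHIFT       5
#define MAX_PHY_REG_ADDRESS  0x1F

/* Mirrors e1000_get_phy_addr_for_bm_page() from the hunk above. */
static unsigned bm_phy_addr_for_page(unsigned page, unsigned reg)
{
        if (page >= 768 || (page == 0 && reg == 25) || reg == 31)
                return 1;
        return 2;
}

int main(void)
{
        unsigned offset = (769u << IGP_PAGE_SHIFT) | 17; /* hypothetical: page 769, reg 17 */
        unsigned page = offset >> IGP_PAGE_SHIFT;
        unsigned reg = offset & MAX_PHY_REG_ADDRESS;
        unsigned addr = bm_phy_addr_for_page(page, reg);
        /* The page-select value is shifted left only for PHY address 1. */
        unsigned psel = (addr == 1) ? (page << IGP_PAGE_SHIFT) : page;

        printf("offset 0x%x -> page %u, reg %u, phy addr %u, page-select 0x%x\n",
               offset, page, reg, addr, psel);
        return 0;
}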
+ **/ +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select = 0; + u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true); + goto out; + } + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) { + hw->phy.ops.release_phy(hw); + goto out; + } + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + hw->phy.ops.release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. Note that procedure to read the wakeup + * registers are different. It works as such: + * 1) Set page 769, register 17, bit 2 = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769_17.2 to its original value + **/ +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read) +{ + s32 ret_val; + u16 reg = ((u16)offset) & PHY_REG_MASK; + u16 phy_reg = 0; + u8 phy_acquired = 1; + + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) { + phy_acquired = 0; + goto out; + } + + /* All operations in this function are phy address 1 */ + hw->phy.addr = 1; + + /* Set page 769 */ + e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); + + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); + if (ret_val) + goto out; + + /* First clear bit 4 to avoid a power state change */ + phy_reg &= ~(BM_WUC_HOST_WU_BIT); + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) + goto out; + + /* Write bit 2 = 1, and clear bit 4 to 769_17 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, + phy_reg | BM_WUC_ENABLE_BIT); + if (ret_val) + goto out; + + /* Select page 800 */ + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_PAGE << IGP_PAGE_SHIFT)); + + /* Write the page 800 offset value using opcode 0x11 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) + goto out; + + if (read) { + /* Read the page 800 value using opcode 0x12 */ + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + data); + } else { + /* Read the page 800 value using opcode 0x12 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + *data); + } + + if 
(ret_val) + goto out; + + /* + * Restore 769_17.2 to its original value + * Set page 769 + */ + e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); + + /* Clear 769_17.2 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + +out: + if (phy_acquired == 1) + hw->phy.ops.release_phy(hw); + return ret_val; +} + +/** * e1000e_commit_phy - Soft PHY reset * @hw: pointer to the HW structure * diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 2eb82aba4a8..795c594a4b7 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -202,7 +202,7 @@ static unsigned short start_code[] = { 0x0000,Cmd_MCast, 0x0076, /* link to next command */ #define CONF_NR_MULTICAST 0x44 - 0x0000, /* number of multicast addresses */ + 0x0000, /* number of bytes in multicast address(es) */ #define CONF_MULTICAST 0x46 0x0000, 0x0000, 0x0000, /* some addresses */ 0x0000, 0x0000, 0x0000, @@ -1569,7 +1569,7 @@ static void eexp_hw_init586(struct net_device *dev) static void eexp_setup_filter(struct net_device *dev) { - struct dev_mc_list *dmi = dev->mc_list; + struct dev_mc_list *dmi; unsigned short ioaddr = dev->base_addr; int count = dev->mc_count; int i; @@ -1580,9 +1580,9 @@ static void eexp_setup_filter(struct net_device *dev) } outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); - outw(count, ioaddr+SHADOW(CONF_NR_MULTICAST)); - for (i = 0; i < count; i++) { - unsigned short *data = (unsigned short *)dmi->dmi_addr; + outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); + for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) { + unsigned short *data; if (!dmi) { printk(KERN_INFO "%s: too few multicast addresses\n", dev->name); break; @@ -1591,6 +1591,7 @@ static void eexp_setup_filter(struct net_device *dev) printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); continue; } + data = (unsigned short *)dmi->dmi_addr; outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index f5dacceab95..e01926b7b5b 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0090" +#define DRV_VERSION "EHEA_0092" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 @@ -118,6 +118,13 @@ #define EHEA_MR_ACC_CTRL 0x00800000 #define EHEA_BUSMAP_START 0x8000000000000000ULL +#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL +#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */ +#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2) +#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT) +#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */ +#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1) + #define EHEA_WATCH_DOG_TIMEOUT 10*HZ @@ -192,10 +199,20 @@ struct h_epas { set to 0 if unused */ }; -struct ehea_busmap { - unsigned int entries; /* total number of entries */ - unsigned int valid_sections; /* number of valid sections */ - u64 *vaddr; +/* + * Memory map data structures + */ +struct ehea_dir_bmap +{ + u64 ent[EHEA_MAP_ENTRIES]; +}; +struct ehea_top_bmap +{ + struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES]; +}; +struct ehea_bmap +{ + struct ehea_top_bmap *top[EHEA_MAP_ENTRIES]; }; struct ehea_qp; @@ -435,7 +452,7 @@ struct ehea_bcmc_reg_entry { struct ehea_bcmc_reg_array { struct ehea_bcmc_reg_entry *arr; int num_entries; - struct mutex lock; + spinlock_t lock; 
}; #define EHEA_PORT_UP 1 @@ -461,6 +478,7 @@ struct ehea_port { int num_add_tx_qps; int num_mcs; int resets; + u64 flags; u64 mac_addr; u32 logical_port_id; u32 port_speed; @@ -484,7 +502,8 @@ struct port_res_cfg { }; enum ehea_flag_bits { - __EHEA_STOP_XFER + __EHEA_STOP_XFER, + __EHEA_DISABLE_PORT_RESET }; void ehea_set_ethtool_ops(struct net_device *netdev); diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index f9bc21c74b5..0920b796bd7 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -35,6 +35,7 @@ #include <linux/if_ether.h> #include <linux/notifier.h> #include <linux/reboot.h> +#include <linux/memory.h> #include <asm/kexec.h> #include <linux/mutex.h> @@ -117,6 +118,7 @@ static struct of_device_id ehea_device_table[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, ehea_device_table); static struct of_platform_driver ehea_driver = { .name = "ehea", @@ -136,6 +138,12 @@ void ehea_dump(void *adr, int len, char *msg) } } +void ehea_schedule_port_reset(struct ehea_port *port) +{ + if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) + schedule_work(&port->reset_task); +} + static void ehea_update_firmware_handles(void) { struct ehea_fw_handle_entry *arr = NULL; @@ -240,7 +248,7 @@ static void ehea_update_bcmc_registrations(void) } if (num_registrations) { - arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL); + arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC); if (!arr) return; /* Keep the existing array */ } else @@ -300,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) memset(stats, 0, sizeof(*stats)); - cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC); if (!cb2) { ehea_error("no mem for cb2"); goto out; @@ -586,7 +594,7 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, "Resetting port.", pr->qp->init_attr.qp_nr); ehea_dump(cqe, sizeof(*cqe), "CQE"); } - schedule_work(&pr->port->reset_task); + ehea_schedule_port_reset(pr->port); return 1; } @@ -615,7 +623,7 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr, *tcph = tcp_hdr(skb); /* check if ip header and tcp header are complete */ - if (iph->tot_len < ip_len + tcp_hdrlen(skb)) + if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) return -1; *hdr_flags = LRO_IPV4 | LRO_TCP; @@ -764,7 +772,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) ehea_error("Send Completion Error: Resetting port"); if (netif_msg_tx_err(pr->port)) ehea_dump(cqe, sizeof(*cqe), "Send CQE"); - schedule_work(&pr->port->reset_task); + ehea_schedule_port_reset(pr->port); break; } @@ -884,7 +892,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) eqe = ehea_poll_eq(port->qp_eq); } - schedule_work(&port->reset_task); + ehea_schedule_port_reset(port); return IRQ_HANDLED; } @@ -1762,25 +1770,29 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); - mutex_lock(&ehea_bcmc_regs.lock); + spin_lock(&ehea_bcmc_regs.lock); /* Deregister old MAC in pHYP */ - ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); - if (ret) - goto out_upregs; + if (port->state == EHEA_PORT_UP) { + ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); + if (ret) + goto out_upregs; + } port->mac_addr = cb0->port_mac_addr << 16; /* Register new MAC in pHYP */ - ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); - if (ret) - goto out_upregs; + if (port->state == EHEA_PORT_UP) { + ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 
+ if (ret) + goto out_upregs; + } ret = 0; out_upregs: ehea_update_bcmc_registrations(); - mutex_unlock(&ehea_bcmc_regs.lock); + spin_unlock(&ehea_bcmc_regs.lock); out_free: kfree(cb0); out: @@ -1942,7 +1954,7 @@ static void ehea_set_multicast_list(struct net_device *dev) } ehea_promiscuous(dev, 0); - mutex_lock(&ehea_bcmc_regs.lock); + spin_lock(&ehea_bcmc_regs.lock); if (dev->flags & IFF_ALLMULTI) { ehea_allmulti(dev, 1); @@ -1973,7 +1985,7 @@ static void ehea_set_multicast_list(struct net_device *dev) } out: ehea_update_bcmc_registrations(); - mutex_unlock(&ehea_bcmc_regs.lock); + spin_unlock(&ehea_bcmc_regs.lock); return; } @@ -2212,8 +2224,6 @@ static void ehea_vlan_rx_register(struct net_device *dev, goto out; } - memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); - hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, H_PORT_CB1, H_PORT_CB1_ALL, cb1); if (hret != H_SUCCESS) @@ -2494,7 +2504,7 @@ static int ehea_up(struct net_device *dev) } } - mutex_lock(&ehea_bcmc_regs.lock); + spin_lock(&ehea_bcmc_regs.lock); ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); if (ret) { @@ -2517,7 +2527,7 @@ out: ehea_info("Failed starting %s. ret=%i", dev->name, ret); ehea_update_bcmc_registrations(); - mutex_unlock(&ehea_bcmc_regs.lock); + spin_unlock(&ehea_bcmc_regs.lock); ehea_update_firmware_handles(); mutex_unlock(&ehea_fw_handles.lock); @@ -2572,7 +2582,7 @@ static int ehea_down(struct net_device *dev) mutex_lock(&ehea_fw_handles.lock); - mutex_lock(&ehea_bcmc_regs.lock); + spin_lock(&ehea_bcmc_regs.lock); ehea_drop_multicast_list(dev); ehea_broadcast_reg_helper(port, H_DEREG_BCMC); @@ -2581,7 +2591,7 @@ static int ehea_down(struct net_device *dev) port->state = EHEA_PORT_DOWN; ehea_update_bcmc_registrations(); - mutex_unlock(&ehea_bcmc_regs.lock); + spin_unlock(&ehea_bcmc_regs.lock); ret = ehea_clean_all_portres(port); if (ret) @@ -2602,12 +2612,14 @@ static int ehea_stop(struct net_device *dev) if (netif_msg_ifdown(port)) ehea_info("disabling port %s", dev->name); - flush_scheduled_work(); + set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); + cancel_work_sync(&port->reset_task); mutex_lock(&port->port_lock); netif_stop_queue(dev); port_napi_disable(port); ret = ehea_down(dev); mutex_unlock(&port->port_lock); + clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); return ret; } @@ -2937,7 +2949,7 @@ static void ehea_tx_watchdog(struct net_device *dev) if (netif_carrier_ok(dev) && !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) - schedule_work(&port->reset_task); + ehea_schedule_port_reset(port); } int ehea_sense_adapter_attr(struct ehea_adapter *adapter) @@ -3177,11 +3189,12 @@ out_err: static void ehea_shutdown_single_port(struct ehea_port *port) { + struct ehea_adapter *adapter = port->adapter; unregister_netdev(port->netdev); ehea_unregister_port(port); kfree(port->mc_list); free_netdev(port->netdev); - port->adapter->active_ports--; + adapter->active_ports--; } static int ehea_setup_ports(struct ehea_adapter *adapter) @@ -3503,6 +3516,24 @@ void ehea_crash_handler(void) 0, H_DEREG_BCMC); } +static int ehea_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + switch (action) { + case MEM_OFFLINE: + ehea_info("memory has been removed"); + ehea_rereg_mrs(NULL); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block ehea_mem_nb = { + .notifier_call = ehea_mem_notifier, +}; + static int ehea_reboot_notifier(struct notifier_block *nb, unsigned long action, void *unused) { @@ -3567,7 +3598,7 @@ int __init 
ehea_module_init(void) memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); mutex_init(&ehea_fw_handles.lock); - mutex_init(&ehea_bcmc_regs.lock); + spin_lock_init(&ehea_bcmc_regs.lock); ret = check_module_parm(); if (ret) @@ -3581,6 +3612,10 @@ int __init ehea_module_init(void) if (ret) ehea_info("failed registering reboot notifier"); + ret = register_memory_notifier(&ehea_mem_nb); + if (ret) + ehea_info("failed registering memory remove notifier"); + ret = crash_shutdown_register(&ehea_crash_handler); if (ret) ehea_info("failed registering crash handler"); @@ -3604,6 +3639,7 @@ int __init ehea_module_init(void) out3: ibmebus_unregister_driver(&ehea_driver); out2: + unregister_memory_notifier(&ehea_mem_nb); unregister_reboot_notifier(&ehea_reboot_nb); crash_shutdown_unregister(&ehea_crash_handler); out: @@ -3621,6 +3657,7 @@ static void __exit ehea_module_exit(void) ret = crash_shutdown_unregister(&ehea_crash_handler); if (ret) ehea_info("failed unregistering crash handler"); + unregister_memory_notifier(&ehea_mem_nb); kfree(ehea_fw_handles.arr); kfree(ehea_bcmc_regs.arr); ehea_destroy_busmap(); diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index d522e905f46..140f05baafd 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -31,8 +31,8 @@ #include "ehea_phyp.h" #include "ehea_qmr.h" +struct ehea_bmap *ehea_bmap = NULL; -struct ehea_busmap ehea_bmap = { 0, 0, NULL }; static void *hw_qpageit_get_inc(struct hw_queue *queue) @@ -559,125 +559,253 @@ int ehea_destroy_qp(struct ehea_qp *qp) return 0; } -int ehea_create_busmap(void) +static inline int ehea_calc_index(unsigned long i, unsigned long s) { - u64 vaddr = EHEA_BUSMAP_START; - unsigned long high_section_index = 0; - int i; + return (i >> s) & EHEA_INDEX_MASK; +} - /* - * Sections are not in ascending order -> Loop over all sections and - * find the highest PFN to compute the required map size. 
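The flat section array being removed here gives way to the three-level ehea_bmap (top, dir, ent) declared in ehea.h above; a memory-section index is simply split with the EHEA_*_INDEX_SHIFT constants from that header. A minimal stand-alone sketch of the decomposition, assuming 16 MB memory sections (SECTION_SIZE_BITS of 24, an assumption not shown in this hunk):

#include <stdio.h>
#include <stdint.h>

/* Constants as added to ehea.h in this patch. */
#define EHEA_BUSMAP_START    0x8000000000000000ULL
#define EHEA_DIR_INDEX_SHIFT 13                     /* 8k entries per level */
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES     (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_INDEX_MASK      (EHEA_MAP_ENTRIES - 1)

/* Assumption: 16 MB memory sections, i.e. SECTION_SIZE_BITS == 24. */
#define SECTION_SIZE_BITS    24

int main(void)
{
        uint64_t abs_addr = 70ULL << 30;            /* hypothetical address, 70 GB in */
        uint64_t index = abs_addr >> SECTION_SIZE_BITS;

        unsigned top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
        unsigned dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
        unsigned idx = index & EHEA_INDEX_MASK;

        printf("section %llu -> top %u, dir %u, ent %u (bus space starts at 0x%llx)\n",
               (unsigned long long)index, top, dir, idx,
               (unsigned long long)EHEA_BUSMAP_START);
        return 0;
}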
- */ - ehea_bmap.valid_sections = 0; +static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, + int dir) +{ + if(!ehea_top_bmap->dir[dir]) { + ehea_top_bmap->dir[dir] = + kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); + if (!ehea_top_bmap->dir[dir]) + return -ENOMEM; + } + return 0; +} - for (i = 0; i < NR_MEM_SECTIONS; i++) - if (valid_section_nr(i)) - high_section_index = i; +static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) +{ + if(!ehea_bmap->top[top]) { + ehea_bmap->top[top] = + kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); + if (!ehea_bmap->top[top]) + return -ENOMEM; + } + return ehea_init_top_bmap(ehea_bmap->top[top], dir); +} - ehea_bmap.entries = high_section_index + 1; - ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); +static int ehea_create_busmap_callback(unsigned long pfn, + unsigned long nr_pages, void *arg) +{ + unsigned long i, mr_len, start_section, end_section; + start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; + end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); + mr_len = *(unsigned long *)arg; - if (!ehea_bmap.vaddr) + ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); + if (!ehea_bmap) return -ENOMEM; - for (i = 0 ; i < ehea_bmap.entries; i++) { - unsigned long pfn = section_nr_to_pfn(i); + for (i = start_section; i < end_section; i++) { + int ret; + int top, dir, idx; + u64 vaddr; + + top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); + dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); + + ret = ehea_init_bmap(ehea_bmap, top, dir); + if(ret) + return ret; - if (pfn_valid(pfn)) { - ehea_bmap.vaddr[i] = vaddr; - vaddr += EHEA_SECTSIZE; - ehea_bmap.valid_sections++; - } else - ehea_bmap.vaddr[i] = 0; + idx = i & EHEA_INDEX_MASK; + vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE; + + ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr; } + mr_len += nr_pages * PAGE_SIZE; + *(unsigned long *)arg = mr_len; + return 0; } +static unsigned long ehea_mr_len; + +static DEFINE_MUTEX(ehea_busmap_mutex); + +int ehea_create_busmap(void) +{ + int ret; + mutex_lock(&ehea_busmap_mutex); + ehea_mr_len = 0; + ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len, + ehea_create_busmap_callback); + mutex_unlock(&ehea_busmap_mutex); + return ret; +} + void ehea_destroy_busmap(void) { - vfree(ehea_bmap.vaddr); + int top, dir; + mutex_lock(&ehea_busmap_mutex); + if (!ehea_bmap) + goto out_destroy; + + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { + if (!ehea_bmap->top[top]) + continue; + + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { + if (!ehea_bmap->top[top]->dir[dir]) + continue; + + kfree(ehea_bmap->top[top]->dir[dir]); + } + + kfree(ehea_bmap->top[top]); + } + + kfree(ehea_bmap); + ehea_bmap = NULL; +out_destroy: + mutex_unlock(&ehea_busmap_mutex); } u64 ehea_map_vaddr(void *caddr) { - u64 mapped_addr; - unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS; - - if (likely(index < ehea_bmap.entries)) { - mapped_addr = ehea_bmap.vaddr[index]; - if (likely(mapped_addr)) - mapped_addr |= (((unsigned long)caddr) - & (EHEA_SECTSIZE - 1)); - else - mapped_addr = -1; - } else - mapped_addr = -1; - - if (unlikely(mapped_addr == -1)) - if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) - schedule_work(&ehea_rereg_mr_task); - - return mapped_addr; + int top, dir, idx; + unsigned long index, offset; + + if (!ehea_bmap) + return EHEA_INVAL_ADDR; + + index = virt_to_abs(caddr) >> SECTION_SIZE_BITS; + top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK; + if 
(!ehea_bmap->top[top]) + return EHEA_INVAL_ADDR; + + dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]->dir[dir]) + return EHEA_INVAL_ADDR; + + idx = index & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) + return EHEA_INVAL_ADDR; + + offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1); + return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset; +} + +static inline void *ehea_calc_sectbase(int top, int dir, int idx) +{ + unsigned long ret = idx; + ret |= dir << EHEA_DIR_INDEX_SHIFT; + ret |= top << EHEA_TOP_INDEX_SHIFT; + return abs_to_virt(ret << SECTION_SIZE_BITS); +} + +static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + void *pg; + u64 j, m, hret; + unsigned long k = 0; + u64 pt_abs = virt_to_abs(pt); + + void *sectbase = ehea_calc_sectbase(top, dir, idx); + + for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) { + + for (m = 0; m < EHEA_MAX_RPAGE; m++) { + pg = sectbase + ((k++) * EHEA_PAGESIZE); + pt[m] = virt_to_abs(pg); + } + hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, + 0, pt_abs, EHEA_MAX_RPAGE); + + if ((hret != H_SUCCESS) + && (hret != H_PAGE_REGISTERED)) { + ehea_h_free_resource(adapter->handle, mr->handle, + FORCE_FREE); + ehea_error("register_rpage_mr failed"); + return hret; + } + } + return hret; +} + +static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + u64 hret = H_SUCCESS; + int idx; + + for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) { + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) + continue; + + hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr); + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) + return hret; + } + return hret; +} + +static u64 ehea_reg_mr_dir_sections(int top, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + u64 hret = H_SUCCESS; + int dir; + + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { + if (!ehea_bmap->top[top]->dir[dir]) + continue; + + hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr); + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) + return hret; + } + return hret; } int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) { int ret; u64 *pt; - void *pg; - u64 hret, pt_abs, i, j, m, mr_len; + u64 hret; u32 acc_ctrl = EHEA_MR_ACC_CTRL; - mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE; + unsigned long top; - pt = kzalloc(PAGE_SIZE, GFP_KERNEL); + pt = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!pt) { ehea_error("no mem"); ret = -ENOMEM; goto out; } - pt_abs = virt_to_abs(pt); - hret = ehea_h_alloc_resource_mr(adapter->handle, - EHEA_BUSMAP_START, mr_len, - acc_ctrl, adapter->pd, + hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START, + ehea_mr_len, acc_ctrl, adapter->pd, &mr->handle, &mr->lkey); + if (hret != H_SUCCESS) { ehea_error("alloc_resource_mr failed"); ret = -EIO; goto out; } - for (i = 0 ; i < ehea_bmap.entries; i++) - if (ehea_bmap.vaddr[i]) { - void *sectbase = __va(i << SECTION_SIZE_BITS); - unsigned long k = 0; - - for (j = 0; j < (EHEA_PAGES_PER_SECTION / - EHEA_MAX_RPAGE); j++) { - - for (m = 0; m < EHEA_MAX_RPAGE; m++) { - pg = sectbase + ((k++) * EHEA_PAGESIZE); - pt[m] = virt_to_abs(pg); - } - - hret = ehea_h_register_rpage_mr(adapter->handle, - mr->handle, - 0, 0, pt_abs, - EHEA_MAX_RPAGE); - if ((hret != H_SUCCESS) - && (hret != H_PAGE_REGISTERED)) { - ehea_h_free_resource(adapter->handle, - mr->handle, - FORCE_FREE); - 
ehea_error("register_rpage_mr failed"); - ret = -EIO; - goto out; - } - } - } + if (!ehea_bmap) { + ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); + ehea_error("no busmap available"); + ret = -EIO; + goto out; + } + + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { + if (!ehea_bmap->top[top]) + continue; + + hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr); + if((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS)) + break; + } if (hret != H_SUCCESS) { ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c index 46a90e9ec56..c05cb159c77 100644 --- a/drivers/net/enc28j60.c +++ b/drivers/net/enc28j60.c @@ -400,26 +400,31 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) mutex_unlock(&priv->lock); } -/* - * Wait until the PHY operation is complete. - */ -static int wait_phy_ready(struct enc28j60_net *priv) +static unsigned long msec20_to_jiffies; + +static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val) { - unsigned long timeout = jiffies + 20 * HZ / 1000; - int ret = 1; + unsigned long timeout = jiffies + msec20_to_jiffies; /* 20 msec timeout read */ - while (nolock_regb_read(priv, MISTAT) & MISTAT_BUSY) { + while ((nolock_regb_read(priv, reg) & mask) != val) { if (time_after(jiffies, timeout)) { if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME - ": PHY ready timeout!\n"); - ret = 0; - break; + dev_dbg(&priv->spi->dev, + "reg %02x ready timeout!\n", reg); + return -ETIMEDOUT; } cpu_relax(); } - return ret; + return 0; +} + +/* + * Wait until the PHY operation is complete. + */ +static int wait_phy_ready(struct enc28j60_net *priv) +{ + return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1; } /* @@ -594,6 +599,32 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) nolock_regw_write(priv, ETXNDL, end); } +/* + * Low power mode shrinks power consumption about 100x, so we'd like + * the chip to be in that mode whenever it's inactive. (However, we + * can't stay in lowpower mode during suspend with WOL active.) + */ +static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low) +{ + if (netif_msg_drv(priv)) + dev_dbg(&priv->spi->dev, "%s power...\n", + is_low ? 
"low" : "high"); + + mutex_lock(&priv->lock); + if (is_low) { + nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); + poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0); + poll_ready(priv, ECON1, ECON1_TXRTS, 0); + /* ECON2_VRPS was set during initialization */ + nolock_reg_bfset(priv, ECON2, ECON2_PWRSV); + } else { + nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV); + poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY); + /* caller sets ECON1_RXEN */ + } + mutex_unlock(&priv->lock); +} + static int enc28j60_hw_init(struct enc28j60_net *priv) { u8 reg; @@ -612,8 +643,8 @@ static int enc28j60_hw_init(struct enc28j60_net *priv) priv->tx_retry_count = 0; priv->max_pk_counter = 0; priv->rxfilter = RXFILTER_NORMAL; - /* enable address auto increment */ - nolock_regb_write(priv, ECON2, ECON2_AUTOINC); + /* enable address auto increment and voltage regulator powersave */ + nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS); nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); @@ -690,7 +721,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv) static void enc28j60_hw_enable(struct enc28j60_net *priv) { - /* enable interrutps */ + /* enable interrupts */ if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", __FUNCTION__); @@ -726,15 +757,12 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex) int ret = 0; if (!priv->hw_enable) { - if (autoneg == AUTONEG_DISABLE && speed == SPEED_10) { + /* link is in low power mode now; duplex setting + * will take effect on next enc28j60_hw_init(). + */ + if (autoneg == AUTONEG_DISABLE && speed == SPEED_10) priv->full_duplex = (duplex == DUPLEX_FULL); - if (!enc28j60_hw_init(priv)) { - if (netif_msg_drv(priv)) - dev_err(&ndev->dev, - "hw_reset() failed\n"); - ret = -EINVAL; - } - } else { + else { if (netif_msg_link(priv)) dev_warn(&ndev->dev, "unsupported link setting\n"); @@ -1307,7 +1335,8 @@ static int enc28j60_net_open(struct net_device *dev) } return -EADDRNOTAVAIL; } - /* Reset the hardware here */ + /* Reset the hardware here (and take it out of low power mode) */ + enc28j60_lowpower(priv, false); enc28j60_hw_disable(priv); if (!enc28j60_hw_init(priv)) { if (netif_msg_ifup(priv)) @@ -1337,6 +1366,7 @@ static int enc28j60_net_close(struct net_device *dev) printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); enc28j60_hw_disable(priv); + enc28j60_lowpower(priv, true); netif_stop_queue(dev); return 0; @@ -1537,6 +1567,8 @@ static int __devinit enc28j60_probe(struct spi_device *spi) dev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops); + enc28j60_lowpower(priv, true); + ret = register_netdev(dev); if (ret) { if (netif_msg_probe(priv)) @@ -1556,7 +1588,7 @@ error_alloc: return ret; } -static int enc28j60_remove(struct spi_device *spi) +static int __devexit enc28j60_remove(struct spi_device *spi) { struct enc28j60_net *priv = dev_get_drvdata(&spi->dev); @@ -1573,15 +1605,16 @@ static int enc28j60_remove(struct spi_device *spi) static struct spi_driver enc28j60_driver = { .driver = { .name = DRV_NAME, - .bus = &spi_bus_type, .owner = THIS_MODULE, - }, + }, .probe = enc28j60_probe, .remove = __devexit_p(enc28j60_remove), }; static int __init enc28j60_init(void) { + msec20_to_jiffies = msecs_to_jiffies(20); + return spi_register_driver(&enc28j60_driver); } diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index d21b7ab64bd..329edd9c08f 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -43,6 +43,29 
@@ #define DRIVER_NAME "mpc52xx-fec" +#define FEC5200_PHYADDR_NONE (-1) +#define FEC5200_PHYADDR_7WIRE (-2) + +/* Private driver data structure */ +struct mpc52xx_fec_priv { + int duplex; + int speed; + int r_irq; + int t_irq; + struct mpc52xx_fec __iomem *fec; + struct bcom_task *rx_dmatsk; + struct bcom_task *tx_dmatsk; + spinlock_t lock; + int msg_enable; + + /* MDIO link details */ + int phy_addr; + unsigned int phy_speed; + struct phy_device *phydev; + enum phy_state link; +}; + + static irqreturn_t mpc52xx_fec_interrupt(int, void *); static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *); static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *); @@ -55,7 +78,7 @@ module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0); MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe"); #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFDOWN ) + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) static int debug = -1; /* the above default */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "debugging messages level"); @@ -223,7 +246,7 @@ static int mpc52xx_fec_phy_start(struct net_device *dev) struct mpc52xx_fec_priv *priv = netdev_priv(dev); int err; - if (!priv->has_phy) + if (priv->phy_addr < 0) return 0; err = mpc52xx_fec_init_phy(dev); @@ -243,7 +266,7 @@ static void mpc52xx_fec_phy_stop(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); - if (!priv->has_phy) + if (!priv->phydev) return; phy_disconnect(priv->phydev); @@ -255,7 +278,7 @@ static void mpc52xx_fec_phy_stop(struct net_device *dev) static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv, struct mii_ioctl_data *mii_data, int cmd) { - if (!priv->has_phy) + if (!priv->phydev) return -ENOTSUPP; return phy_mii_ioctl(priv->phydev, mii_data, cmd); @@ -265,7 +288,7 @@ static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv) { struct mpc52xx_fec __iomem *fec = priv->fec; - if (!priv->has_phy) + if (priv->phydev) return; out_be32(&fec->mii_speed, priv->phy_speed); @@ -704,7 +727,7 @@ static void mpc52xx_fec_start(struct net_device *dev) rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */ rcntrl |= FEC_RCNTRL_FCE; - if (priv->has_phy) + if (priv->phy_addr != FEC5200_PHYADDR_7WIRE) rcntrl |= FEC_RCNTRL_MII_MODE; if (priv->duplex == DUPLEX_FULL) @@ -864,7 +887,10 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) struct net_device *ndev; struct mpc52xx_fec_priv *priv = NULL; struct resource mem; - const phandle *ph; + struct device_node *phy_node; + const phandle *phy_handle; + const u32 *prop; + int prop_size; phys_addr_t rx_fifo; phys_addr_t tx_fifo; @@ -948,26 +974,37 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) mpc52xx_fec_get_paddr(ndev, ndev->dev_addr); priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); - priv->duplex = DUPLEX_FULL; - - /* is the phy present in device tree? 
*/ - ph = of_get_property(op->node, "phy-handle", NULL); - if (ph) { - const unsigned int *prop; - struct device_node *phy_dn; - priv->has_phy = 1; - phy_dn = of_find_node_by_phandle(*ph); - prop = of_get_property(phy_dn, "reg", NULL); - priv->phy_addr = *prop; + /* + * Link mode configuration + */ - of_node_put(phy_dn); + /* Start with safe defaults for link connection */ + priv->phy_addr = FEC5200_PHYADDR_NONE; + priv->speed = 100; + priv->duplex = DUPLEX_HALF; + priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1; + + /* the 7-wire property means don't use MII mode */ + if (of_find_property(op->node, "fsl,7-wire-mode", NULL)) + priv->phy_addr = FEC5200_PHYADDR_7WIRE; + + /* The current speed preconfigures the speed of the MII link */ + prop = of_get_property(op->node, "current-speed", &prop_size); + if (prop && (prop_size >= sizeof(u32) * 2)) { + priv->speed = prop[0]; + priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF; + } - /* Phy speed */ - priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1; - } else { - dev_info(&ndev->dev, "can't find \"phy-handle\" in device" - " tree, using 7-wire mode\n"); + /* If there is a phy handle, setup link to that phy */ + phy_handle = of_get_property(op->node, "phy-handle", &prop_size); + if (phy_handle && (prop_size >= sizeof(phandle))) { + phy_node = of_find_node_by_phandle(*phy_handle); + prop = of_get_property(phy_node, "reg", &prop_size); + if (prop && (prop_size >= sizeof(u32))) + if ((*prop >= 0) && (*prop < PHY_MAX_ADDR)) + priv->phy_addr = *prop; + of_node_put(phy_node); } /* Hardware init */ @@ -982,6 +1019,20 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) if (rv < 0) goto probe_error; + /* Now report the link setup */ + switch (priv->phy_addr) { + case FEC5200_PHYADDR_NONE: + dev_info(&ndev->dev, "Fixed speed MII link: %i%cD\n", + priv->speed, priv->duplex ? 'F' : 'H'); + break; + case FEC5200_PHYADDR_7WIRE: + dev_info(&ndev->dev, "using 7-wire PHY mode\n"); + break; + default: + dev_info(&ndev->dev, "Using PHY at MDIO address %i\n", + priv->phy_addr); + } + /* We're done ! */ dev_set_drvdata(&op->dev, ndev); diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h index 8b1f75397b9..a227a525bdb 100644 --- a/drivers/net/fec_mpc52xx.h +++ b/drivers/net/fec_mpc52xx.h @@ -26,25 +26,6 @@ #define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000) -struct mpc52xx_fec_priv { - int duplex; - int r_irq; - int t_irq; - struct mpc52xx_fec __iomem *fec; - struct bcom_task *rx_dmatsk; - struct bcom_task *tx_dmatsk; - spinlock_t lock; - int msg_enable; - - int has_phy; - unsigned int phy_speed; - unsigned int phy_addr; - struct phy_device *phydev; - enum phy_state link; - int speed; -}; - - /* ======================================================================== */ /* Hardware register sets & bits */ /* ======================================================================== */ diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 35f66d4a459..20d4fe96a81 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -3273,6 +3273,20 @@ static void nv_link_irq(struct net_device *dev) dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); } +static void nv_msi_workaround(struct fe_priv *np) +{ + + /* Need to toggle the msi irq mask within the ethernet device, + * otherwise, future interrupts will not be detected. 
+ */ + if (np->msi_flags & NV_MSI_ENABLED) { + u8 __iomem *base = np->base; + + writel(0, base + NvRegMSIIrqMask); + writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); + } +} + static irqreturn_t nv_nic_irq(int foo, void *data) { struct net_device *dev = (struct net_device *) data; @@ -3295,6 +3309,8 @@ static irqreturn_t nv_nic_irq(int foo, void *data) if (!(events & np->irqmask)) break; + nv_msi_workaround(np); + spin_lock(&np->lock); nv_tx_done(dev); spin_unlock(&np->lock); @@ -3410,6 +3426,8 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) if (!(events & np->irqmask)) break; + nv_msi_workaround(np); + spin_lock(&np->lock); nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); spin_unlock(&np->lock); @@ -3750,6 +3768,8 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data) if (!(events & NVREG_IRQ_TIMER)) return IRQ_RETVAL(0); + nv_msi_workaround(np); + spin_lock(&np->lock); np->intr_test = 1; spin_unlock(&np->lock); @@ -4174,12 +4194,23 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) netif_carrier_off(dev); if (netif_running(dev)) { + unsigned long flags; + nv_disable_irq(dev); netif_tx_lock_bh(dev); - spin_lock(&np->lock); + /* with plain spinlock lockdep complains */ + spin_lock_irqsave(&np->lock, flags); /* stop engines */ + /* FIXME: + * this can take some time, and interrupts are disabled + * due to spin_lock_irqsave, but let's hope no daemon + * is going to change the settings very often... + * Worst case: + * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX + * + some minor delays, which is up to a second approximately + */ nv_stop_rxtx(dev); - spin_unlock(&np->lock); + spin_unlock_irqrestore(&np->lock, flags); netif_tx_unlock_bh(dev); } @@ -5823,6 +5854,7 @@ static int nv_resume(struct pci_dev *pdev) writel(txreg, base + NvRegTransmitPoll); rc = nv_open(dev); + nv_set_multicast(dev); out: return rc; } diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 67b4b0728fc..a5baaf59ff6 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1093,7 +1093,7 @@ err: if (registered) unregister_netdev(ndev); - if (fep != NULL) { + if (fep && fep->ops) { (*fep->ops->free_bd)(ndev); (*fep->ops->cleanup_data)(ndev); } diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index e36321152d5..8268b3535b3 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -463,6 +463,9 @@ static void restart(struct net_device *dev) else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); } diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index ba75efc9f5b..f0014cfbb27 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -194,7 +194,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, ret = of_address_to_resource(ofdev->node, 0, &res); if (ret) - return ret; + goto out_res; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); @@ -236,6 +236,7 @@ out_free_irqs: kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); +out_res: out_fec: kfree(fec); out_mii: diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 587afe7be68..25bdd0832df 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -138,6 +138,7 @@ static int gfar_poll(struct napi_struct *napi, int budget); static void gfar_netpoll(struct net_device *dev); #endif int 
gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); +static int gfar_clean_tx_ring(struct net_device *dev); static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); static void gfar_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); @@ -634,6 +635,8 @@ static void free_skb_resources(struct gfar_private *priv) dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; } + + txbdp++; } kfree(priv->tx_skbuff); @@ -1141,7 +1144,7 @@ static int gfar_close(struct net_device *dev) } /* Changes the mac address if the controller is not running. */ -int gfar_set_mac_address(struct net_device *dev) +static int gfar_set_mac_address(struct net_device *dev) { gfar_set_mac_for_addr(dev, 0, dev->dev_addr); @@ -1260,7 +1263,7 @@ static void gfar_timeout(struct net_device *dev) } /* Interrupt Handler for Transmit complete */ -int gfar_clean_tx_ring(struct net_device *dev) +static int gfar_clean_tx_ring(struct net_device *dev) { struct txbd8 *bdp; struct gfar_private *priv = netdev_priv(dev); diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index fd487be3993..27f37c81e52 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -782,5 +782,8 @@ extern void gfar_halt(struct net_device *dev); extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, u32 regnum, u32 read); void gfar_init_sysfs(struct net_device *dev); +int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, + int regnum, u16 value); +int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); #endif /* __GIANFAR_H */ diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c index 230878b9419..5116f68e01b 100644 --- a/drivers/net/gianfar_sysfs.c +++ b/drivers/net/gianfar_sysfs.c @@ -103,10 +103,10 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, spin_lock_irqsave(&priv->rxlock, flags); if (length > priv->rx_buffer_size) - return count; + goto out; if (length == priv->rx_stash_size) - return count; + goto out; priv->rx_stash_size = length; @@ -125,6 +125,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, gfar_write(&priv->regs->attr, temp); +out: spin_unlock_irqrestore(&priv->rxlock, flags); return count; @@ -154,10 +155,10 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, spin_lock_irqsave(&priv->rxlock, flags); if (index > priv->rx_stash_size) - return count; + goto out; if (index == priv->rx_stash_index) - return count; + goto out; priv->rx_stash_index = index; @@ -166,6 +167,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, temp |= ATTRELI_EI(index); gfar_write(&priv->regs->attreli, flags); +out: spin_unlock_irqrestore(&priv->rxlock, flags); return count; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index dde9c7e6408..00bc7fbb6b3 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -959,7 +959,7 @@ static int epp_close(struct net_device *dev) unsigned char tmp[1]; bc->work_running = 0; - flush_scheduled_work(); + cancel_delayed_work_sync(&bc->run_work); bc->stat = EPP_DCDBIT; tmp[0] = 0; pp->ops->epp_write_addr(pp, tmp, 1, 0); diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0b94833e23f..e8cfadefa4b 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -1077,8 +1077,6 @@ static inline void rx_off(struct scc_priv *priv) static void start_timer(struct scc_priv *priv, int t, int r15) { - unsigned long 
flags; - outb(priv->tmr_mode, priv->tmr_ctrl); if (t == 0) { tm_isr(priv); diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index f9051593583..45ae9d1191d 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns case PARAM_RTS: if ( !(scc->wreg[R5] & RTS) ) { - if (arg != TX_OFF) + if (arg != TX_OFF) { scc_key_trx(scc, TX_ON); scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); + } } else { if (arg == TX_OFF) { diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig index 0d3e7380bad..70a3272ee99 100644 --- a/drivers/net/ibm_newemac/Kconfig +++ b/drivers/net/ibm_newemac/Kconfig @@ -1,6 +1,7 @@ config IBM_NEW_EMAC tristate "IBM EMAC Ethernet support" depends on PPC_DCR && PPC_MERGE + select CRC32 help This driver supports the IBM EMAC family of Ethernet controllers typically found on 4xx embedded PowerPC chips, but also on the diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 5d2108c5ac7..babc79ad490 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -1636,6 +1636,12 @@ static int emac_poll_rx(void *param, int budget) goto next; } + if (len < ETH_HLEN) { + ++dev->estats.rx_dropped_stack; + emac_recycle_rx_skb(dev, slot, len); + goto next; + } + if (len && len < EMAC_RX_COPY_THRESH) { struct sk_buff *copy_skb = alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC); @@ -2719,6 +2725,8 @@ static int __devinit emac_probe(struct of_device *ofdev, /* Clean rings */ memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor)); memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor)); + memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *)); + memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *)); /* Attach to ZMII, if needed */ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) && diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ae398f04c7b..e79a26a886c 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -718,7 +718,8 @@ void igb_down(struct igb_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; - igb_reset(adapter); + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); } diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 9b358f61ed7..2c03f4e2ccc 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -577,12 +577,12 @@ static void ipg_nic_set_multicast_list(struct net_device *dev) /* NIC to be configured in promiscuous mode. */ receivemode = IPG_RM_RECEIVEALLFRAMES; } else if ((dev->flags & IFF_ALLMULTI) || - (dev->flags & IFF_MULTICAST & + ((dev->flags & IFF_MULTICAST) && (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) { /* NIC to be configured to receive all multicast * frames. */ receivemode |= IPG_RM_RECEIVEMULTICAST; - } else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) { + } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) { /* NIC to be configured to receive selected * multicast addresses. 
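The ipg multicast fix just above corrects a C operator pitfall worth spelling out: IFF_MULTICAST is a single flag bit, so dev->flags & IFF_MULTICAST & (dev->mc_count > 0) bitwise-ANDs that bit with a 0-or-1 boolean, which is always zero for a flag above bit 0, and the hash-filter branch could never be taken. A small stand-alone illustration, taking IFF_MULTICAST as 0x1000 (its usual value in the kernel headers):

#include <stdio.h>

#define IFF_MULTICAST 0x1000 /* assumed value, bit 12 */

int main(void)
{
        unsigned flags = IFF_MULTICAST; /* interface has multicast enabled */
        int mc_count = 3;               /* and three addresses configured */

        /* Old test: bitwise-ANDs the flag with a boolean (0 or 1),
         * so it is always 0 for a flag above bit 0. */
        int old_test = flags & IFF_MULTICAST & (mc_count > 0);

        /* Fixed test: check the flag and the count independently. */
        int new_test = (flags & IFF_MULTICAST) && (mc_count > 0);

        printf("old: %d, fixed: %d\n", old_test, new_test);
        return 0;
}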
*/ receivemode |= IPG_RM_RECEIVEMULTICASTHASH; @@ -1271,7 +1271,7 @@ static void ipg_nic_rx_with_end(struct net_device *dev, framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; - endframeLen = framelen - jumbo->current_size; + endframelen = framelen - jumbo->current_size; /* if (framelen > IPG_RXFRAG_SIZE) framelen=IPG_RXFRAG_SIZE; @@ -1279,8 +1279,8 @@ static void ipg_nic_rx_with_end(struct net_device *dev, if (framelen > IPG_RXSUPPORT_SIZE) dev_kfree_skb_irq(jumbo->skb); else { - memcpy(skb_put(jumbo->skb, endframeLen), - skb->data, endframeLen); + memcpy(skb_put(jumbo->skb, endframelen), + skb->data, endframelen); jumbo->skb->protocol = eth_type_trans(jumbo->skb, dev); @@ -1352,16 +1352,16 @@ static int ipg_nic_rx(struct net_device *dev) switch (ipg_nic_rx_check_frame_type(dev)) { case FRAME_WITH_START_WITH_END: - ipg_nic_rx_with_start_and_end(dev, tp, rxfd, entry); + ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); break; case FRAME_WITH_START: - ipg_nic_rx_with_start(dev, tp, rxfd, entry); + ipg_nic_rx_with_start(dev, sp, rxfd, entry); break; case FRAME_WITH_END: - ipg_nic_rx_with_end(dev, tp, rxfd, entry); + ipg_nic_rx_with_end(dev, sp, rxfd, entry); break; case FRAME_NO_START_NO_END: - ipg_nic_rx_no_start_no_end(dev, tp, rxfd, entry); + ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); break; } } @@ -1808,7 +1808,7 @@ static int ipg_nic_open(struct net_device *dev) /* initialize JUMBO Frame control variable */ sp->jumbo.found_start = 0; sp->jumbo.current_size = 0; - sp->jumbo.skb = 0; + sp->jumbo.skb = NULL; dev->mtu = IPG_TXFRAG_SIZE; #endif diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index ce816ba9c40..e6317557a53 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -329,6 +329,7 @@ config PXA_FICP config MCS_FIR tristate "MosChip MCS7780 IrDA-USB dongle" depends on IRDA && USB && EXPERIMENTAL + select CRC32 help Say Y or M here if you want to build support for the MosChip MCS7780 IrDA-USB bridge device driver. 
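The ipg_nic_set_multicast_list() hunks above add the missing parentheses and replace a bitwise '&' with a logical '&&'. The reasoning: IFF_MULTICAST is a single flag bit (0x1000 in linux/if.h), while a comparison such as dev->mc_count > 0 evaluates to 0 or 1, so ANDing the two always yields 0 and the multicast branches could never be taken. A standalone illustration of the before/after behaviour (hypothetical values, ordinary userspace C, not driver code):

#include <stdio.h>

#define IFF_MULTICAST 0x1000    /* multicast flag bit, as in linux/if.h */

int main(void)
{
        unsigned int flags = IFF_MULTICAST;     /* interface allows multicast */
        int mc_count = 3;                       /* three addresses programmed */

        /* old test: 0x1000 & 1 == 0, so this branch is dead code */
        if (flags & IFF_MULTICAST & (mc_count > 0))
                printf("old test: taken\n");
        else
                printf("old test: never taken\n");

        /* fixed test: check the flag bit, then the count */
        if ((flags & IFF_MULTICAST) && (mc_count > 0))
                printf("fixed test: taken\n");

        return 0;
}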
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 9081234ab45..6f50ed7b183 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -1120,7 +1120,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self) } } - if (self->usbdev->descriptor.bcdDevice == fw_version) { + if (self->usbdev->descriptor.bcdDevice == cpu_to_le16(fw_version)) { /* * If we're here, we've found a correct patch * The actual image starts after the "STMP" keyword diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h index e846c38224a..a0ca9c1fe19 100644 --- a/drivers/net/irda/irda-usb.h +++ b/drivers/net/irda/irda-usb.h @@ -117,11 +117,11 @@ struct irda_class_desc { __u8 bLength; __u8 bDescriptorType; - __u16 bcdSpecRevision; + __le16 bcdSpecRevision; __u8 bmDataSize; __u8 bmWindowSize; __u8 bmMinTurnaroundTime; - __u16 wBaudRate; + __le16 wBaudRate; __u8 bmAdditionalBOFs; __u8 bIrdaRateSniff; __u8 bMaxUnicastList; diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index a873d2b315c..effc1ce8179 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -100,7 +100,9 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); +#ifdef CONFIG_PNP static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id); +#endif /* These are the known NSC chips */ static nsc_chip_t chips[] = { @@ -150,15 +152,18 @@ static chipio_t pnp_info; static const struct pnp_device_id nsc_ircc_pnp_table[] = { { .id = "NSC6001", .driver_data = 0 }, { .id = "IBM0071", .driver_data = 0 }, + { .id = "HWPC224", .driver_data = 0 }, { } }; MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table); static struct pnp_driver nsc_ircc_pnp_driver = { +#ifdef CONFIG_PNP .name = "nsc-ircc", .id_table = nsc_ircc_pnp_table, .probe = nsc_ircc_pnp_probe, +#endif }; /* Some prototypes */ @@ -916,6 +921,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) return 0; } +#ifdef CONFIG_PNP /* PNP probing */ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id) { @@ -952,6 +958,7 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i return 0; } +#endif /* * Function nsc_ircc_setup (info) diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 1f26da761e9..cfe0194fef7 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -376,6 +376,7 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table); static int pnp_driver_registered; +#ifdef CONFIG_PNP static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) { @@ -402,7 +403,9 @@ static struct pnp_driver smsc_ircc_pnp_driver = { .id_table = smsc_ircc_pnp_table, .probe = smsc_ircc_pnp_probe, }; - +#else /* CONFIG_PNP */ +static struct pnp_driver smsc_ircc_pnp_driver; +#endif /******************************************************************************* * diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 58e12878458..04ad3573b15 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -1546,6 +1546,7 @@ static int via_ircc_net_open(struct net_device *dev) IRDA_WARNING("%s, unable to allocate dma2=%d\n", driver_name, self->io.dma2); free_irq(self->io.irq, self); + free_dma(self->io.dma); return -EAGAIN; } } @@ -1606,6 
+1607,8 @@ static int via_ircc_net_close(struct net_device *dev) EnAllInt(iobase, OFF); free_irq(self->io.irq, dev); free_dma(self->io.dma); + if (self->io.dma2 != self->io.dma) + free_dma(self->io.dma2); return 0; } diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 6321b059ce1..2f38e847e2c 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -58,8 +58,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { - hw->mac.num_rx_queues = IXGBE_82598_MAX_TX_QUEUES; - hw->mac.num_tx_queues = IXGBE_82598_MAX_RX_QUEUES; + hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; /* PHY ops are filled in by default properly for Fiber only */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 7b859220c25..8f046090115 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1969,7 +1969,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); netif_stop_queue(netdev); - ixgbe_reset(adapter); + if (!pci_channel_offline(adapter->pdev)) + ixgbe_reset(adapter); ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 2a66e5b7ceb..4ce8afd481c 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -183,6 +183,9 @@ struct net_device * __init mac89x0_probe(int unit) int err = -ENODEV; DECLARE_MAC_BUF(mac); + if (!MACH_IS_MAC) + return ERR_PTR(-ENODEV); + dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 18770527df9..51ad3765e07 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c @@ -781,6 +781,9 @@ static int __init mac_mace_init_module(void) { int err; + if (!MACH_IS_MAC) + return -ENODEV; + if ((err = platform_driver_register(&mac_mace_driver))) { printk(KERN_ERR "Driver registration failed\n"); return err; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 2056cfc624d..c36a03ae9bf 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -450,7 +450,7 @@ static void macvlan_dellink(struct net_device *dev) unregister_netdevice(dev); if (list_empty(&port->vlans)) - macvlan_port_destroy(dev); + macvlan_port_destroy(port->dev); } static struct rtnl_link_ops macvlan_link_ops __read_mostly = { diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index cb46446b269..03a9abcce52 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -551,7 +551,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, u64 mtt_seg; int err = -ENOMEM; - if (page_shift < 12 || page_shift >= 32) + if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) return -EINVAL; /* All MTTs must fit in the same page */ diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 381b36e5f64..b7915cdcc6a 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -91,6 +91,11 @@ */ #define PHY_ADDR_REG 0x0000 #define SMI_REG 0x0004 +#define WINDOW_BASE(i) (0x0200 + ((i) << 3)) +#define WINDOW_SIZE(i) (0x0204 + ((i) << 3)) +#define WINDOW_REMAP_HIGH(i) (0x0280 + ((i) << 2)) +#define WINDOW_BAR_ENABLE 0x0290 +#define WINDOW_PROTECT(i) (0x0294 + ((i) << 4)) /* * Per-port registers. 
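The mlx4_fmr_alloc() hunk above replaces the hard-coded lower bound of 12 with ffs(dev->caps.page_size_cap) - 1, i.e. the shift of the smallest page size the HCA actually advertises, instead of assuming 4KB. ffs() is 1-based (both the kernel helper and the C library function), so subtracting one turns "first set bit" into a page shift. A small illustration with a made-up capability mask (plain C, not driver code):

#include <stdio.h>
#include <strings.h>            /* ffs() */

int main(void)
{
        /* hypothetical mask: bits 12..23 set, i.e. page sizes 4KB..8MB */
        unsigned int page_size_cap = 0x00fff000;
        int min_shift = ffs(page_size_cap) - 1;

        /* prints: smallest supported page shift: 12 (4096 bytes) */
        printf("smallest supported page shift: %d (%u bytes)\n",
               min_shift, 1u << min_shift);
        return 0;
}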
@@ -507,9 +512,23 @@ struct mv643xx_mib_counters { u32 late_collision; }; +struct mv643xx_shared_private { + void __iomem *eth_base; + + /* used to protect SMI_REG, which is shared across ports */ + spinlock_t phy_lock; + + u32 win_protect; + + unsigned int t_clk; +}; + struct mv643xx_private { + struct mv643xx_shared_private *shared; int port_num; /* User Ethernet port number */ + struct mv643xx_shared_private *shared_smi; + u32 rx_sram_addr; /* Base address of rx sram area */ u32 rx_sram_size; /* Size of rx sram area */ u32 tx_sram_addr; /* Base address of tx sram area */ @@ -614,19 +633,14 @@ static const struct ethtool_ops mv643xx_ethtool_ops; static char mv643xx_driver_name[] = "mv643xx_eth"; static char mv643xx_driver_version[] = "1.0"; -static void __iomem *mv643xx_eth_base; - -/* used to protect SMI_REG, which is shared across ports */ -static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); - static inline u32 rdl(struct mv643xx_private *mp, int offset) { - return readl(mv643xx_eth_base + offset); + return readl(mp->shared->eth_base + offset); } static inline void wrl(struct mv643xx_private *mp, int offset, u32 data) { - writel(data, mv643xx_eth_base + offset); + writel(data, mp->shared->eth_base + offset); } /* @@ -1119,7 +1133,6 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) * * INPUT: * struct mv643xx_private *mp Ethernet port - * unsigned int t_clk t_clk of the MV-643xx chip in HZ units * unsigned int delay Delay in usec * * OUTPUT: @@ -1130,10 +1143,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) * */ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, - unsigned int t_clk, unsigned int delay) + unsigned int delay) { unsigned int port_num = mp->port_num; - unsigned int coal = ((t_clk / 1000000) * delay) / 64; + unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; /* Set RX Coalescing mechanism */ wrl(mp, SDMA_CONFIG_REG(port_num), @@ -1158,7 +1171,6 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, * * INPUT: * struct mv643xx_private *mp Ethernet port - * unsigned int t_clk t_clk of the MV-643xx chip in HZ units * unsigned int delay Delay in uSeconds * * OUTPUT: @@ -1169,9 +1181,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, * */ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, - unsigned int t_clk, unsigned int delay) + unsigned int delay) { - unsigned int coal = ((t_clk / 1000000) * delay) / 64; + unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; /* Set TX Coalescing mechanism */ wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4); @@ -1413,11 +1425,11 @@ static int mv643xx_eth_open(struct net_device *dev) #ifdef MV643XX_COAL mp->rx_int_coal = - eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL); + eth_port_set_rx_coal(mp, MV643XX_RX_COAL); #endif mp->tx_int_coal = - eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL); + eth_port_set_tx_coal(mp, MV643XX_TX_COAL); /* Unmask phy and link status changes interrupts */ wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); @@ -1827,6 +1839,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev) return -ENODEV; } + if (pd->shared == NULL) { + printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n"); + return -ENODEV; + } + dev = alloc_etherdev(sizeof(struct mv643xx_private)); if (!dev) return -ENOMEM; @@ -1877,8 +1894,16 @@ static int mv643xx_eth_probe(struct platform_device *pdev) spin_lock_init(&mp->lock); + mp->shared = 
platform_get_drvdata(pd->shared); port_num = mp->port_num = pd->port_number; + if (mp->shared->win_protect) + wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect); + + mp->shared_smi = mp->shared; + if (pd->shared_smi != NULL) + mp->shared_smi = platform_get_drvdata(pd->shared_smi); + /* set default config values */ eth_port_uc_addr_get(mp, dev->dev_addr); mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; @@ -1983,30 +2008,91 @@ static int mv643xx_eth_remove(struct platform_device *pdev) return 0; } +static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp, + struct mbus_dram_target_info *dram) +{ + void __iomem *base = msp->eth_base; + u32 win_enable; + u32 win_protect; + int i; + + for (i = 0; i < 6; i++) { + writel(0, base + WINDOW_BASE(i)); + writel(0, base + WINDOW_SIZE(i)); + if (i < 4) + writel(0, base + WINDOW_REMAP_HIGH(i)); + } + + win_enable = 0x3f; + win_protect = 0; + + for (i = 0; i < dram->num_cs; i++) { + struct mbus_dram_window *cs = dram->cs + i; + + writel((cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | + dram->mbus_dram_target_id, base + WINDOW_BASE(i)); + writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); + + win_enable &= ~(1 << i); + win_protect |= 3 << (2 * i); + } + + writel(win_enable, base + WINDOW_BAR_ENABLE); + msp->win_protect = win_protect; +} + static int mv643xx_eth_shared_probe(struct platform_device *pdev) { static int mv643xx_version_printed = 0; + struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; + struct mv643xx_shared_private *msp; struct resource *res; + int ret; if (!mv643xx_version_printed++) printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); + ret = -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) - return -ENODEV; + goto out; - mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1); - if (mv643xx_eth_base == NULL) - return -ENOMEM; + ret = -ENOMEM; + msp = kmalloc(sizeof(*msp), GFP_KERNEL); + if (msp == NULL) + goto out; + memset(msp, 0, sizeof(*msp)); + + msp->eth_base = ioremap(res->start, res->end - res->start + 1); + if (msp->eth_base == NULL) + goto out_free; + + spin_lock_init(&msp->phy_lock); + msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; + + platform_set_drvdata(pdev, msp); + + /* + * (Re-)program MBUS remapping windows if we are asked to. 
+ */ + if (pd != NULL && pd->dram != NULL) + mv643xx_eth_conf_mbus_windows(msp, pd->dram); return 0; +out_free: + kfree(msp); +out: + return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) { - iounmap(mv643xx_eth_base); - mv643xx_eth_base = NULL; + struct mv643xx_shared_private *msp = platform_get_drvdata(pdev); + + iounmap(msp->eth_base); + kfree(msp); return 0; } @@ -2906,15 +2992,16 @@ static void eth_port_reset(struct mv643xx_private *mp) static void eth_port_read_smi_reg(struct mv643xx_private *mp, unsigned int phy_reg, unsigned int *value) { + void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; int phy_addr = ethernet_phy_get(mp); unsigned long flags; int i; /* the SMI register is a shared resource */ - spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); + spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); /* wait for the SMI register to become available */ - for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { + for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY busy timeout\n", mp->dev->name); goto out; @@ -2922,11 +3009,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - wrl(mp, SMI_REG, - (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); + writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ, + smi_reg); /* now wait for the data to be valid */ - for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) { + for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY read timeout\n", mp->dev->name); goto out; @@ -2934,9 +3021,9 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - *value = rdl(mp, SMI_REG) & 0xffff; + *value = readl(smi_reg) & 0xffff; out: - spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); + spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); } /* @@ -2962,17 +3049,16 @@ out: static void eth_port_write_smi_reg(struct mv643xx_private *mp, unsigned int phy_reg, unsigned int value) { - int phy_addr; - int i; + void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; + int phy_addr = ethernet_phy_get(mp); unsigned long flags; - - phy_addr = ethernet_phy_get(mp); + int i; /* the SMI register is a shared resource */ - spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); + spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); /* wait for the SMI register to become available */ - for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { + for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY busy timeout\n", mp->dev->name); goto out; @@ -2980,10 +3066,10 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) | - ETH_SMI_OPCODE_WRITE | (value & 0xffff)); + writel((phy_addr << 16) | (phy_reg << 21) | + ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg); out: - spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); + spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); } /* diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index ef63c8d2bd7..e0d76c75aea 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -75,7 +75,7 @@ #include "myri10ge_mcp.h" #include "myri10ge_mcp_gen_header.h" -#define MYRI10GE_VERSION_STR "1.3.2-1.287" +#define MYRI10GE_VERSION_STR "1.3.99-1.347" MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 
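The mv643xx_eth_shared_probe() rewrite above is the core of this conversion: the file-scope mv643xx_eth_base mapping and global phy spinlock become a per-device mv643xx_shared_private that is allocated, ioremap()ed and published with platform_set_drvdata(), with goto-based unwinding when a later step fails; the per-port probe then fetches it via platform_get_drvdata(pd->shared). A condensed sketch of that allocate/map/unwind shape (struct and function names shortened, kzalloc standing in for the hunk's kmalloc + memset):

#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/io.h>

struct shared_priv {                    /* abbreviated mv643xx_shared_private */
        void __iomem *eth_base;
        spinlock_t phy_lock;            /* serializes the shared SMI register */
        unsigned int t_clk;
};

static int example_shared_probe(struct platform_device *pdev)
{
        struct shared_priv *msp;
        struct resource *res;
        int ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL)
                return -EINVAL;

        msp = kzalloc(sizeof(*msp), GFP_KERNEL);
        if (msp == NULL)
                return -ENOMEM;

        msp->eth_base = ioremap(res->start, res->end - res->start + 1);
        if (msp->eth_base == NULL) {
                ret = -ENOMEM;
                goto out_free;          /* unwind only what exists so far */
        }

        spin_lock_init(&msp->phy_lock);
        msp->t_clk = 133000000;         /* default when no platform data */

        /* the per-port driver later picks this up via platform_get_drvdata() */
        platform_set_drvdata(pdev, msp);
        return 0;

out_free:
        kfree(msp);
        return ret;
}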
MODULE_AUTHOR("Maintainer: help@myri.com"); @@ -144,11 +144,13 @@ struct myri10ge_tx_buf { char *req_bytes; struct myri10ge_tx_buffer_state *info; int mask; /* number of transmit slots -1 */ - int boundary; /* boundary transmits cannot cross */ int req ____cacheline_aligned; /* transmit slots submitted */ int pkt_start; /* packets started */ + int stop_queue; + int linearized; int done ____cacheline_aligned; /* transmit slots completed */ int pkt_done; /* packets completed */ + int wake_queue; }; struct myri10ge_rx_done { @@ -160,29 +162,50 @@ struct myri10ge_rx_done { struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; }; -struct myri10ge_priv { - int running; /* running? */ - int csum_flag; /* rx_csums? */ +struct myri10ge_slice_netstats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_dropped; + unsigned long tx_dropped; +}; + +struct myri10ge_slice_state { struct myri10ge_tx_buf tx; /* transmit ring */ struct myri10ge_rx_buf rx_small; struct myri10ge_rx_buf rx_big; struct myri10ge_rx_done rx_done; + struct net_device *dev; + struct napi_struct napi; + struct myri10ge_priv *mgp; + struct myri10ge_slice_netstats stats; + __be32 __iomem *irq_claim; + struct mcp_irq_data *fw_stats; + dma_addr_t fw_stats_bus; + int watchdog_tx_done; + int watchdog_tx_req; +}; + +struct myri10ge_priv { + struct myri10ge_slice_state ss; + int tx_boundary; /* boundary transmits cannot cross */ + int running; /* running? */ + int csum_flag; /* rx_csums? */ int small_bytes; int big_bytes; + int max_intr_slots; struct net_device *dev; - struct napi_struct napi; struct net_device_stats stats; + spinlock_t stats_lock; u8 __iomem *sram; int sram_size; unsigned long board_span; unsigned long iomem_base; - __be32 __iomem *irq_claim; __be32 __iomem *irq_deassert; char *mac_addr_string; struct mcp_cmd_response *cmd; dma_addr_t cmd_bus; - struct mcp_irq_data *fw_stats; - dma_addr_t fw_stats_bus; struct pci_dev *pdev; int msi_enabled; u32 link_state; @@ -191,20 +214,16 @@ struct myri10ge_priv { __be32 __iomem *intr_coal_delay_ptr; int mtrr; int wc_enabled; - int wake_queue; - int stop_queue; int down_cnt; wait_queue_head_t down_wq; struct work_struct watchdog_work; struct timer_list watchdog_timer; - int watchdog_tx_done; - int watchdog_tx_req; - int watchdog_pause; int watchdog_resets; - int tx_linearized; + int watchdog_pause; int pause; char *fw_name; char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; + char *product_code_string; char fw_version[128]; int fw_ver_major; int fw_ver_minor; @@ -228,58 +247,54 @@ static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; static char *myri10ge_fw_name = NULL; module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n"); +MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name"); static int myri10ge_ecrc_enable = 1; module_param(myri10ge_ecrc_enable, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n"); - -static int myri10ge_max_intr_slots = 1024; -module_param(myri10ge_max_intr_slots, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n"); +MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E"); static int myri10ge_small_bytes = -1; /* -1 == auto */ module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); +MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets"); 
static int myri10ge_msi = 1; /* enable msi by default */ module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); +MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts"); static int myri10ge_intr_coal_delay = 75; module_param(myri10ge_intr_coal_delay, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n"); +MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay"); static int myri10ge_flow_control = 1; module_param(myri10ge_flow_control, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n"); +MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter"); static int myri10ge_deassert_wait = 1; module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(myri10ge_deassert_wait, - "Wait when deasserting legacy interrupts\n"); + "Wait when deasserting legacy interrupts"); static int myri10ge_force_firmware = 0; module_param(myri10ge_force_firmware, int, S_IRUGO); MODULE_PARM_DESC(myri10ge_force_firmware, - "Force firmware to assume aligned completions\n"); + "Force firmware to assume aligned completions"); static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; module_param(myri10ge_initial_mtu, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n"); +MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU"); static int myri10ge_napi_weight = 64; module_param(myri10ge_napi_weight, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n"); +MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight"); static int myri10ge_watchdog_timeout = 1; module_param(myri10ge_watchdog_timeout, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n"); +MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout"); static int myri10ge_max_irq_loops = 1048576; module_param(myri10ge_max_irq_loops, int, S_IRUGO); MODULE_PARM_DESC(myri10ge_max_irq_loops, - "Set stuck legacy IRQ detection threshold\n"); + "Set stuck legacy IRQ detection threshold"); #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK @@ -289,21 +304,22 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); static int myri10ge_lro = 1; module_param(myri10ge_lro, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n"); +MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload"); static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; module_param(myri10ge_lro_max_pkts, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n"); +MODULE_PARM_DESC(myri10ge_lro_max_pkts, + "Number of LRO packets to be aggregated"); static int myri10ge_fill_thresh = 256; module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n"); +MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); static int myri10ge_reset_recover = 1; static int myri10ge_wcfifo = 0; module_param(myri10ge_wcfifo, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n"); +MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled"); #define MYRI10GE_FW_OFFSET 1024*1024 #define MYRI10GE_HIGHPART_TO_U32(X) \ @@ -359,8 +375,10 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd, for (sleep_total = 0; sleep_total < 1000 && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); - sleep_total += 10) + sleep_total += 10) { udelay(10); + mb(); + } 
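The myri10ge_send_cmd() hunk just above wraps the short udelay() poll in braces and adds an mb(), so every pass re-reads the response word the NIC updates by DMA rather than letting the compiler or CPU reuse a stale value. A stripped-down, kernel-flavored sketch of that polling shape (structure, status marker and timeout are hypothetical):

#include <linux/types.h>
#include <linux/delay.h>
#include <asm/system.h>         /* mb(); <asm/barrier.h> on newer kernels */

/* hypothetical response block living in coherent DMA memory */
struct cmd_response {
        u32 result;
};

#define RESPONSE_PENDING 0xffffffffU    /* hypothetical "not yet written" marker */

static int example_poll_response(struct cmd_response *resp)
{
        int waited;

        for (waited = 0; waited < 1000; waited += 10) {
                if (resp->result != RESPONSE_PENDING)
                        return 0;       /* device has DMA'd its answer */
                udelay(10);
                mb();                   /* force a fresh read on the next pass */
        }

        return -EAGAIN;                 /* timed out */
}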
} else { /* use msleep for most command */ for (sleep_total = 0; @@ -420,6 +438,10 @@ static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp) ptr += 1; } } + if (memcmp(ptr, "PC=", 3) == 0) { + ptr += 3; + mgp->product_code_string = ptr; + } if (memcmp((const void *)ptr, "SN=", 3) == 0) { ptr += 3; mgp->serial_number = simple_strtoul(ptr, &ptr, 10); @@ -442,7 +464,7 @@ abort: static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) { char __iomem *submit; - __be32 buf[16]; + __be32 buf[16] __attribute__ ((__aligned__(8))); u32 dma_low, dma_high; int i; @@ -609,13 +631,38 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) return status; } +static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) +{ + struct myri10ge_cmd cmd; + int status; + + /* probe for IPv6 TSO support */ + mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, + &cmd, 0); + if (status == 0) { + mgp->max_tso6 = cmd.data0; + mgp->features |= NETIF_F_TSO6; + } + + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); + if (status != 0) { + dev_err(&mgp->pdev->dev, + "failed MXGEFW_CMD_GET_RX_RING_SIZE\n"); + return -ENXIO; + } + + mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr)); + + return 0; +} + static int myri10ge_load_firmware(struct myri10ge_priv *mgp) { char __iomem *submit; - __be32 buf[16]; + __be32 buf[16] __attribute__ ((__aligned__(8))); u32 dma_low, dma_high, size; int status, i; - struct myri10ge_cmd cmd; size = 0; status = myri10ge_load_hotplug_firmware(mgp, &size); @@ -635,7 +682,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) } dev_info(&mgp->pdev->dev, "Successfully adopted running firmware\n"); - if (mgp->tx.boundary == 4096) { + if (mgp->tx_boundary == 4096) { dev_warn(&mgp->pdev->dev, "Using firmware currently running on NIC" ". For optimal\n"); @@ -646,7 +693,9 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) } mgp->fw_name = "adopted"; - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; + myri10ge_dummy_rdma(mgp, 1); + status = myri10ge_get_firmware_capabilities(mgp); return status; } @@ -681,26 +730,18 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) msleep(1); mb(); i = 0; - while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) { - msleep(1); + while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) { + msleep(1 << i); i++; } if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { dev_err(&mgp->pdev->dev, "handoff failed\n"); return -ENXIO; } - dev_info(&mgp->pdev->dev, "handoff confirmed\n"); myri10ge_dummy_rdma(mgp, 1); + status = myri10ge_get_firmware_capabilities(mgp); - /* probe for IPv6 TSO support */ - mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; - status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, - &cmd, 0); - if (status == 0) { - mgp->max_tso6 = cmd.data0; - mgp->features |= NETIF_F_TSO6; - } - return 0; + return status; } static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) @@ -772,7 +813,7 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) * transfers took to complete. 
*/ - len = mgp->tx.boundary; + len = mgp->tx_boundary; cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); @@ -834,17 +875,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) /* Now exchange information about interrupts */ - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); - memset(mgp->rx_done.entry, 0, bytes); + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); + memset(mgp->ss.rx_done.entry, 0, bytes); cmd.data0 = (u32) bytes; status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); - cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); - cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); + cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); - mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); + mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0); mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); @@ -858,17 +899,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) } put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); - memset(mgp->rx_done.entry, 0, bytes); + memset(mgp->ss.rx_done.entry, 0, bytes); /* reset mcp/driver shared state back to 0 */ - mgp->tx.req = 0; - mgp->tx.done = 0; - mgp->tx.pkt_start = 0; - mgp->tx.pkt_done = 0; - mgp->rx_big.cnt = 0; - mgp->rx_small.cnt = 0; - mgp->rx_done.idx = 0; - mgp->rx_done.cnt = 0; + mgp->ss.tx.req = 0; + mgp->ss.tx.done = 0; + mgp->ss.tx.pkt_start = 0; + mgp->ss.tx.pkt_done = 0; + mgp->ss.rx_big.cnt = 0; + mgp->ss.rx_small.cnt = 0; + mgp->ss.rx_done.idx = 0; + mgp->ss.rx_done.cnt = 0; mgp->link_changes = 0; status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); myri10ge_change_pause(mgp, mgp->pause); @@ -1020,9 +1061,10 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, * page into an skb */ static inline int -myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, +myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, int bytes, int len, __wsum csum) { + struct myri10ge_priv *mgp = ss->mgp; struct sk_buff *skb; struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; int i, idx, hlen, remainder; @@ -1052,11 +1094,10 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, rx_frags[0].page_offset += MXGEFW_PAD; rx_frags[0].size -= MXGEFW_PAD; len -= MXGEFW_PAD; - lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, + lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, len, len, - /* opaque, will come back in get_frag_header */ - (void *)(__force unsigned long)csum, - csum); + /* opaque, will come back in get_frag_header */ + (void *)(__force unsigned long)csum, csum); return 1; } @@ -1096,10 +1137,11 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, return 1; } -static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) +static inline void +myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) { - struct pci_dev *pdev = mgp->pdev; - struct myri10ge_tx_buf *tx = &mgp->tx; + struct pci_dev *pdev = ss->mgp->pdev; + struct myri10ge_tx_buf *tx = &ss->tx; struct sk_buff *skb; int idx, len; @@ -1117,8 +1159,8 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) len = 
pci_unmap_len(&tx->info[idx], len); pci_unmap_len_set(&tx->info[idx], len, 0); if (skb) { - mgp->stats.tx_bytes += skb->len; - mgp->stats.tx_packets++; + ss->stats.tx_bytes += skb->len; + ss->stats.tx_packets++; dev_kfree_skb_irq(skb); if (len) pci_unmap_single(pdev, @@ -1134,16 +1176,18 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) } } /* start the queue if we've stopped it */ - if (netif_queue_stopped(mgp->dev) + if (netif_queue_stopped(ss->dev) && tx->req - tx->done < (tx->mask >> 1)) { - mgp->wake_queue++; - netif_wake_queue(mgp->dev); + tx->wake_queue++; + netif_wake_queue(ss->dev); } } -static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) +static inline int +myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) { - struct myri10ge_rx_done *rx_done = &mgp->rx_done; + struct myri10ge_rx_done *rx_done = &ss->rx_done; + struct myri10ge_priv *mgp = ss->mgp; unsigned long rx_bytes = 0; unsigned long rx_packets = 0; unsigned long rx_ok; @@ -1159,40 +1203,40 @@ static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) rx_done->entry[idx].length = 0; checksum = csum_unfold(rx_done->entry[idx].checksum); if (length <= mgp->small_bytes) - rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small, + rx_ok = myri10ge_rx_done(ss, &ss->rx_small, mgp->small_bytes, length, checksum); else - rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, + rx_ok = myri10ge_rx_done(ss, &ss->rx_big, mgp->big_bytes, length, checksum); rx_packets += rx_ok; rx_bytes += rx_ok * (unsigned long)length; cnt++; - idx = cnt & (myri10ge_max_intr_slots - 1); + idx = cnt & (mgp->max_intr_slots - 1); work_done++; } rx_done->idx = idx; rx_done->cnt = cnt; - mgp->stats.rx_packets += rx_packets; - mgp->stats.rx_bytes += rx_bytes; + ss->stats.rx_packets += rx_packets; + ss->stats.rx_bytes += rx_bytes; if (myri10ge_lro) lro_flush_all(&rx_done->lro_mgr); /* restock receive rings if needed */ - if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, + if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, mgp->small_bytes + MXGEFW_PAD, 0); - if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); + if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); return work_done; } static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) { - struct mcp_irq_data *stats = mgp->fw_stats; + struct mcp_irq_data *stats = mgp->ss.fw_stats; if (unlikely(stats->stats_updated)) { unsigned link_up = ntohl(stats->link_up); @@ -1219,9 +1263,9 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) } } if (mgp->rdma_tags_available != - ntohl(mgp->fw_stats->rdma_tags_available)) { + ntohl(stats->rdma_tags_available)) { mgp->rdma_tags_available = - ntohl(mgp->fw_stats->rdma_tags_available); + ntohl(stats->rdma_tags_available); printk(KERN_WARNING "myri10ge: %s: RDMA timed out! 
" "%d tags left\n", mgp->dev->name, mgp->rdma_tags_available); @@ -1234,26 +1278,27 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) static int myri10ge_poll(struct napi_struct *napi, int budget) { - struct myri10ge_priv *mgp = - container_of(napi, struct myri10ge_priv, napi); - struct net_device *netdev = mgp->dev; + struct myri10ge_slice_state *ss = + container_of(napi, struct myri10ge_slice_state, napi); + struct net_device *netdev = ss->mgp->dev; int work_done; /* process as many rx events as NAPI will allow */ - work_done = myri10ge_clean_rx_done(mgp, budget); + work_done = myri10ge_clean_rx_done(ss, budget); if (work_done < budget) { netif_rx_complete(netdev, napi); - put_be32(htonl(3), mgp->irq_claim); + put_be32(htonl(3), ss->irq_claim); } return work_done; } static irqreturn_t myri10ge_intr(int irq, void *arg) { - struct myri10ge_priv *mgp = arg; - struct mcp_irq_data *stats = mgp->fw_stats; - struct myri10ge_tx_buf *tx = &mgp->tx; + struct myri10ge_slice_state *ss = arg; + struct myri10ge_priv *mgp = ss->mgp; + struct mcp_irq_data *stats = ss->fw_stats; + struct myri10ge_tx_buf *tx = &ss->tx; u32 send_done_count; int i; @@ -1264,7 +1309,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* low bit indicates receives are present, so schedule * napi poll handler */ if (stats->valid & 1) - netif_rx_schedule(mgp->dev, &mgp->napi); + netif_rx_schedule(ss->dev, &ss->napi); if (!mgp->msi_enabled) { put_be32(0, mgp->irq_deassert); @@ -1281,7 +1326,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* check for transmit completes and receives */ send_done_count = ntohl(stats->send_done_count); if (send_done_count != tx->pkt_done) - myri10ge_tx_done(mgp, (int)send_done_count); + myri10ge_tx_done(ss, (int)send_done_count); if (unlikely(i > myri10ge_max_irq_loops)) { printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", mgp->dev->name); @@ -1296,16 +1341,46 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) myri10ge_check_statblock(mgp); - put_be32(htonl(3), mgp->irq_claim + 1); + put_be32(htonl(3), ss->irq_claim + 1); return (IRQ_HANDLED); } static int myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { + struct myri10ge_priv *mgp = netdev_priv(netdev); + char *ptr; + int i; + cmd->autoneg = AUTONEG_DISABLE; cmd->speed = SPEED_10000; cmd->duplex = DUPLEX_FULL; + + /* + * parse the product code to deterimine the interface type + * (CX4, XFP, Quad Ribbon Fiber) by looking at the character + * after the 3rd dash in the driver's cached copy of the + * EEPROM's product code string. 
+ */ + ptr = mgp->product_code_string; + if (ptr == NULL) { + printk(KERN_ERR "myri10ge: %s: Missing product code\n", + netdev->name); + return 0; + } + for (i = 0; i < 3; i++, ptr++) { + ptr = strchr(ptr, '-'); + if (ptr == NULL) { + printk(KERN_ERR "myri10ge: %s: Invalid product " + "code %s\n", netdev->name, + mgp->product_code_string); + return 0; + } + } + if (*ptr == 'R' || *ptr == 'Q') { + /* We've found either an XFP or quad ribbon fiber */ + cmd->port = PORT_FIBRE; + } return 0; } @@ -1324,6 +1399,7 @@ static int myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) { struct myri10ge_priv *mgp = netdev_priv(netdev); + coal->rx_coalesce_usecs = mgp->intr_coal_delay; return 0; } @@ -1370,10 +1446,10 @@ myri10ge_get_ringparam(struct net_device *netdev, { struct myri10ge_priv *mgp = netdev_priv(netdev); - ring->rx_mini_max_pending = mgp->rx_small.mask + 1; - ring->rx_max_pending = mgp->rx_big.mask + 1; + ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1; + ring->rx_max_pending = mgp->ss.rx_big.mask + 1; ring->rx_jumbo_max_pending = 0; - ring->tx_max_pending = mgp->rx_small.mask + 1; + ring->tx_max_pending = mgp->ss.rx_small.mask + 1; ring->rx_mini_pending = ring->rx_mini_max_pending; ring->rx_pending = ring->rx_max_pending; ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; @@ -1383,6 +1459,7 @@ myri10ge_get_ringparam(struct net_device *netdev, static u32 myri10ge_get_rx_csum(struct net_device *netdev) { struct myri10ge_priv *mgp = netdev_priv(netdev); + if (mgp->csum_flag) return 1; else @@ -1392,6 +1469,7 @@ static u32 myri10ge_get_rx_csum(struct net_device *netdev) static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) { struct myri10ge_priv *mgp = netdev_priv(netdev); + if (csum_enabled) mgp->csum_flag = MXGEFW_FLAGS_CKSUM; else @@ -1411,7 +1489,7 @@ static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) return 0; } -static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { +static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", "rx_length_errors", "rx_over_errors", "rx_crc_errors", @@ -1421,28 +1499,39 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { /* device-specific stats */ "tx_boundary", "WC", "irq", "MSI", "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", - "serial_number", "tx_pkt_start", "tx_pkt_done", - "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", - "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized", + "serial_number", "watchdog_resets", "link_changes", "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered", "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", "dropped_unicast_filtered", "dropped_multicast_filtered", "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", - "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", + "dropped_no_big_buffer" +}; + +static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { + "----------- slice ---------", + "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", + "rx_small_cnt", "rx_big_cnt", + "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated", + "LRO flushed", "LRO avg aggr", "LRO no_desc" }; #define MYRI10GE_NET_STATS_LEN 21 -#define MYRI10GE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_stats) +#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) +#define MYRI10GE_SLICE_STATS_LEN 
ARRAY_SIZE(myri10ge_gstrings_slice_stats) static void myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) { switch (stringset) { case ETH_SS_STATS: - memcpy(data, *myri10ge_gstrings_stats, - sizeof(myri10ge_gstrings_stats)); + memcpy(data, *myri10ge_gstrings_main_stats, + sizeof(myri10ge_gstrings_main_stats)); + data += sizeof(myri10ge_gstrings_main_stats); + memcpy(data, *myri10ge_gstrings_slice_stats, + sizeof(myri10ge_gstrings_slice_stats)); + data += sizeof(myri10ge_gstrings_slice_stats); break; } } @@ -1451,7 +1540,7 @@ static int myri10ge_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: - return MYRI10GE_STATS_LEN; + return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN; default: return -EOPNOTSUPP; } @@ -1462,12 +1551,13 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 * data) { struct myri10ge_priv *mgp = netdev_priv(netdev); + struct myri10ge_slice_state *ss; int i; for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) data[i] = ((unsigned long *)&mgp->stats)[i]; - data[i++] = (unsigned int)mgp->tx.boundary; + data[i++] = (unsigned int)mgp->tx_boundary; data[i++] = (unsigned int)mgp->wc_enabled; data[i++] = (unsigned int)mgp->pdev->irq; data[i++] = (unsigned int)mgp->msi_enabled; @@ -1475,40 +1565,44 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)mgp->write_dma; data[i++] = (unsigned int)mgp->read_write_dma; data[i++] = (unsigned int)mgp->serial_number; - data[i++] = (unsigned int)mgp->tx.pkt_start; - data[i++] = (unsigned int)mgp->tx.pkt_done; - data[i++] = (unsigned int)mgp->tx.req; - data[i++] = (unsigned int)mgp->tx.done; - data[i++] = (unsigned int)mgp->rx_small.cnt; - data[i++] = (unsigned int)mgp->rx_big.cnt; - data[i++] = (unsigned int)mgp->wake_queue; - data[i++] = (unsigned int)mgp->stop_queue; data[i++] = (unsigned int)mgp->watchdog_resets; - data[i++] = (unsigned int)mgp->tx_linearized; data[i++] = (unsigned int)mgp->link_changes; - data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); - data[i++] = - (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32); + + /* firmware stats are useful only in the first slice */ + ss = &mgp->ss; + data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); data[i++] = - (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered); + (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); data[i++] = - (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); - data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; - data[i++] = mgp->rx_done.lro_mgr.stats.flushed; - if 
(mgp->rx_done.lro_mgr.stats.flushed) - data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / - mgp->rx_done.lro_mgr.stats.flushed; + (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); + + data[i++] = 0; + data[i++] = (unsigned int)ss->tx.pkt_start; + data[i++] = (unsigned int)ss->tx.pkt_done; + data[i++] = (unsigned int)ss->tx.req; + data[i++] = (unsigned int)ss->tx.done; + data[i++] = (unsigned int)ss->rx_small.cnt; + data[i++] = (unsigned int)ss->rx_big.cnt; + data[i++] = (unsigned int)ss->tx.wake_queue; + data[i++] = (unsigned int)ss->tx.stop_queue; + data[i++] = (unsigned int)ss->tx.linearized; + data[i++] = ss->rx_done.lro_mgr.stats.aggregated; + data[i++] = ss->rx_done.lro_mgr.stats.flushed; + if (ss->rx_done.lro_mgr.stats.flushed) + data[i++] = ss->rx_done.lro_mgr.stats.aggregated / + ss->rx_done.lro_mgr.stats.flushed; else data[i++] = 0; - data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; + data[i++] = ss->rx_done.lro_mgr.stats.no_desc; } static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) @@ -1544,19 +1638,17 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { .get_msglevel = myri10ge_get_msglevel }; -static int myri10ge_allocate_rings(struct net_device *dev) +static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) { - struct myri10ge_priv *mgp; + struct myri10ge_priv *mgp = ss->mgp; struct myri10ge_cmd cmd; + struct net_device *dev = mgp->dev; int tx_ring_size, rx_ring_size; int tx_ring_entries, rx_ring_entries; int i, status; size_t bytes; - mgp = netdev_priv(dev); - /* get ring sizes */ - status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); tx_ring_size = cmd.data0; status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); @@ -1566,144 +1658,142 @@ static int myri10ge_allocate_rings(struct net_device *dev) tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); - mgp->tx.mask = tx_ring_entries - 1; - mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; + ss->tx.mask = tx_ring_entries - 1; + ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; status = -ENOMEM; /* allocate the host shadow rings */ bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) - * sizeof(*mgp->tx.req_list); - mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); - if (mgp->tx.req_bytes == NULL) + * sizeof(*ss->tx.req_list); + ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); + if (ss->tx.req_bytes == NULL) goto abort_with_nothing; /* ensure req_list entries are aligned to 8 bytes */ - mgp->tx.req_list = (struct mcp_kreq_ether_send *) - ALIGN((unsigned long)mgp->tx.req_bytes, 8); + ss->tx.req_list = (struct mcp_kreq_ether_send *) + ALIGN((unsigned long)ss->tx.req_bytes, 8); - bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); - mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_small.shadow == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); + ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_small.shadow == NULL) goto abort_with_tx_req_bytes; - bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); - mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_big.shadow == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); + 
ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_big.shadow == NULL) goto abort_with_rx_small_shadow; /* allocate the host info rings */ - bytes = tx_ring_entries * sizeof(*mgp->tx.info); - mgp->tx.info = kzalloc(bytes, GFP_KERNEL); - if (mgp->tx.info == NULL) + bytes = tx_ring_entries * sizeof(*ss->tx.info); + ss->tx.info = kzalloc(bytes, GFP_KERNEL); + if (ss->tx.info == NULL) goto abort_with_rx_big_shadow; - bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); - mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_small.info == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_small.info); + ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_small.info == NULL) goto abort_with_tx_info; - bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); - mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_big.info == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_big.info); + ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_big.info == NULL) goto abort_with_rx_small_info; /* Fill the receive rings */ - mgp->rx_big.cnt = 0; - mgp->rx_small.cnt = 0; - mgp->rx_big.fill_cnt = 0; - mgp->rx_small.fill_cnt = 0; - mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; - mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; - mgp->rx_small.watchdog_needed = 0; - mgp->rx_big.watchdog_needed = 0; - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, + ss->rx_big.cnt = 0; + ss->rx_small.cnt = 0; + ss->rx_big.fill_cnt = 0; + ss->rx_small.fill_cnt = 0; + ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; + ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; + ss->rx_small.watchdog_needed = 0; + ss->rx_big.watchdog_needed = 0; + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, mgp->small_bytes + MXGEFW_PAD, 0); - if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) { + if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", - dev->name, mgp->rx_small.fill_cnt); + dev->name, ss->rx_small.fill_cnt); goto abort_with_rx_small_ring; } - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); - if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); + if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", - dev->name, mgp->rx_big.fill_cnt); + dev->name, ss->rx_big.fill_cnt); goto abort_with_rx_big_ring; } return 0; abort_with_rx_big_ring: - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { - int idx = i & mgp->rx_big.mask; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { + int idx = i & ss->rx_big.mask; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], mgp->big_bytes); - put_page(mgp->rx_big.info[idx].page); + put_page(ss->rx_big.info[idx].page); } abort_with_rx_small_ring: - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { - int idx = i & mgp->rx_small.mask; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { + int idx = i & ss->rx_small.mask; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], mgp->small_bytes + MXGEFW_PAD); - put_page(mgp->rx_small.info[idx].page); + put_page(ss->rx_small.info[idx].page); } - kfree(mgp->rx_big.info); + kfree(ss->rx_big.info); abort_with_rx_small_info: - kfree(mgp->rx_small.info); + kfree(ss->rx_small.info); abort_with_tx_info: - kfree(mgp->tx.info); + kfree(ss->tx.info); abort_with_rx_big_shadow: - 
kfree(mgp->rx_big.shadow); + kfree(ss->rx_big.shadow); abort_with_rx_small_shadow: - kfree(mgp->rx_small.shadow); + kfree(ss->rx_small.shadow); abort_with_tx_req_bytes: - kfree(mgp->tx.req_bytes); - mgp->tx.req_bytes = NULL; - mgp->tx.req_list = NULL; + kfree(ss->tx.req_bytes); + ss->tx.req_bytes = NULL; + ss->tx.req_list = NULL; abort_with_nothing: return status; } -static void myri10ge_free_rings(struct net_device *dev) +static void myri10ge_free_rings(struct myri10ge_slice_state *ss) { - struct myri10ge_priv *mgp; + struct myri10ge_priv *mgp = ss->mgp; struct sk_buff *skb; struct myri10ge_tx_buf *tx; int i, len, idx; - mgp = netdev_priv(dev); - - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { - idx = i & mgp->rx_big.mask; - if (i == mgp->rx_big.fill_cnt - 1) - mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { + idx = i & ss->rx_big.mask; + if (i == ss->rx_big.fill_cnt - 1) + ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], mgp->big_bytes); - put_page(mgp->rx_big.info[idx].page); + put_page(ss->rx_big.info[idx].page); } - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { - idx = i & mgp->rx_small.mask; - if (i == mgp->rx_small.fill_cnt - 1) - mgp->rx_small.info[idx].page_offset = + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { + idx = i & ss->rx_small.mask; + if (i == ss->rx_small.fill_cnt - 1) + ss->rx_small.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], mgp->small_bytes + MXGEFW_PAD); - put_page(mgp->rx_small.info[idx].page); + put_page(ss->rx_small.info[idx].page); } - tx = &mgp->tx; + tx = &ss->tx; while (tx->done != tx->req) { idx = tx->done & tx->mask; skb = tx->info[idx].skb; @@ -1714,7 +1804,7 @@ static void myri10ge_free_rings(struct net_device *dev) len = pci_unmap_len(&tx->info[idx], len); pci_unmap_len_set(&tx->info[idx], len, 0); if (skb) { - mgp->stats.tx_dropped++; + ss->stats.tx_dropped++; dev_kfree_skb_any(skb); if (len) pci_unmap_single(mgp->pdev, @@ -1729,19 +1819,19 @@ static void myri10ge_free_rings(struct net_device *dev) PCI_DMA_TODEVICE); } } - kfree(mgp->rx_big.info); + kfree(ss->rx_big.info); - kfree(mgp->rx_small.info); + kfree(ss->rx_small.info); - kfree(mgp->tx.info); + kfree(ss->tx.info); - kfree(mgp->rx_big.shadow); + kfree(ss->rx_big.shadow); - kfree(mgp->rx_small.shadow); + kfree(ss->rx_small.shadow); - kfree(mgp->tx.req_bytes); - mgp->tx.req_bytes = NULL; - mgp->tx.req_list = NULL; + kfree(ss->tx.req_bytes); + ss->tx.req_bytes = NULL; + ss->tx.req_list = NULL; } static int myri10ge_request_irq(struct myri10ge_priv *mgp) @@ -1840,13 +1930,11 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, static int myri10ge_open(struct net_device *dev) { - struct myri10ge_priv *mgp; + struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; struct net_lro_mgr *lro_mgr; int status, big_pow2; - mgp = netdev_priv(dev); - if (mgp->running != MYRI10GE_ETH_STOPPED) return -EBUSY; @@ -1883,16 +1971,16 @@ static int myri10ge_open(struct net_device *dev) /* get the lanai pointers to the send and receive rings */ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); - mgp->tx.lanai = + mgp->ss.tx.lanai = (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); status |= 
myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); - mgp->rx_small.lanai = + mgp->ss.rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); - mgp->rx_big.lanai = + mgp->ss.rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); if (status != 0) { @@ -1904,15 +1992,15 @@ static int myri10ge_open(struct net_device *dev) } if (myri10ge_wcfifo && mgp->wc_enabled) { - mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; - mgp->rx_small.wc_fifo = + mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; + mgp->ss.rx_small.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; - mgp->rx_big.wc_fifo = + mgp->ss.rx_big.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; } else { - mgp->tx.wc_fifo = NULL; - mgp->rx_small.wc_fifo = NULL; - mgp->rx_big.wc_fifo = NULL; + mgp->ss.tx.wc_fifo = NULL; + mgp->ss.rx_small.wc_fifo = NULL; + mgp->ss.rx_big.wc_fifo = NULL; } /* Firmware needs the big buff size as a power of 2. Lie and @@ -1929,7 +2017,7 @@ static int myri10ge_open(struct net_device *dev) mgp->big_bytes = big_pow2; } - status = myri10ge_allocate_rings(dev); + status = myri10ge_allocate_rings(&mgp->ss); if (status != 0) goto abort_with_irq; @@ -1948,12 +2036,12 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } - cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); - cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); + cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); cmd.data2 = sizeof(struct mcp_irq_data); status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); if (status == -ENOSYS) { - dma_addr_t bus = mgp->fw_stats_bus; + dma_addr_t bus = mgp->ss.fw_stats_bus; bus += offsetof(struct mcp_irq_data, send_done_count); cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); @@ -1974,20 +2062,20 @@ static int myri10ge_open(struct net_device *dev) mgp->link_state = ~0U; mgp->rdma_tags_available = 15; - lro_mgr = &mgp->rx_done.lro_mgr; + lro_mgr = &mgp->ss.rx_done.lro_mgr; lro_mgr->dev = dev; lro_mgr->features = LRO_F_NAPI; lro_mgr->ip_summed = CHECKSUM_COMPLETE; lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; - lro_mgr->lro_arr = mgp->rx_done.lro_desc; + lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc; lro_mgr->get_frag_header = myri10ge_get_frag_header; lro_mgr->max_aggr = myri10ge_lro_max_pkts; lro_mgr->frag_align_pad = 2; if (lro_mgr->max_aggr > MAX_SKB_FRAGS) lro_mgr->max_aggr = MAX_SKB_FRAGS; - napi_enable(&mgp->napi); /* must happen prior to any irq */ + napi_enable(&mgp->ss.napi); /* must happen prior to any irq */ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); if (status) { @@ -1996,8 +2084,8 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } - mgp->wake_queue = 0; - mgp->stop_queue = 0; + mgp->ss.tx.wake_queue = 0; + mgp->ss.tx.stop_queue = 0; mgp->running = MYRI10GE_ETH_RUNNING; mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; add_timer(&mgp->watchdog_timer); @@ -2005,7 +2093,7 @@ static int myri10ge_open(struct net_device *dev) return 0; abort_with_rings: - myri10ge_free_rings(dev); + myri10ge_free_rings(&mgp->ss); abort_with_irq: myri10ge_free_irq(mgp); @@ -2017,21 +2105,19 @@ abort_with_nothing: static int myri10ge_close(struct net_device *dev) { - struct 
myri10ge_priv *mgp; + struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; int status, old_down_cnt; - mgp = netdev_priv(dev); - if (mgp->running != MYRI10GE_ETH_RUNNING) return 0; - if (mgp->tx.req_bytes == NULL) + if (mgp->ss.tx.req_bytes == NULL) return 0; del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - napi_disable(&mgp->napi); + napi_disable(&mgp->ss.napi); netif_carrier_off(dev); netif_stop_queue(dev); old_down_cnt = mgp->down_cnt; @@ -2047,7 +2133,7 @@ static int myri10ge_close(struct net_device *dev) netif_tx_disable(dev); myri10ge_free_irq(mgp); - myri10ge_free_rings(dev); + myri10ge_free_rings(&mgp->ss); mgp->running = MYRI10GE_ETH_STOPPED; return 0; @@ -2143,7 +2229,7 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, /* * Transmit a packet. We need to split the packet so that a single - * segment does not cross myri10ge->tx.boundary, so this makes segment + * segment does not cross myri10ge->tx_boundary, so this makes segment * counting tricky. So rather than try to count segments up front, we * just give up if there are too few segments to hold a reasonably * fragmented packet currently available. If we run @@ -2154,8 +2240,9 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) { struct myri10ge_priv *mgp = netdev_priv(dev); + struct myri10ge_slice_state *ss; struct mcp_kreq_ether_send *req; - struct myri10ge_tx_buf *tx = &mgp->tx; + struct myri10ge_tx_buf *tx; struct skb_frag_struct *frag; dma_addr_t bus; u32 low; @@ -2166,6 +2253,9 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) int cum_len, seglen, boundary, rdma_count; u8 flags, odd_flag; + /* always transmit through slot 0 */ + ss = &mgp->ss; + tx = &ss->tx; again: req = tx->req_list; avail = tx->mask - 1 - (tx->req - tx->done); @@ -2180,7 +2270,7 @@ again: if ((unlikely(avail < max_segments))) { /* we are out of transmit resources */ - mgp->stop_queue++; + tx->stop_queue++; netif_stop_queue(dev); return 1; } @@ -2242,7 +2332,7 @@ again: if (skb_padto(skb, ETH_ZLEN)) { /* The packet is gone, so we must * return 0 */ - mgp->stats.tx_dropped += 1; + ss->stats.tx_dropped += 1; return 0; } /* adjust the len to account for the zero pad @@ -2284,7 +2374,7 @@ again: while (1) { /* Break the SKB or Fragment up into pieces which - * do not cross mgp->tx.boundary */ + * do not cross mgp->tx_boundary */ low = MYRI10GE_LOWPART_TO_U32(bus); high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); while (len) { @@ -2294,7 +2384,8 @@ again: if (unlikely(count == max_segments)) goto abort_linearize; - boundary = (low + tx->boundary) & ~(tx->boundary - 1); + boundary = + (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); seglen = boundary - low; if (seglen > len) seglen = len; @@ -2378,7 +2469,7 @@ again: myri10ge_submit_req_wc(tx, tx->req_list, count); tx->pkt_start++; if ((avail - count) < MXGEFW_MAX_SEND_DESC) { - mgp->stop_queue++; + tx->stop_queue++; netif_stop_queue(dev); } dev->trans_start = jiffies; @@ -2420,12 +2511,12 @@ abort_linearize: if (skb_linearize(skb)) goto drop; - mgp->tx_linearized++; + tx->linearized++; goto again; drop: dev_kfree_skb_any(skb); - mgp->stats.tx_dropped += 1; + ss->stats.tx_dropped += 1; return 0; } @@ -2433,7 +2524,7 @@ drop: static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *segs, *curr; - struct myri10ge_priv *mgp = dev->priv; + struct myri10ge_priv *mgp = netdev_priv(dev); int status; segs = skb_gso_segment(skb, 
dev->features & ~NETIF_F_TSO6); @@ -2473,14 +2564,13 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) static void myri10ge_set_multicast_list(struct net_device *dev) { + struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; - struct myri10ge_priv *mgp; struct dev_mc_list *mc_list; __be32 data[2] = { 0, 0 }; int err; DECLARE_MAC_BUF(mac); - mgp = netdev_priv(dev); /* can be called from atomic contexts, * pass 1 to force atomicity in myri10ge_send_cmd() */ myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); @@ -2616,13 +2706,14 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { if (myri10ge_ecrc_enable > 1) { - struct pci_dev *old_bridge = bridge; + struct pci_dev *prev_bridge, *old_bridge = bridge; /* Walk the hierarchy up to the root port * where ECRC has to be enabled */ do { + prev_bridge = bridge; bridge = bridge->bus->self; - if (!bridge) { + if (!bridge || prev_bridge == bridge) { dev_err(dev, "Failed to find root port" " to force ECRC\n"); @@ -2681,9 +2772,9 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) * already been enabled, then it must use a firmware image which works * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it * should also ensure that it never gives the device a Read-DMA which is - * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is + * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) - * firmware image, and set tx.boundary to 4KB. + * firmware image, and set tx_boundary to 4KB. */ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) @@ -2692,7 +2783,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) struct device *dev = &pdev->dev; int status; - mgp->tx.boundary = 4096; + mgp->tx_boundary = 4096; /* * Verify the max read request size was set to 4KB * before trying the test with 4KB. 
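A minimal sketch, not part of the patch above: the myri10ge hunks here rename tx.boundary to tx_boundary and, per the comments, pick the aligned firmware with a 4KB Read-DMA boundary only when the PCIe Max Read Request size is 4KB, otherwise falling back to 2KB and the firmware that tolerates unaligned completions. The helper name example_pick_tx_boundary is hypothetical, and the use of the generic pcie_get_readrq() accessor is an assumption for illustration; the driver's own probe path differs.

#include <linux/pci.h>

static void example_pick_tx_boundary(struct pci_dev *pdev,
				     int *tx_boundary, const char **fw_name)
{
	/* Assumed helper: current Max Read Request size in bytes. */
	int readrq = pcie_get_readrq(pdev);

	if (readrq == 4096) {
		/* Aligned completions are safe: 4KB boundary, aligned image
		 * (file name quoted from the comment in the patch). */
		*tx_boundary = 4096;
		*fw_name = "myri10ge_eth_z8e.dat";
	} else {
		/* Fall back to 2KB reads and the firmware that works
		 * around unaligned completion packets. */
		*tx_boundary = 2048;
		*fw_name = "myri10ge_ethp_z8e.dat";
	}
}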
@@ -2704,7 +2795,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) } if (status != 4096) { dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; } /* * load the optimized firmware (which assumes aligned PCIe @@ -2737,7 +2828,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) "Please install up to date fw\n"); abort: /* fall back to using the unaligned firmware */ - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; mgp->fw_name = myri10ge_fw_unaligned; } @@ -2758,7 +2849,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) if (link_width < 8) { dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", link_width); - mgp->tx.boundary = 4096; + mgp->tx_boundary = 4096; mgp->fw_name = myri10ge_fw_aligned; } else { myri10ge_firmware_probe(mgp); @@ -2767,12 +2858,12 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) if (myri10ge_force_firmware == 1) { dev_info(&mgp->pdev->dev, "Assuming aligned completions (forced)\n"); - mgp->tx.boundary = 4096; + mgp->tx_boundary = 4096; mgp->fw_name = myri10ge_fw_aligned; } else { dev_info(&mgp->pdev->dev, "Assuming unaligned completions (forced)\n"); - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; mgp->fw_name = myri10ge_fw_unaligned; } } @@ -2889,6 +2980,7 @@ static void myri10ge_watchdog(struct work_struct *work) { struct myri10ge_priv *mgp = container_of(work, struct myri10ge_priv, watchdog_work); + struct myri10ge_tx_buf *tx; u32 reboot; int status; u16 cmd, vendor; @@ -2938,15 +3030,16 @@ static void myri10ge_watchdog(struct work_struct *work) printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", mgp->dev->name); + tx = &mgp->ss.tx; printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", - mgp->dev->name, mgp->tx.req, mgp->tx.done, - mgp->tx.pkt_start, mgp->tx.pkt_done, - (int)ntohl(mgp->fw_stats->send_done_count)); + mgp->dev->name, tx->req, tx->done, + tx->pkt_start, tx->pkt_done, + (int)ntohl(mgp->ss.fw_stats->send_done_count)); msleep(2000); printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", - mgp->dev->name, mgp->tx.req, mgp->tx.done, - mgp->tx.pkt_start, mgp->tx.pkt_done, - (int)ntohl(mgp->fw_stats->send_done_count)); + mgp->dev->name, tx->req, tx->done, + tx->pkt_start, tx->pkt_done, + (int)ntohl(mgp->ss.fw_stats->send_done_count)); } rtnl_lock(); myri10ge_close(mgp->dev); @@ -2969,28 +3062,31 @@ static void myri10ge_watchdog(struct work_struct *work) static void myri10ge_watchdog_timer(unsigned long arg) { struct myri10ge_priv *mgp; + struct myri10ge_slice_state *ss; u32 rx_pause_cnt; mgp = (struct myri10ge_priv *)arg; - if (mgp->rx_small.watchdog_needed) { - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, + rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); + + ss = &mgp->ss; + if (ss->rx_small.watchdog_needed) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, mgp->small_bytes + MXGEFW_PAD, 1); - if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >= + if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= myri10ge_fill_thresh) - mgp->rx_small.watchdog_needed = 0; + ss->rx_small.watchdog_needed = 0; } - if (mgp->rx_big.watchdog_needed) { - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1); - if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >= + if (ss->rx_big.watchdog_needed) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); + if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= myri10ge_fill_thresh) - mgp->rx_big.watchdog_needed = 0; + ss->rx_big.watchdog_needed = 0; } - rx_pause_cnt = 
ntohl(mgp->fw_stats->dropped_pause); - if (mgp->tx.req != mgp->tx.done && - mgp->tx.done == mgp->watchdog_tx_done && - mgp->watchdog_tx_req != mgp->watchdog_tx_done) { + if (ss->tx.req != ss->tx.done && + ss->tx.done == ss->watchdog_tx_done && + ss->watchdog_tx_req != ss->watchdog_tx_done) { /* nic seems like it might be stuck.. */ if (rx_pause_cnt != mgp->watchdog_pause) { if (net_ratelimit()) @@ -3005,8 +3101,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) /* rearm timer */ mod_timer(&mgp->watchdog_timer, jiffies + myri10ge_watchdog_timeout * HZ); - mgp->watchdog_tx_done = mgp->tx.done; - mgp->watchdog_tx_req = mgp->tx.req; + ss->watchdog_tx_done = ss->tx.done; + ss->watchdog_tx_req = ss->tx.req; mgp->watchdog_pause = rx_pause_cnt; } @@ -3030,7 +3126,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mgp = netdev_priv(netdev); mgp->dev = netdev; - netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); + netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight); mgp->pdev = pdev; mgp->csum_flag = MXGEFW_FLAGS_CKSUM; mgp->pause = myri10ge_flow_control; @@ -3076,9 +3172,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (mgp->cmd == NULL) goto abort_with_netdev; - mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats), - &mgp->fw_stats_bus, GFP_KERNEL); - if (mgp->fw_stats == NULL) + mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), + &mgp->ss.fw_stats_bus, GFP_KERNEL); + if (mgp->ss.fw_stats == NULL) goto abort_with_cmd; mgp->board_span = pci_resource_len(pdev, 0); @@ -3118,12 +3214,12 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->dev_addr[i] = mgp->mac_addr[i]; /* allocate rx done ring */ - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); - mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, - &mgp->rx_done.bus, GFP_KERNEL); - if (mgp->rx_done.entry == NULL) + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); + mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, + &mgp->ss.rx_done.bus, GFP_KERNEL); + if (mgp->ss.rx_done.entry == NULL) goto abort_with_ioremap; - memset(mgp->rx_done.entry, 0, bytes); + memset(mgp->ss.rx_done.entry, 0, bytes); myri10ge_select_firmware(mgp); @@ -3183,7 +3279,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", (mgp->msi_enabled ? "MSI" : "xPIC"), - netdev->irq, mgp->tx.boundary, mgp->fw_name, + netdev->irq, mgp->tx_boundary, mgp->fw_name, (mgp->wc_enabled ? 
"Enabled" : "Disabled")); return 0; @@ -3195,9 +3291,9 @@ abort_with_firmware: myri10ge_dummy_rdma(mgp, 0); abort_with_rx_done: - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); dma_free_coherent(&pdev->dev, bytes, - mgp->rx_done.entry, mgp->rx_done.bus); + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); abort_with_ioremap: iounmap(mgp->sram); @@ -3207,8 +3303,8 @@ abort_with_wc: if (mgp->mtrr >= 0) mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); #endif - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), - mgp->fw_stats, mgp->fw_stats_bus); + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); abort_with_cmd: dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), @@ -3246,9 +3342,9 @@ static void myri10ge_remove(struct pci_dev *pdev) /* avoid a memory leak */ pci_restore_state(pdev); - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); dma_free_coherent(&pdev->dev, bytes, - mgp->rx_done.entry, mgp->rx_done.bus); + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); iounmap(mgp->sram); @@ -3256,8 +3352,8 @@ static void myri10ge_remove(struct pci_dev *pdev) if (mgp->mtrr >= 0) mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); #endif - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), - mgp->fw_stats, mgp->fw_stats_bus); + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), mgp->cmd, mgp->cmd_bus); diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h index 58e57178c56..fdbeeee0737 100644 --- a/drivers/net/myri10ge/myri10ge_mcp.h +++ b/drivers/net/myri10ge/myri10ge_mcp.h @@ -10,7 +10,7 @@ struct mcp_dma_addr { __be32 low; }; -/* 4 Bytes. 8 Bytes for NDIS drivers. */ +/* 4 Bytes */ struct mcp_slot { __sum16 checksum; __be16 length; @@ -144,6 +144,7 @@ enum myri10ge_mcp_cmd_type { * a power of 2 number of entries. */ MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ +#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) /* command to bring ethernet interface up. Above parameters * (plus mtu & mac address) must have been exchanged prior @@ -221,10 +222,14 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_GET_MAX_RSS_QUEUES, MXGEFW_CMD_ENABLE_RSS_QUEUES, /* data0 = number of slices n (0, 1, ..., n-1) to enable - * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue. + * data1 = interrupt mode. + * 0=share one INTx/MSI, 1=use one MSI-X per queue. * If all queues share one interrupt, the driver must have set * RSS_SHARED_INTERRUPT_DMA before enabling queues. */ +#define MXGEFW_SLICE_INTR_MODE_SHARED 0 +#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 + MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, /* data0, data1 = bus address lsw, msw */ @@ -241,10 +246,14 @@ enum myri10ge_mcp_cmd_type { * 0: disable rss. nic does not distribute receive packets. * 1: enable rss. nic distributes receive packets among queues. * data1 = hash type - * 1: IPV4 - * 2: TCP_IPV4 - * 3: IPV4 | TCP_IPV4 + * 1: IPV4 (required by RSS) + * 2: TCP_IPV4 (required by RSS) + * 3: IPV4 | TCP_IPV4 (required by RSS) + * 4: source port */ +#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 +#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 +#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, /* Return data = the max. 
size of the entire headers of a IPv6 TSO packet. @@ -260,6 +269,8 @@ enum myri10ge_mcp_cmd_type { * 0: Linux/FreeBSD style (NIC default) * 1: NDIS/NetBSD style */ +#define MXGEFW_TSO_MODE_LINUX 0 +#define MXGEFW_TSO_MODE_NDIS 1 MXGEFW_CMD_MDIO_READ, /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ @@ -286,6 +297,38 @@ enum myri10ge_mcp_cmd_type { /* Return data = NIC memory offset of mcp_vpump_public_global */ MXGEFW_CMD_RESET_VPUMP, /* Resets the VPUMP state */ + + MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, + /* data0 = mcp_slot type to use. + * 0 = the default 4B mcp_slot + * 1 = 8B mcp_slot_8 + */ +#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 +#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 + + MXGEFW_CMD_SET_THROTTLE_FACTOR, + /* set the throttle factor for ethp_z8e + * data0 = throttle_factor + * throttle_factor = 256 * pcie-raw-speed / tx_speed + * tx_speed = 256 * pcie-raw-speed / throttle_factor + * + * For PCI-E x8: pcie-raw-speed == 16Gb/s + * For PCI-E x4: pcie-raw-speed == 8Gb/s + * + * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s + * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s + * + * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s + * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s + */ + + MXGEFW_CMD_VPUMP_UP, + /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ + MXGEFW_CMD_GET_VPUMP_CLK, + /* Get the lanai clock */ + + MXGEFW_CMD_GET_DCA_OFFSET, + /* offset of dca control for WDMAs */ }; enum myri10ge_mcp_cmd_status { @@ -302,7 +345,8 @@ enum myri10ge_mcp_cmd_status { MXGEFW_CMD_ERROR_UNALIGNED, MXGEFW_CMD_ERROR_NO_MDIO, MXGEFW_CMD_ERROR_XFP_FAILURE, - MXGEFW_CMD_ERROR_XFP_ABSENT + MXGEFW_CMD_ERROR_XFP_ABSENT, + MXGEFW_CMD_ERROR_BAD_PCIE_LINK }; #define MXGEFW_OLD_IRQ_DATA_LEN 40 diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h index 16a810dd6d5..07d65c2cbb2 100644 --- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h +++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h @@ -1,30 +1,6 @@ #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ #define __MYRI10GE_MCP_GEN_HEADER_H__ -/* this file define a standard header used as a first entry point to - * exchange information between firmware/driver and driver. The - * header structure can be anywhere in the mcp. It will usually be in - * the .data section, because some fields needs to be initialized at - * compile time. - * The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must - * contains the location of the header. - * - * Typically a MCP will start with the following: - * .text - * .space 52 ! to help catch MEMORY_INT errors - * bt start ! 
jump to real code - * nop - * .long _gen_mcp_header - * - * The source will have a definition like: - * - * mcp_gen_header_t gen_mcp_header = { - * .header_length = sizeof(mcp_gen_header_t), - * .mcp_type = MCP_TYPE_XXX, - * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $", - * .mcp_globals = (unsigned)&Globals - * }; - */ #define MCP_HEADER_PTR_OFFSET 0x3c @@ -32,13 +8,14 @@ #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ +#define MCP_TYPE_DFLT 0x20202020 /* " " */ struct mcp_gen_header { /* the first 4 fields are filled at compile time */ unsigned header_length; __be32 mcp_type; char version[128]; - unsigned mcp_globals; /* pointer to mcp-type specific structure */ + unsigned mcp_private; /* pointer to mcp-type specific structure */ /* filled by the MCP at run-time */ unsigned sram_size; @@ -53,6 +30,18 @@ struct mcp_gen_header { * * Never remove any field. Keep everything naturally align. */ + + /* Specifies if the running mcp is mcp0, 1, or 2. */ + unsigned char mcp_index; + unsigned char disable_rabbit; + unsigned char unaligned_tlp; + unsigned char pad1; + unsigned counters_addr; + unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ + unsigned short handoff_id_major; /* must be equal */ + unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */ + unsigned msix_table_addr; /* start address of msix table in firmware */ + /* 8 */ }; #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */ diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 8cb29f5b103..da4c4fb9706 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -776,7 +776,6 @@ struct netxen_hardware_context { u8 revision_id; u16 board_type; - u16 max_ports; struct netxen_board_info boardcfg; u32 xg_linkup; u32 qg_linksup; @@ -863,6 +862,7 @@ struct netxen_adapter { unsigned char mac_addr[ETH_ALEN]; int mtu; int portnum; + u8 physical_port; struct work_struct watchdog_task; struct timer_list watchdog_timer; @@ -1034,7 +1034,6 @@ int netxen_rom_se(struct netxen_adapter *adapter, int addr); /* Functions from netxen_nic_isr.c */ void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); -void netxen_initialize_adapter_hw(struct netxen_adapter *adapter); void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, struct pci_dev **used_dev); void netxen_initialize_adapter_ops(struct netxen_adapter *adapter); @@ -1077,20 +1076,6 @@ static const struct netxen_brdinfo netxen_boards[] = { #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) -static inline void get_brd_port_by_type(u32 type, int *ports) -{ - int i, found = 0; - for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { - if (netxen_boards[i].brdtype == type) { - *ports = netxen_boards[i].ports; - found = 1; - break; - } - } - if (!found) - *ports = 0; -} - static inline void get_brd_name_by_type(u32 type, char *name) { int i, found = 0; @@ -1169,5 +1154,4 @@ extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, extern struct ethtool_ops netxen_nic_ethtool_ops; -extern int physical_port[]; /* physical port # from virtual port.*/ #endif /* __NETXEN_NIC_H_ */ diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 6e98d830eef..723487bf200 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -369,7 +369,7 @@ netxen_nic_get_regs(struct 
net_device *dev, struct ethtool_regs *regs, void *p) for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) { /* GB: port specific registers */ if (mode == 0 && i >= 19) - window = physical_port[adapter->portnum] * + window = adapter->physical_port * NETXEN_NIC_PORT_WINDOW; NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode]. @@ -527,7 +527,7 @@ netxen_nic_get_pauseparam(struct net_device *dev, { struct netxen_adapter *adapter = netdev_priv(dev); __u32 val; - int port = physical_port[adapter->portnum]; + int port = adapter->physical_port; if (adapter->ahw.board_type == NETXEN_NIC_GBE) { if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) @@ -573,7 +573,7 @@ netxen_nic_set_pauseparam(struct net_device *dev, { struct netxen_adapter *adapter = netdev_priv(dev); __u32 val; - int port = physical_port[adapter->portnum]; + int port = adapter->physical_port; /* read mode */ if (adapter->ahw.board_type == NETXEN_NIC_GBE) { if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index af735646825..c43d06b8de9 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -396,11 +396,8 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter) } adapter->intr_scheme = readl( NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_CAPABILITIES_FW)); - printk(KERN_NOTICE "%s: FW capabilities:0x%x\n", netxen_nic_driver_name, - adapter->intr_scheme); adapter->msi_mode = readl( NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_MSI_MODE_FW)); - DPRINTK(INFO, "Receive Peg ready too. starting stuff\n"); addr = netxen_alloc(adapter->ahw.pdev, sizeof(struct netxen_ring_ctx) + @@ -408,8 +405,6 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter) (dma_addr_t *) & adapter->ctx_desc_phys_addr, &adapter->ctx_desc_pdev); - printk(KERN_INFO "ctx_desc_phys_addr: 0x%llx\n", - (unsigned long long) adapter->ctx_desc_phys_addr); if (addr == NULL) { DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); err = -ENOMEM; @@ -429,8 +424,6 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter) adapter->max_tx_desc_count, (dma_addr_t *) & hw->cmd_desc_phys_addr, &adapter->ahw.cmd_desc_pdev); - printk(KERN_INFO "cmd_desc_phys_addr: 0x%llx\n", - (unsigned long long) hw->cmd_desc_phys_addr); if (addr == NULL) { DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); @@ -1032,15 +1025,15 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) { netxen_nic_write_w0(adapter, - NETXEN_NIU_GB_MAX_FRAME_SIZE( - physical_port[adapter->portnum]), new_mtu); + NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), + new_mtu); return 0; } int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) { new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; - if (physical_port[adapter->portnum] == 0) + if (adapter->physical_port == 0) netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); else @@ -1051,7 +1044,7 @@ int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) void netxen_nic_init_niu_gb(struct netxen_adapter *adapter) { - netxen_niu_gbe_init_port(adapter, physical_port[adapter->portnum]); + netxen_niu_gbe_init_port(adapter, adapter->physical_port); } void @@ -1127,7 +1120,6 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) void netxen_nic_flash_print(struct netxen_adapter *adapter) { - int valid = 1; u32 fw_major = 0; u32 fw_minor = 0; u32 fw_build = 0; @@ -1137,70 +1129,62 @@ void 
netxen_nic_flash_print(struct netxen_adapter *adapter) __le32 *ptr32; struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); - if (board_info->magic != NETXEN_BDINFO_MAGIC) { - printk - ("NetXen Unknown board config, Read 0x%x expected as 0x%x\n", - board_info->magic, NETXEN_BDINFO_MAGIC); - valid = 0; - } - if (board_info->header_version != NETXEN_BDINFO_VERSION) { - printk("NetXen Unknown board config version." - " Read %x, expected %x\n", - board_info->header_version, NETXEN_BDINFO_VERSION); - valid = 0; - } - if (valid) { - ptr32 = (u32 *)&serial_num; - addr = NETXEN_USER_START + - offsetof(struct netxen_new_user_info, serial_num); - for (i = 0; i < 8; i++) { - if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) { - printk("%s: ERROR reading %s board userarea.\n", - netxen_nic_driver_name, - netxen_nic_driver_name); - return; - } - ptr32++; - addr += sizeof(u32); + + adapter->driver_mismatch = 0; + + ptr32 = (u32 *)&serial_num; + addr = NETXEN_USER_START + + offsetof(struct netxen_new_user_info, serial_num); + for (i = 0; i < 8; i++) { + if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) { + printk("%s: ERROR reading %s board userarea.\n", + netxen_nic_driver_name, + netxen_nic_driver_name); + adapter->driver_mismatch = 1; + return; } + ptr32++; + addr += sizeof(u32); + } + + fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, + NETXEN_FW_VERSION_MAJOR)); + fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, + NETXEN_FW_VERSION_MINOR)); + fw_build = + readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); + if (adapter->portnum == 0) { get_brd_name_by_type(board_info->board_type, brd_name); printk("NetXen %s Board S/N %s Chip id 0x%x\n", - brd_name, serial_num, board_info->chip_id); - - printk("NetXen %s Board #%d, Chip id 0x%x\n", - board_info->board_type == 0x0b ? 
"XGB" : "GBE", - board_info->board_num, board_info->chip_id); - fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, - NETXEN_FW_VERSION_MAJOR)); - fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, - NETXEN_FW_VERSION_MINOR)); - fw_build = - readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); - - printk("NetXen Firmware version %d.%d.%d\n", fw_major, fw_minor, - fw_build); + brd_name, serial_num, board_info->chip_id); + printk("NetXen Firmware version %d.%d.%d\n", fw_major, + fw_minor, fw_build); } + if (fw_major != _NETXEN_NIC_LINUX_MAJOR) { - printk(KERN_ERR "The mismatch in driver version and firmware " - "version major number\n" - "Driver version major number = %d \t" - "Firmware version major number = %d \n", - _NETXEN_NIC_LINUX_MAJOR, fw_major); adapter->driver_mismatch = 1; } if (fw_minor != _NETXEN_NIC_LINUX_MINOR && fw_minor != (_NETXEN_NIC_LINUX_MINOR + 1)) { - printk(KERN_ERR "The mismatch in driver version and firmware " - "version minor number\n" - "Driver version minor number = %d \t" - "Firmware version minor number = %d \n", - _NETXEN_NIC_LINUX_MINOR, fw_minor); adapter->driver_mismatch = 1; } - if (adapter->driver_mismatch) - printk(KERN_INFO "Use the driver with version no %d.%d.xxx\n", - fw_major, fw_minor); + if (adapter->driver_mismatch) { + printk(KERN_ERR "%s: driver and firmware version mismatch\n", + adapter->netdev->name); + return; + } + + switch (adapter->ahw.board_type) { + case NETXEN_NIC_GBE: + dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", + adapter->netdev->name); + break; + case NETXEN_NIC_XGBE: + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + break; + } } diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 45fa33e0cb9..70d1b22ced2 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -203,21 +203,6 @@ void netxen_initialize_adapter_sw(struct netxen_adapter *adapter) } } -void netxen_initialize_adapter_hw(struct netxen_adapter *adapter) -{ - int ports = 0; - struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); - - if (netxen_nic_get_board_info(adapter) != 0) - printk("%s: Error getting board config info.\n", - netxen_nic_driver_name); - get_brd_port_by_type(board_info->board_type, &ports); - if (ports == 0) - printk(KERN_ERR "%s: Unknown board type\n", - netxen_nic_driver_name); - adapter->ahw.max_ports = ports; -} - void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) { switch (adapter->ahw.board_type) { @@ -765,18 +750,13 @@ int netxen_flash_unlock(struct netxen_adapter *adapter) int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) { - int addr, val, status; + int addr, val; int n, i; int init_delay = 0; struct crb_addr_pair *buf; u32 off; /* resetall */ - status = netxen_nic_get_board_info(adapter); - if (status) - printk("%s: netxen_pinit_from_rom: Error getting board info\n", - netxen_nic_driver_name); - netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, NETXEN_ROMBUS_RESET); @@ -860,10 +840,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) netxen_nic_pci_change_crbwindow(adapter, 1); } if (init_delay == 1) { - msleep(2000); + msleep(1000); init_delay = 0; } - msleep(20); + msleep(1); } kfree(buf); @@ -938,12 +918,28 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter) void netxen_free_adapter_offload(struct netxen_adapter *adapter) { + int i; + if (adapter->dummy_dma.addr) { - 
pci_free_consistent(adapter->ahw.pdev, + i = 100; + do { + if (dma_watchdog_shutdown_request(adapter) == 1) + break; + msleep(50); + if (dma_watchdog_shutdown_poll_result(adapter) == 1) + break; + } while (--i); + + if (i) { + pci_free_consistent(adapter->ahw.pdev, NETXEN_HOST_DUMMY_DMA_SIZE, adapter->dummy_dma.addr, adapter->dummy_dma.phys_addr); - adapter->dummy_dma.addr = NULL; + adapter->dummy_dma.addr = NULL; + } else { + printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", + adapter->netdev->name); + } } } diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c index f487615f406..96cec41f901 100644 --- a/drivers/net/netxen/netxen_nic_isr.c +++ b/drivers/net/netxen/netxen_nic_isr.c @@ -145,7 +145,7 @@ static void netxen_nic_isr_other(struct netxen_adapter *adapter) /* verify the offset */ val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); - val = val >> physical_port[adapter->portnum]; + val = val >> adapter->physical_port; if (val == adapter->ahw.qg_linksup) return; @@ -199,7 +199,7 @@ void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) /* WINDOW = 1 */ val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); - val >>= (physical_port[adapter->portnum] * 8); + val >>= (adapter->physical_port * 8); val &= 0xff; if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) { diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 7144c255ce5..63cd67b931e 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -70,17 +70,19 @@ static void netxen_nic_poll_controller(struct net_device *netdev); static irqreturn_t netxen_intr(int irq, void *data); static irqreturn_t netxen_msi_intr(int irq, void *data); -int physical_port[] = {0, 1, 2, 3}; - /* PCI Device ID Table */ +#define ENTRY(device) \ + {PCI_DEVICE(0x4040, (device)), \ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} + static struct pci_device_id netxen_pci_tbl[] __devinitdata = { - {PCI_DEVICE(0x4040, 0x0001)}, - {PCI_DEVICE(0x4040, 0x0002)}, - {PCI_DEVICE(0x4040, 0x0003)}, - {PCI_DEVICE(0x4040, 0x0004)}, - {PCI_DEVICE(0x4040, 0x0005)}, - {PCI_DEVICE(0x4040, 0x0024)}, - {PCI_DEVICE(0x4040, 0x0025)}, + ENTRY(0x0001), + ENTRY(0x0002), + ENTRY(0x0003), + ENTRY(0x0004), + ENTRY(0x0005), + ENTRY(0x0024), + ENTRY(0x0025), {0,} }; @@ -288,10 +290,11 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int pci_func_id = PCI_FUNC(pdev->devfn); DECLARE_MAC_BUF(mac); - printk(KERN_INFO "%s \n", netxen_nic_driver_string); + if (pci_func_id == 0) + printk(KERN_INFO "%s \n", netxen_nic_driver_string); if (pdev->class != 0x020000) { - printk(KERN_ERR"NetXen function %d, class %x will not " + printk(KERN_DEBUG "NetXen function %d, class %x will not " "be enabled.\n",pci_func_id, pdev->class); return -ENODEV; } @@ -450,8 +453,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ adapter->curr_window = 255; - /* initialize the adapter */ - netxen_initialize_adapter_hw(adapter); + if (netxen_nic_get_board_info(adapter) != 0) { + printk("%s: Error getting board config info.\n", + netxen_nic_driver_name); + err = -EIO; + goto err_out_iounmap; + } /* * Adapter in our case is quad port so initialize it before @@ -530,17 +537,15 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */ /* Mezz cards have PCI function 0,2,3 enabled */ - if ((adapter->ahw.boardcfg.board_type == 
NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) - && (pci_func_id >= 2)) + switch (adapter->ahw.boardcfg.board_type) { + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + if (pci_func_id >= 2) adapter->portnum = pci_func_id - 2; - -#ifdef CONFIG_IA64 - if(adapter->portnum == 0) { - netxen_pinit_from_rom(adapter, 0); - udelay(500); - netxen_load_firmware(adapter); + break; + default: + break; } -#endif init_timer(&adapter->watchdog_timer); adapter->ahw.xg_linkup = 0; @@ -613,11 +618,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENODEV; goto err_out_free_dev; } + } else { + writel(0, NETXEN_CRB_NORMALIZE(adapter, + CRB_CMDPEG_STATE)); + netxen_pinit_from_rom(adapter, 0); + msleep(1); + netxen_load_firmware(adapter); + netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); } /* clear the register for future unloads/loads */ writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); - printk(KERN_INFO "State: 0x%0x\n", + dev_info(&pdev->dev, "cmdpeg state: 0x%0x\n", readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); /* @@ -639,9 +651,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* * See if the firmware gave us a virtual-physical port mapping. */ + adapter->physical_port = adapter->portnum; i = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_V2P(adapter->portnum))); if (i != 0x55555555) - physical_port[adapter->portnum] = i; + adapter->physical_port = i; netif_carrier_off(netdev); netif_stop_queue(netdev); @@ -654,22 +667,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_dev; } + netxen_nic_flash_print(adapter); pci_set_drvdata(pdev, adapter); - switch (adapter->ahw.board_type) { - case NETXEN_NIC_GBE: - printk(KERN_INFO "%s: QUAD GbE board initialized\n", - netxen_nic_driver_name); - break; - - case NETXEN_NIC_XGBE: - printk(KERN_INFO "%s: XGbE board initialized\n", - netxen_nic_driver_name); - break; - } - - adapter->driver_mismatch = 0; - return 0; err_out_free_dev: @@ -760,55 +760,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) vfree(adapter->cmd_buf_arr); - if (adapter->portnum == 0) { - if (init_firmware_done) { - i = 100; - do { - if (dma_watchdog_shutdown_request(adapter) == 1) - break; - msleep(100); - if (dma_watchdog_shutdown_poll_result(adapter) == 1) - break; - } while (--i); - - if (i == 0) - printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", - netdev->name); - - /* clear the register for future unloads/loads */ - writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); - printk(KERN_INFO "State: 0x%0x\n", - readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); - - /* leave the hw in the same state as reboot */ - writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); - netxen_pinit_from_rom(adapter, 0); - msleep(1); - netxen_load_firmware(adapter); - netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); - } - - /* clear the register for future unloads/loads */ - writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); - printk(KERN_INFO "State: 0x%0x\n", - readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); - - i = 100; - do { - if (dma_watchdog_shutdown_request(adapter) == 1) - break; - msleep(100); - if (dma_watchdog_shutdown_poll_result(adapter) == 1) - break; - } while (--i); - - if (i) { - netxen_free_adapter_offload(adapter); - } else { - printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", - netdev->name); - } - } + if (adapter->portnum == 0) + netxen_free_adapter_offload(adapter); if 
(adapter->irq) free_irq(adapter->irq, adapter); @@ -840,13 +793,15 @@ static int netxen_nic_open(struct net_device *netdev) irq_handler_t handler; unsigned long flags = IRQF_SAMPLE_RANDOM; + if (adapter->driver_mismatch) + return -EIO; + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) { err = netxen_init_firmware(adapter); if (err != 0) { printk(KERN_ERR "Failed to init firmware\n"); return -EIO; } - netxen_nic_flash_print(adapter); /* setup all the resources for the Phantom... */ /* this include the descriptors for rcv, tx, and status */ @@ -895,14 +850,12 @@ static int netxen_nic_open(struct net_device *netdev) if (adapter->set_mtu) adapter->set_mtu(adapter, netdev->mtu); - if (!adapter->driver_mismatch) - mod_timer(&adapter->watchdog_timer, jiffies); + mod_timer(&adapter->watchdog_timer, jiffies); napi_enable(&adapter->napi); netxen_nic_enable_int(adapter); - if (!adapter->driver_mismatch) - netif_start_queue(netdev); + netif_start_queue(netdev); return 0; } diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index 1c852a76c80..a3bc7cc67a6 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ b/drivers/net/netxen/netxen_nic_niu.c @@ -94,7 +94,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, long timeout = 0; long result = 0; long restore = 0; - long phy = physical_port[adapter->portnum]; + long phy = adapter->physical_port; __u32 address; __u32 command; __u32 status; @@ -190,7 +190,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg, long timeout = 0; long result = 0; long restore = 0; - long phy = physical_port[adapter->portnum]; + long phy = adapter->physical_port; __u32 address; __u32 command; __u32 status; @@ -456,7 +456,7 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port) int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) { - u32 portnum = physical_port[adapter->portnum]; + u32 portnum = adapter->physical_port; netxen_crb_writelit_adapter(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*portnum), 0x1447); @@ -573,7 +573,7 @@ static int netxen_niu_macaddr_get(struct netxen_adapter *adapter, { u32 stationhigh; u32 stationlow; - int phy = physical_port[adapter->portnum]; + int phy = adapter->physical_port; u8 val[8]; if (addr == NULL) @@ -604,7 +604,7 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, { u8 temp[4]; u32 val; - int phy = physical_port[adapter->portnum]; + int phy = adapter->physical_port; unsigned char mac_addr[6]; int i; DECLARE_MAC_BUF(mac); @@ -724,7 +724,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) { __u32 mac_cfg0; - u32 port = physical_port[adapter->portnum]; + u32 port = adapter->physical_port; if (port > NETXEN_NIU_MAX_GBE_PORTS) return -EINVAL; @@ -740,7 +740,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) { __u32 mac_cfg; - u32 port = physical_port[adapter->portnum]; + u32 port = adapter->physical_port; if (port > NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; @@ -757,7 +757,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, netxen_niu_prom_mode_t mode) { __u32 reg; - u32 port = physical_port[adapter->portnum]; + u32 port = adapter->physical_port; if (port > NETXEN_NIU_MAX_GBE_PORTS) return -EINVAL; @@ -814,7 +814,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, 
netxen_ethernet_macaddr_t addr) { - int phy = physical_port[adapter->portnum]; + int phy = adapter->physical_port; u8 temp[4]; u32 val; @@ -867,7 +867,7 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, netxen_ethernet_macaddr_t * addr) { - int phy = physical_port[adapter->portnum]; + int phy = adapter->physical_port; u32 stationhigh; u32 stationlow; u8 val[8]; @@ -896,7 +896,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, netxen_niu_prom_mode_t mode) { __u32 reg; - u32 port = physical_port[adapter->portnum]; + u32 port = adapter->physical_port; if (port > NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 4009c4ce96b..918f802fe08 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -1,6 +1,6 @@ /* niu.c: Neptune ethernet driver. * - * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> @@ -33,8 +33,8 @@ #define DRV_MODULE_NAME "niu" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "0.8" -#define DRV_MODULE_RELDATE "April 24, 2008" +#define DRV_MODULE_VERSION "0.9" +#define DRV_MODULE_RELDATE "May 4, 2008" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -865,7 +865,6 @@ static int link_status_1g_serdes(struct niu *np, int *link_up_p) return 0; } - static int link_status_10g_serdes(struct niu *np, int *link_up_p) { unsigned long flags; @@ -900,7 +899,6 @@ static int link_status_10g_serdes(struct niu *np, int *link_up_p) return 0; } - static int link_status_1g_rgmii(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; @@ -957,7 +955,6 @@ out: return err; } - static int bcm8704_reset(struct niu *np) { int err, limit; @@ -1357,8 +1354,6 @@ static int mii_reset(struct niu *np) return 0; } - - static int xcvr_init_1g_rgmii(struct niu *np) { int err; @@ -1419,7 +1414,6 @@ static int xcvr_init_1g_rgmii(struct niu *np) return 0; } - static int mii_init_common(struct niu *np) { struct niu_link_config *lp = &np->link_config; @@ -7008,31 +7002,20 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np, return 0; } -/* niu board models have a trailing dash version incremented - * with HW rev change. 
Need to ingnore the dash version while - * checking for match - * - * for example, for the 10G card the current vpd.board_model - * is 501-5283-04, of which -04 is the dash version and have - * to be ignored - */ -static int niu_board_model_match(struct niu *np, const char *model) -{ - return !strncmp(np->vpd.board_model, model, strlen(model)); -} - static int niu_pci_vpd_get_nports(struct niu *np) { int ports = 0; - if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) || - (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) || - (niu_board_model_match(np, NIU_ALONSO_BM_STR))) { + if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { ports = 4; - } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) || - (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) || - (niu_board_model_match(np, NIU_FOXXY_BM_STR)) || - (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) { + } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { ports = 2; } @@ -7053,8 +7036,8 @@ static void __devinit niu_pci_vpd_validate(struct niu *np) return; } - if (!strcmp(np->vpd.model, "SUNW,CP3220") || - !strcmp(np->vpd.model, "SUNW,CP3260")) { + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->flags |= NIU_FLAGS_XCVR_SERDES; @@ -7065,7 +7048,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np) } if (np->flags & NIU_FLAGS_10G) np->mac_xcvr = MAC_XCVR_XPCS; - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { @@ -7264,8 +7247,11 @@ static int __devinit niu_get_and_validate_port(struct niu *np) parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; + /* All of the current probing methods fail on + * Maramba on-board parts. 
+ */ if (!parent->num_ports) - return -ENODEV; + parent->num_ports = 4; } } } @@ -7538,8 +7524,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) u32 val; int err; - if (!strcmp(np->vpd.model, "SUNW,CP3220") || - !strcmp(np->vpd.model, "SUNW,CP3260")) { + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { num_10g = 0; num_1g = 2; parent->plat_type = PLAT_TYPE_ATCA_CP3220; @@ -7548,7 +7534,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { num_10g = 2; num_1g = 0; parent->num_ports = 2; @@ -7943,6 +7929,7 @@ static int __devinit niu_get_of_props(struct niu *np) struct device_node *dp; const char *phy_type; const u8 *mac_addr; + const char *model; int prop_len; if (np->parent->plat_type == PLAT_TYPE_NIU) @@ -7997,6 +7984,11 @@ static int __devinit niu_get_of_props(struct niu *np) memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + model = of_get_property(dp, "model", &prop_len); + + if (model) + strcpy(np->vpd.model, model); + return 0; #else return -EINVAL; diff --git a/drivers/net/niu.h b/drivers/net/niu.h index 97ffbe137bc..12fd570b942 100644 --- a/drivers/net/niu.h +++ b/drivers/net/niu.h @@ -2946,6 +2946,15 @@ struct rx_ring_info { #define NIU_ALONSO_BM_STR "373-0202" #define NIU_FOXXY_BM_STR "501-7961" #define NIU_2XGF_MRVL_BM_STR "SK-6E82" +#define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc" +#define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf" +#define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem" +#define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem" +#define NIU_ALONSO_MDL_STR "SUNW,CP3220" +#define NIU_KIMI_MDL_STR "SUNW,CP3260" +#define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune" +#define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem" +#define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf" #define NIU_VPD_MIN_MAJOR 3 #define NIU_VPD_MIN_MINOR 4 diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 3b2a6c59808..993d87c9296 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -277,7 +277,7 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr, *tcph = tcp_hdr(skb); /* check if ip header and tcp header are complete */ - if (iph->tot_len < ip_len + tcp_hdrlen(skb)) + if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) return -1; *hdr_flags = LRO_IPV4 | LRO_TCP; diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index ce95c5d168f..70d012e90dc 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -525,12 +525,14 @@ static int axnet_open(struct net_device *dev) int ret; axnet_dev_t *info = PRIV(dev); struct pcmcia_device *link = info->p_dev; + unsigned int nic_base = dev->base_addr; DEBUG(2, "axnet_open('%s')\n", dev->name); if (!pcmcia_dev_present(link)) return -ENODEV; + outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. 
*/ ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev); if (ret) return ret; diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 8f328a03847..a550c9bd126 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link) cardtype = CONTEC; break; case MANFID_FUJITSU: - if (link->card_id == PRODID_FUJITSU_MBH10302) + if (link->conf.ConfigBase == 0x0fe0) + cardtype = MBH10302; + else if (link->card_id == PRODID_FUJITSU_MBH10302) /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), but these are MBH10304 based card. */ cardtype = MBH10304; diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index fd8158a86f6..2d4c4ad89b8 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -969,6 +969,7 @@ static int pcnet_open(struct net_device *dev) int ret; pcnet_dev_t *info = PRIV(dev); struct pcmcia_device *link = info->p_dev; + unsigned int nic_base = dev->base_addr; DEBUG(2, "pcnet_open('%s')\n", dev->name); @@ -976,6 +977,8 @@ static int pcnet_open(struct net_device *dev) return -ENODEV; set_misc_reg(dev); + + outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */ ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev); if (ret) return ret; diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index d041f831a18..f6c4698ce73 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -1461,22 +1461,25 @@ static void set_multicast_list(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; + unsigned value; SelectPage(0x42); + value = GetByte(XIRCREG42_SWC1) & 0xC0; + if (dev->flags & IFF_PROMISC) { /* snoop */ - PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */ + PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { - PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */ + PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ } else if (dev->mc_count) { /* the chip can filter 9 addresses perfectly */ - PutByte(XIRCREG42_SWC1, 0x01); + PutByte(XIRCREG42_SWC1, value | 0x01); SelectPage(0x40); PutByte(XIRCREG40_CMD0, Offline); set_addresses(dev); SelectPage(0x40); PutByte(XIRCREG40_CMD0, EnableRecv | Online); } else { /* standard usage */ - PutByte(XIRCREG42_SWC1, 0x00); + PutByte(XIRCREG42_SWC1, value | 0x00); } SelectPage(0); } @@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full) /* enable receiver and put the mac online */ if (full) { + set_multicast_list(dev); SelectPage(0x40); PutByte(XIRCREG40_CMD0, EnableRecv | Online); } diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 4eb322e5273..1c89b97f4e0 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -22,12 +22,8 @@ *************************************************************************/ #define DRV_NAME "pcnet32" -#ifdef CONFIG_PCNET32_NAPI -#define DRV_VERSION "1.34-NAPI" -#else -#define DRV_VERSION "1.34" -#endif -#define DRV_RELDATE "14.Aug.2007" +#define DRV_VERSION "1.35" +#define DRV_RELDATE "21.Apr.2008" #define PFX DRV_NAME ": " static const char *const version = @@ -329,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev); static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *ptr); static void pcnet32_purge_tx_ring(struct net_device *dev); -static int pcnet32_alloc_ring(struct net_device *dev, char *name); +static 
int pcnet32_alloc_ring(struct net_device *dev, const char *name); static void pcnet32_free_ring(struct net_device *dev); static void pcnet32_check_media(struct net_device *dev, int verbose); @@ -445,30 +441,24 @@ static struct pcnet32_access pcnet32_dwio = { static void pcnet32_netif_stop(struct net_device *dev) { -#ifdef CONFIG_PCNET32_NAPI struct pcnet32_private *lp = netdev_priv(dev); -#endif + dev->trans_start = jiffies; -#ifdef CONFIG_PCNET32_NAPI napi_disable(&lp->napi); -#endif netif_tx_disable(dev); } static void pcnet32_netif_start(struct net_device *dev) { -#ifdef CONFIG_PCNET32_NAPI struct pcnet32_private *lp = netdev_priv(dev); ulong ioaddr = dev->base_addr; u16 val; -#endif + netif_wake_queue(dev); -#ifdef CONFIG_PCNET32_NAPI val = lp->a.read_csr(ioaddr, CSR3); val &= 0x00ff; lp->a.write_csr(ioaddr, CSR3, val); napi_enable(&lp->napi); -#endif } /* @@ -911,11 +901,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) rc = 1; /* default to fail */ if (netif_running(dev)) -#ifdef CONFIG_PCNET32_NAPI pcnet32_netif_stop(dev); -#else - pcnet32_close(dev); -#endif spin_lock_irqsave(&lp->lock, flags); lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ @@ -1046,7 +1032,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ a->write_bcr(ioaddr, 32, (x & ~0x0002)); -#ifdef CONFIG_PCNET32_NAPI if (netif_running(dev)) { pcnet32_netif_start(dev); pcnet32_restart(dev, CSR0_NORMAL); @@ -1055,16 +1040,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ } spin_unlock_irqrestore(&lp->lock, flags); -#else - if (netif_running(dev)) { - spin_unlock_irqrestore(&lp->lock, flags); - pcnet32_open(dev); - } else { - pcnet32_purge_rx_ring(dev); - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ - spin_unlock_irqrestore(&lp->lock, flags); - } -#endif return (rc); } /* end pcnet32_loopback_test */ @@ -1270,11 +1245,7 @@ static void pcnet32_rx_entry(struct net_device *dev, } dev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, dev); -#ifdef CONFIG_PCNET32_NAPI netif_receive_skb(skb); -#else - netif_rx(skb); -#endif dev->last_rx = jiffies; dev->stats.rx_packets++; return; @@ -1403,7 +1374,6 @@ static int pcnet32_tx(struct net_device *dev) return must_restart; } -#ifdef CONFIG_PCNET32_NAPI static int pcnet32_poll(struct napi_struct *napi, int budget) { struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); @@ -1442,7 +1412,6 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) } return work_done; } -#endif #define PCNET32_REGS_PER_PHY 32 #define PCNET32_MAX_PHYS 32 @@ -1864,9 +1833,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* napi.weight is used in both the napi and non-napi cases */ lp->napi.weight = lp->rx_ring_size / 2; -#ifdef CONFIG_PCNET32_NAPI netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); -#endif if (fdx && !(lp->options & PCNET32_PORT_ASEL) && ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) @@ -2016,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } /* if any allocation fails, caller must also call pcnet32_free_ring */ -static int pcnet32_alloc_ring(struct net_device *dev, char *name) +static int pcnet32_alloc_ring(struct net_device *dev, const char *name) { struct pcnet32_private *lp = netdev_priv(dev); @@ -2297,9 +2264,7 @@ static int 
pcnet32_open(struct net_device *dev) goto err_free_ring; } -#ifdef CONFIG_PCNET32_NAPI napi_enable(&lp->napi); -#endif /* Re-initialize the PCNET32, and start it when done. */ lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); @@ -2623,7 +2588,6 @@ pcnet32_interrupt(int irq, void *dev_id) dev->name, csr0); /* unlike for the lance, there is no restart needed */ } -#ifdef CONFIG_PCNET32_NAPI if (netif_rx_schedule_prep(dev, &lp->napi)) { u16 val; /* set interrupt masks */ @@ -2634,24 +2598,9 @@ pcnet32_interrupt(int irq, void *dev_id) __netif_rx_schedule(dev, &lp->napi); break; } -#else - pcnet32_rx(dev, lp->napi.weight); - if (pcnet32_tx(dev)) { - /* reset the chip to clear the error condition, then restart */ - lp->a.reset(ioaddr); - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ - pcnet32_restart(dev, CSR0_START); - netif_wake_queue(dev); - } -#endif csr0 = lp->a.read_csr(ioaddr, CSR0); } -#ifndef CONFIG_PCNET32_NAPI - /* Set interrupt enable. */ - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); -#endif - if (netif_msg_intr(lp)) printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", dev->name, lp->a.read_csr(ioaddr, CSR0)); @@ -2670,9 +2619,7 @@ static int pcnet32_close(struct net_device *dev) del_timer_sync(&lp->watchdog_timer); netif_stop_queue(dev); -#ifdef CONFIG_PCNET32_NAPI napi_disable(&lp->napi); -#endif spin_lock_irqsave(&lp->lock, flags); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 6bf9e76b0a0..6eb2d31d1e3 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -5,7 +5,7 @@ menuconfig PHYLIB tristate "PHY Device support and infrastructure" depends on !S390 - depends on NET_ETHERNET && (BROKEN || !S390) + depends on NET_ETHERNET help Ethernet controllers are usually attached to PHY devices. This option provides infrastructure for diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3c18bb59495..45cc2914d34 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -547,7 +547,7 @@ static void phy_force_reduction(struct phy_device *phydev) * Must not be called from interrupt context, or while the * phydev->lock is held. 
*/ -void phy_error(struct phy_device *phydev) +static void phy_error(struct phy_device *phydev) { mutex_lock(&phydev->lock); phydev->state = PHY_HALTED; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ac3c01d28fd..16a0e7de588 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) return 0; } +EXPORT_SYMBOL(get_phy_id); /** * get_phy_device - reads the specified PHY device and returns its @phy_device struct diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index d3207c0da89..1f4ca2b54a7 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -2458,6 +2458,7 @@ ppp_create_interface(int unit, int *retp) out3: atomic_dec(&ppp_unit_count); + unregister_netdev(dev); out2: mutex_unlock(&all_ppp_mutex); free_netdev(dev); diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 58a26a47af2..fc6f4b8c64b 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -341,12 +341,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) struct pppox_sock *relay_po; if (sk->sk_state & PPPOX_BOUND) { - struct pppoe_hdr *ph = pppoe_hdr(skb); - int len = ntohs(ph->length); - skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); - if (pskb_trim_rcsum(skb, len)) - goto abort_kfree; - ppp_input(&po->chan, skb); } else if (sk->sk_state & PPPOX_RELAY) { relay_po = get_item_by_addr(&po->pppoe_relay); @@ -357,7 +351,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) goto abort_put; - skb_pull(skb, sizeof(struct pppoe_hdr)); if (!__pppoe_xmit(sk_pppox(relay_po), skb)) goto abort_put; } else { @@ -388,6 +381,7 @@ static int pppoe_rcv(struct sk_buff *skb, { struct pppoe_hdr *ph; struct pppox_sock *po; + int len; if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; @@ -399,10 +393,21 @@ static int pppoe_rcv(struct sk_buff *skb, goto drop; ph = pppoe_hdr(skb); + len = ntohs(ph->length); + + skb_pull_rcsum(skb, sizeof(*ph)); + if (skb->len < len) + goto drop; po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); - if (po != NULL) - return sk_receive_skb(sk_pppox(po), skb, 0); + if (!po) + goto drop; + + if (pskb_trim_rcsum(skb, len)) + goto drop; + + return sk_receive_skb(sk_pppox(po), skb, 0); + drop: kfree_skb(skb); out: @@ -427,12 +432,12 @@ static int pppoe_disc_rcv(struct sk_buff *skb, if (dev_net(dev) != &init_net) goto abort; - if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) - goto abort; - if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) + goto abort; + ph = pppoe_hdr(skb); if (ph->code != PADT_CODE) goto abort; @@ -937,12 +942,10 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, m->msg_namelen = 0; if (skb) { - struct pppoe_hdr *ph = pppoe_hdr(skb); - const int len = ntohs(ph->length); - - error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); + total_len = min_t(size_t, total_len, skb->len); + error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); if (error == 0) - error = len; + error = total_len; } kfree_skb(skb); diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 244d7830c92..f9298827a76 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -240,12 +240,15 @@ static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk) if (sk == NULL) return NULL; + sock_hold(sk); session = (struct 
pppol2tp_session *)(sk->sk_user_data); - if (session == NULL) - return NULL; + if (session == NULL) { + sock_put(sk); + goto out; + } BUG_ON(session->magic != L2TP_SESSION_MAGIC); - +out: return session; } @@ -256,12 +259,15 @@ static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk) if (sk == NULL) return NULL; + sock_hold(sk); tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data); - if (tunnel == NULL) - return NULL; + if (tunnel == NULL) { + sock_put(sk); + goto out; + } BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); - +out: return tunnel; } @@ -716,12 +722,14 @@ discard: session->stats.rx_errors++; kfree_skb(skb); sock_put(session->sock); + sock_put(sock); return 0; error: /* Put UDP header back */ __skb_push(skb, sizeof(struct udphdr)); + sock_put(sock); no_tunnel: return 1; @@ -745,10 +753,13 @@ static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) "%s: received %d bytes\n", tunnel->name, skb->len); if (pppol2tp_recv_core(sk, skb)) - goto pass_up; + goto pass_up_put; + sock_put(sk); return 0; +pass_up_put: + sock_put(sk); pass_up: return 1; } @@ -772,14 +783,18 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, err = 0; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); - if (skb) { - err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data, - skb->len); - if (err < 0) - goto do_skb_free; - err = skb->len; - } -do_skb_free: + if (!skb) + goto end; + + if (len > skb->len) + len = skb->len; + else if (len < skb->len) + msg->msg_flags |= MSG_TRUNC; + + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); + if (likely(err == 0)) + err = len; + kfree_skb(skb); end: return err; @@ -858,7 +873,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); if (tunnel == NULL) - goto error; + goto error_put_sess; /* What header length is configured for this session? */ hdr_len = pppol2tp_l2tp_header_len(session); @@ -870,7 +885,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh sizeof(ppph) + total_len, 0, GFP_KERNEL); if (!skb) - goto error; + goto error_put_sess_tun; /* Reserve space for headers. */ skb_reserve(skb, NET_SKB_PAD); @@ -900,7 +915,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); if (error < 0) { kfree_skb(skb); - goto error; + goto error_put_sess_tun; } skb_put(skb, total_len); @@ -947,10 +962,33 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh session->stats.tx_errors++; } + return error; + +error_put_sess_tun: + sock_put(session->tunnel_sock); +error_put_sess: + sock_put(sk); error: return error; } +/* Automatically called when the skb is freed. + */ +static void pppol2tp_sock_wfree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} + +/* For data skbs that we transmit, we associate with the tunnel socket + * but don't do accounting. + */ +static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) +{ + sock_hold(sk); + skb->sk = sk; + skb->destructor = pppol2tp_sock_wfree; +} + /* Transmit function called by generic PPP driver. Sends PPP frame * over PPPoL2TP socket. 
* @@ -980,6 +1018,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) __wsum csum = 0; struct udphdr *uh; unsigned int len; + int old_headroom; + int new_headroom; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; @@ -991,25 +1031,27 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) sk_tun = session->tunnel_sock; if (sk_tun == NULL) - goto abort; + goto abort_put_sess; tunnel = pppol2tp_sock_to_tunnel(sk_tun); if (tunnel == NULL) - goto abort; + goto abort_put_sess; /* What header length is configured for this session? */ hdr_len = pppol2tp_l2tp_header_len(session); /* Check that there's enough headroom in the skb to insert IP, * UDP and L2TP and PPP headers. If not enough, expand it to - * make room. Note that a new skb (or a clone) is - * allocated. If we return an error from this point on, make - * sure we free the new skb but do not free the original skb - * since that is done by the caller for the error case. + * make room. Adjust truesize. */ headroom = NET_SKB_PAD + sizeof(struct iphdr) + sizeof(struct udphdr) + hdr_len + sizeof(ppph); + old_headroom = skb_headroom(skb); if (skb_cow_head(skb, headroom)) - goto abort; + goto abort_put_sess_tun; + + new_headroom = skb_headroom(skb); + skb_orphan(skb); + skb->truesize += new_headroom - old_headroom; /* Setup PPP header */ __skb_push(skb, sizeof(ppph)); @@ -1065,8 +1107,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) /* Get routing info from the tunnel socket */ dst_release(skb->dst); skb->dst = dst_clone(__sk_dst_get(sk_tun)); - skb_orphan(skb); - skb->sk = sk_tun; + pppol2tp_skb_set_owner_w(skb, sk_tun); /* Queue the packet to IP for output */ len = skb->len; @@ -1083,8 +1124,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) session->stats.tx_errors++; } + sock_put(sk_tun); + sock_put(sk); return 1; +abort_put_sess_tun: + sock_put(sk_tun); +abort_put_sess: + sock_put(sk); abort: /* Free the original skb */ kfree_skb(skb); @@ -1188,7 +1235,7 @@ static void pppol2tp_tunnel_destruct(struct sock *sk) { struct pppol2tp_tunnel *tunnel; - tunnel = pppol2tp_sock_to_tunnel(sk); + tunnel = sk->sk_user_data; if (tunnel == NULL) goto end; @@ -1227,10 +1274,12 @@ static void pppol2tp_session_destruct(struct sock *sk) if (sk->sk_user_data != NULL) { struct pppol2tp_tunnel *tunnel; - session = pppol2tp_sock_to_session(sk); + session = sk->sk_user_data; if (session == NULL) goto out; + BUG_ON(session->magic != L2TP_SESSION_MAGIC); + /* Don't use pppol2tp_sock_to_tunnel() here to * get the tunnel context because the tunnel * socket might have already been closed (its @@ -1276,6 +1325,7 @@ out: static int pppol2tp_release(struct socket *sock) { struct sock *sk = sock->sk; + struct pppol2tp_session *session; int error; if (!sk) @@ -1293,9 +1343,18 @@ static int pppol2tp_release(struct socket *sock) sock_orphan(sk); sock->sk = NULL; + session = pppol2tp_sock_to_session(sk); + /* Purge any queued data */ skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); + if (session != NULL) { + struct sk_buff *skb; + while ((skb = skb_dequeue(&session->reorder_q))) { + kfree_skb(skb); + sock_put(sk); + } + } release_sock(sk); @@ -1598,7 +1657,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, error = ppp_register_channel(&po->chan); if (error) - goto end; + goto end_put_tun; /* This is how we get the session context from the socket. 
*/ sk->sk_user_data = session; @@ -1618,12 +1677,21 @@ out_no_ppp: PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: created\n", session->name); +end_put_tun: + sock_put(tunnel_sock); end: release_sock(sk); - if (error != 0) - PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, - "%s: connect failed: %d\n", session->name, error); + if (error != 0) { + if (session) + PRINTK(session->debug, + PPPOL2TP_MSG_CONTROL, KERN_WARNING, + "%s: connect failed: %d\n", + session->name, error); + else + PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, + "connect failed: %d\n", error); + } return error; } @@ -1658,6 +1726,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, *usockaddr_len = len; error = 0; + sock_put(sock->sk); end: return error; @@ -1896,14 +1965,17 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, err = -EBADF; tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); if (tunnel == NULL) - goto end; + goto end_put_sess; err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); - goto end; + sock_put(session->tunnel_sock); + goto end_put_sess; } err = pppol2tp_session_ioctl(session, cmd, arg); +end_put_sess: + sock_put(sk); end: return err; } @@ -2049,14 +2121,17 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, err = -EBADF; tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); if (tunnel == NULL) - goto end; + goto end_put_sess; err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); + sock_put(session->tunnel_sock); } else err = pppol2tp_session_setsockopt(sk, session, optname, val); err = 0; +end_put_sess: + sock_put(sk); end: return err; } @@ -2171,20 +2246,24 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, err = -EBADF; tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); if (tunnel == NULL) - goto end; + goto end_put_sess; err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); + sock_put(session->tunnel_sock); } else err = pppol2tp_session_getsockopt(sk, session, optname, &val); err = -EFAULT; if (put_user(len, (int __user *) optlen)) - goto end; + goto end_put_sess; if (copy_to_user((void __user *) optval, &val, len)) - goto end; + goto end_put_sess; err = 0; + +end_put_sess: + sock_put(sk); end: return err; } diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index 0d32123085e..1dae1f2ed81 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c @@ -2474,6 +2474,8 @@ static void gelic_wl_free(struct gelic_wl_info *wl) pr_debug("%s: <-\n", __func__); + free_page((unsigned long)wl->buf); + pr_debug("%s: destroy queues\n", __func__); destroy_workqueue(wl->eurus_cmd_queue); destroy_workqueue(wl->event_queue); diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index b7f7b2227d5..bccee68bd48 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -3701,7 +3701,9 @@ static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) printk(KERN_ERR PFX "%s: Driver up/down cycle failed, " "closing device\n",qdev->ndev->name); + rtnl_lock(); dev_close(qdev->ndev); + rtnl_unlock(); return -1; } return 0; diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 169edc15492..504a48ff73c 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -273,7 +273,7 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring, dma_addr_t mapping = desc_dma; while (size-- > 0) { - mapping += sizeof(sizeof(*desc)); + mapping += sizeof(*desc); desc->ndesc = cpu_to_le32(mapping); desc->vndescp 
= desc + 1; desc++; @@ -733,7 +733,7 @@ static void r6040_timer(unsigned long data) } /* Timer active again */ - mod_timer(&lp->timer, jiffies + round_jiffies(HZ)); + mod_timer(&lp->timer, round_jiffies(jiffies + HZ)); } /* Read/set MAC address routines */ diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 2109508c047..f8274f8941e 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h @@ -250,7 +250,7 @@ struct XENA_dev_config { u64 tx_mat0_n[0x8]; #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) - u8 unused_1[0x8]; + u64 xmsi_mask_reg; u64 stat_byte_cnt; #define STAT_BC(n) vBIT(n,4,12) diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 523478ebfd6..ae7b697456b 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -86,7 +86,7 @@ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.26.23" +#define DRV_VERSION "2.0.26.24" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; @@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev) struct pci_dev *tdev = NULL; while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { - if (tdev->bus == s2io_pdev->bus->parent) + if (tdev->bus == s2io_pdev->bus->parent) { pci_dev_put(tdev); return 1; + } } } return 0; @@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link) TTI_DATA1_MEM_TX_URNG_B(0x10) | TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN; - - if (use_continuous_tx_intrs && (link == LINK_UP)) - val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; + if (i == 0) + if (use_continuous_tx_intrs && (link == LINK_UP)) + val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; writeq(val64, &bar0->tti_data1_mem); - val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | - TTI_DATA2_MEM_TX_UFC_B(0x20) | - TTI_DATA2_MEM_TX_UFC_C(0x40) | - TTI_DATA2_MEM_TX_UFC_D(0x80); + if (nic->config.intr_type == MSI_X) { + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | + TTI_DATA2_MEM_TX_UFC_B(0x100) | + TTI_DATA2_MEM_TX_UFC_C(0x200) | + TTI_DATA2_MEM_TX_UFC_D(0x300); + } else { + if ((nic->config.tx_steering_type == + TX_DEFAULT_STEERING) && + (config->tx_fifo_num > 1) && + (i >= nic->udp_fifo_idx) && + (i < (nic->udp_fifo_idx + + nic->total_udp_fifos))) + val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | + TTI_DATA2_MEM_TX_UFC_B(0x80) | + TTI_DATA2_MEM_TX_UFC_C(0x100) | + TTI_DATA2_MEM_TX_UFC_D(0x120); + else + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | + TTI_DATA2_MEM_TX_UFC_B(0x20) | + TTI_DATA2_MEM_TX_UFC_C(0x40) | + TTI_DATA2_MEM_TX_UFC_D(0x80); + } writeq(val64, &bar0->tti_data2_mem); @@ -2606,9 +2625,7 @@ static int fill_rx_buffers(struct ring_info *ring) rxdp1->Buffer0_ptr = pci_map_single (ring->pdev, skb->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); - if( (rxdp1->Buffer0_ptr == 0) || - (rxdp1->Buffer0_ptr == - DMA_ERROR_CODE)) + if(pci_dma_mapping_error(rxdp1->Buffer0_ptr)) goto pci_map_failed; rxdp->Control_2 = @@ -2638,6 +2655,7 @@ static int fill_rx_buffers(struct ring_info *ring) skb->data = (void *) (unsigned long)tmp; skb_reset_tail_pointer(skb); + /* AK: check is wrong. 
0 can be valid dma address */ if (!(rxdp3->Buffer0_ptr)) rxdp3->Buffer0_ptr = pci_map_single(ring->pdev, ba->ba_0, @@ -2646,8 +2664,7 @@ static int fill_rx_buffers(struct ring_info *ring) pci_dma_sync_single_for_device(ring->pdev, (dma_addr_t) rxdp3->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); - if( (rxdp3->Buffer0_ptr == 0) || - (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) + if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) goto pci_map_failed; rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); @@ -2662,18 +2679,17 @@ static int fill_rx_buffers(struct ring_info *ring) (ring->pdev, skb->data, ring->mtu + 4, PCI_DMA_FROMDEVICE); - if( (rxdp3->Buffer2_ptr == 0) || - (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) + if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) goto pci_map_failed; + /* AK: check is wrong */ if (!rxdp3->Buffer1_ptr) rxdp3->Buffer1_ptr = pci_map_single(ring->pdev, ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); - if( (rxdp3->Buffer1_ptr == 0) || - (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { + if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { pci_unmap_single (ring->pdev, (dma_addr_t)(unsigned long) @@ -2813,6 +2829,15 @@ static void free_rx_buffers(struct s2io_nic *sp) } } +static int s2io_chk_rx_buffers(struct ring_info *ring) +{ + if (fill_rx_buffers(ring) == -ENOMEM) { + DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); + DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); + } + return 0; +} + /** * s2io_poll - Rx interrupt handler for NAPI support * @napi : pointer to the napi structure. @@ -2826,57 +2851,73 @@ static void free_rx_buffers(struct s2io_nic *sp) * 0 on success and 1 if there are No Rx packets to be processed. */ -static int s2io_poll(struct napi_struct *napi, int budget) +static int s2io_poll_msix(struct napi_struct *napi, int budget) { - struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); - struct net_device *dev = nic->dev; - int pkt_cnt = 0, org_pkts_to_process; - struct mac_info *mac_control; + struct ring_info *ring = container_of(napi, struct ring_info, napi); + struct net_device *dev = ring->dev; struct config_param *config; + struct mac_info *mac_control; + int pkts_processed = 0; + u8 __iomem *addr = NULL; + u8 val8 = 0; + struct s2io_nic *nic = dev->priv; struct XENA_dev_config __iomem *bar0 = nic->bar0; - int i; + int budget_org = budget; - mac_control = &nic->mac_control; config = &nic->config; + mac_control = &nic->mac_control; - nic->pkts_to_process = budget; - org_pkts_to_process = nic->pkts_to_process; + if (unlikely(!is_s2io_card_up(nic))) + return 0; - writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); - readl(&bar0->rx_traffic_int); + pkts_processed = rx_intr_handler(ring, budget); + s2io_chk_rx_buffers(ring); - for (i = 0; i < config->rx_ring_num; i++) { - rx_intr_handler(&mac_control->rings[i]); - pkt_cnt = org_pkts_to_process - nic->pkts_to_process; - if (!nic->pkts_to_process) { - /* Quota for the current iteration has been met */ - goto no_rx; - } + if (pkts_processed < budget_org) { + netif_rx_complete(dev, napi); + /*Re Enable MSI-Rx Vector*/ + addr = (u8 __iomem *)&bar0->xmsi_mask_reg; + addr += 7 - ring->ring_no; + val8 = (ring->ring_no == 0) ? 
0x3f : 0xbf; + writeb(val8, addr); + val8 = readb(addr); } + return pkts_processed; +} +static int s2io_poll_inta(struct napi_struct *napi, int budget) +{ + struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); + struct ring_info *ring; + struct net_device *dev = nic->dev; + struct config_param *config; + struct mac_info *mac_control; + int pkts_processed = 0; + int ring_pkts_processed, i; + struct XENA_dev_config __iomem *bar0 = nic->bar0; + int budget_org = budget; - netif_rx_complete(dev, napi); + config = &nic->config; + mac_control = &nic->mac_control; - for (i = 0; i < config->rx_ring_num; i++) { - if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { - DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); - DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); - break; - } - } - /* Re enable the Rx interrupts. */ - writeq(0x0, &bar0->rx_traffic_mask); - readl(&bar0->rx_traffic_mask); - return pkt_cnt; + if (unlikely(!is_s2io_card_up(nic))) + return 0; -no_rx: for (i = 0; i < config->rx_ring_num; i++) { - if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { - DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); - DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); + ring = &mac_control->rings[i]; + ring_pkts_processed = rx_intr_handler(ring, budget); + s2io_chk_rx_buffers(ring); + pkts_processed += ring_pkts_processed; + budget -= ring_pkts_processed; + if (budget <= 0) break; - } } - return pkt_cnt; + if (pkts_processed < budget_org) { + netif_rx_complete(dev, napi); + /* Re enable the Rx interrupts for the ring */ + writeq(0, &bar0->rx_traffic_mask); + readl(&bar0->rx_traffic_mask); + } + return pkts_processed; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2918,7 +2959,7 @@ static void s2io_netpoll(struct net_device *dev) /* check for received packet and indicate up to network */ for (i = 0; i < config->rx_ring_num; i++) - rx_intr_handler(&mac_control->rings[i]); + rx_intr_handler(&mac_control->rings[i], 0); for (i = 0; i < config->rx_ring_num; i++) { if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { @@ -2934,7 +2975,8 @@ static void s2io_netpoll(struct net_device *dev) /** * rx_intr_handler - Rx interrupt handler - * @nic: device private variable. + * @ring_info: per ring structure. + * @budget: budget for napi processing. * Description: * If the interrupt is because of a received frame or if the * receive ring contains fresh as yet un-processed frames,this function is @@ -2942,15 +2984,15 @@ static void s2io_netpoll(struct net_device *dev) * stopped and sends the skb to the OSM's Rx handler and then increments * the offset. * Return Value: - * NONE. + * No. of napi packets processed. 
*/ -static void rx_intr_handler(struct ring_info *ring_data) +static int rx_intr_handler(struct ring_info *ring_data, int budget) { int get_block, put_block; struct rx_curr_get_info get_info, put_info; struct RxD_t *rxdp; struct sk_buff *skb; - int pkt_cnt = 0; + int pkt_cnt = 0, napi_pkts = 0; int i; struct RxD1* rxdp1; struct RxD3* rxdp3; @@ -2977,7 +3019,7 @@ static void rx_intr_handler(struct ring_info *ring_data) DBG_PRINT(ERR_DBG, "%s: The skb is ", ring_data->dev->name); DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); - return; + return 0; } if (ring_data->rxd_mode == RXD_MODE_1) { rxdp1 = (struct RxD1*)rxdp; @@ -3014,9 +3056,10 @@ static void rx_intr_handler(struct ring_info *ring_data) rxdp = ring_data->rx_blocks[get_block].block_virt_addr; } - if(ring_data->nic->config.napi){ - ring_data->nic->pkts_to_process -= 1; - if (!ring_data->nic->pkts_to_process) + if (ring_data->nic->config.napi) { + budget--; + napi_pkts++; + if (!budget) break; } pkt_cnt++; @@ -3034,6 +3077,7 @@ static void rx_intr_handler(struct ring_info *ring_data) } } } + return(napi_pkts); } /** @@ -3730,14 +3774,19 @@ static void restore_xmsi_data(struct s2io_nic *nic) { struct XENA_dev_config __iomem *bar0 = nic->bar0; u64 val64; - int i; + int i, msix_index; + + + if (nic->device_type == XFRAME_I_DEVICE) + return; for (i=0; i < MAX_REQUESTED_MSI_X; i++) { + msix_index = (i) ? ((i-1) * 8 + 1): 0; writeq(nic->msix_info[i].addr, &bar0->xmsi_address); writeq(nic->msix_info[i].data, &bar0->xmsi_data); - val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6)); + val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); writeq(val64, &bar0->xmsi_access); - if (wait_for_msix_trans(nic, i)) { + if (wait_for_msix_trans(nic, msix_index)) { DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); continue; } @@ -3748,13 +3797,17 @@ static void store_xmsi_data(struct s2io_nic *nic) { struct XENA_dev_config __iomem *bar0 = nic->bar0; u64 val64, addr, data; - int i; + int i, msix_index; + + if (nic->device_type == XFRAME_I_DEVICE) + return; /* Store and display */ for (i=0; i < MAX_REQUESTED_MSI_X; i++) { - val64 = (s2BIT(15) | vBIT(i, 26, 6)); + msix_index = (i) ? 
((i-1) * 8 + 1): 0; + val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); writeq(val64, &bar0->xmsi_access); - if (wait_for_msix_trans(nic, i)) { + if (wait_for_msix_trans(nic, msix_index)) { DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); continue; } @@ -3770,11 +3823,11 @@ static void store_xmsi_data(struct s2io_nic *nic) static int s2io_enable_msi_x(struct s2io_nic *nic) { struct XENA_dev_config __iomem *bar0 = nic->bar0; - u64 tx_mat, rx_mat; + u64 rx_mat; u16 msi_control; /* Temp variable */ int ret, i, j, msix_indx = 1; - nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), + nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry), GFP_KERNEL); if (!nic->entries) { DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ @@ -3783,10 +3836,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) return -ENOMEM; } nic->mac_control.stats_info->sw_stat.mem_allocated - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); + += (nic->num_entries * sizeof(struct msix_entry)); + + memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry)); nic->s2io_entries = - kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), + kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry), GFP_KERNEL); if (!nic->s2io_entries) { DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", @@ -3794,60 +3849,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; kfree(nic->entries); nic->mac_control.stats_info->sw_stat.mem_freed - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); + += (nic->num_entries * sizeof(struct msix_entry)); return -ENOMEM; } nic->mac_control.stats_info->sw_stat.mem_allocated - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); - - for (i=0; i< MAX_REQUESTED_MSI_X; i++) { - nic->entries[i].entry = i; - nic->s2io_entries[i].entry = i; + += (nic->num_entries * sizeof(struct s2io_msix_entry)); + memset(nic->s2io_entries, 0, + nic->num_entries * sizeof(struct s2io_msix_entry)); + + nic->entries[0].entry = 0; + nic->s2io_entries[0].entry = 0; + nic->s2io_entries[0].in_use = MSIX_FLG; + nic->s2io_entries[0].type = MSIX_ALARM_TYPE; + nic->s2io_entries[0].arg = &nic->mac_control.fifos; + + for (i = 1; i < nic->num_entries; i++) { + nic->entries[i].entry = ((i - 1) * 8) + 1; + nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; nic->s2io_entries[i].arg = NULL; nic->s2io_entries[i].in_use = 0; } - tx_mat = readq(&bar0->tx_mat0_n[0]); - for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) { - tx_mat |= TX_MAT_SET(i, msix_indx); - nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i]; - nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE; - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; - } - writeq(tx_mat, &bar0->tx_mat0_n[0]); - rx_mat = readq(&bar0->rx_mat); - for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { + for (j = 0; j < nic->config.rx_ring_num; j++) { rx_mat |= RX_MAT_SET(j, msix_indx); - nic->s2io_entries[msix_indx].arg - = &nic->mac_control.rings[j]; - nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; + nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; + nic->s2io_entries[j+1].type = MSIX_RING_TYPE; + nic->s2io_entries[j+1].in_use = MSIX_FLG; + msix_indx += 8; } writeq(rx_mat, &bar0->rx_mat); + readq(&bar0->rx_mat); - nic->avail_msix_vectors = 0; - ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); + ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); /* We fail init if 
error or we get less vectors than min required */ - if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) { - nic->avail_msix_vectors = ret; - ret = pci_enable_msix(nic->pdev, nic->entries, ret); - } if (ret) { DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); kfree(nic->entries); nic->mac_control.stats_info->sw_stat.mem_freed - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); + += (nic->num_entries * sizeof(struct msix_entry)); kfree(nic->s2io_entries); nic->mac_control.stats_info->sw_stat.mem_freed - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); + += (nic->num_entries * sizeof(struct s2io_msix_entry)); nic->entries = NULL; nic->s2io_entries = NULL; - nic->avail_msix_vectors = 0; return -ENOMEM; } - if (!nic->avail_msix_vectors) - nic->avail_msix_vectors = MAX_REQUESTED_MSI_X; /* * To enable MSI-X, MSI also needs to be enabled, due to a bug @@ -3919,7 +3966,7 @@ static void remove_msix_isr(struct s2io_nic *sp) int i; u16 msi_control; - for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { + for (i = 0; i < sp->num_entries; i++) { if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) { int vector = sp->entries[i].vector; @@ -3975,29 +4022,6 @@ static int s2io_open(struct net_device *dev) netif_carrier_off(dev); sp->last_link_state = 0; - if (sp->config.intr_type == MSI_X) { - int ret = s2io_enable_msi_x(sp); - - if (!ret) { - ret = s2io_test_msi(sp); - /* rollback MSI-X, will re-enable during add_isr() */ - remove_msix_isr(sp); - } - if (ret) { - - DBG_PRINT(ERR_DBG, - "%s: MSI-X requested but failed to enable\n", - dev->name); - sp->config.intr_type = INTA; - } - } - - /* NAPI doesn't work well with MSI(X) */ - if (sp->config.intr_type != INTA) { - if(sp->config.napi) - sp->config.napi = 0; - } - /* Initialize H/W and enable interrupts */ err = s2io_card_up(sp); if (err) { @@ -4020,12 +4044,12 @@ hw_init_failed: if (sp->entries) { kfree(sp->entries); sp->mac_control.stats_info->sw_stat.mem_freed - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); + += (sp->num_entries * sizeof(struct msix_entry)); } if (sp->s2io_entries) { kfree(sp->s2io_entries); sp->mac_control.stats_info->sw_stat.mem_freed - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); + += (sp->num_entries * sizeof(struct s2io_msix_entry)); } } return err; @@ -4237,16 +4261,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Buffer_Pointer = pci_map_single(sp->pdev, fifo->ufo_in_band_v, sizeof(u64), PCI_DMA_TODEVICE); - if((txdp->Buffer_Pointer == 0) || - (txdp->Buffer_Pointer == DMA_ERROR_CODE)) + if (pci_dma_mapping_error(txdp->Buffer_Pointer)) goto pci_map_failed; txdp++; } txdp->Buffer_Pointer = pci_map_single (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); - if((txdp->Buffer_Pointer == 0) || - (txdp->Buffer_Pointer == DMA_ERROR_CODE)) + if (pci_dma_mapping_error(txdp->Buffer_Pointer)) goto pci_map_failed; txdp->Host_Control = (unsigned long) skb; @@ -4327,40 +4349,65 @@ s2io_alarm_handle(unsigned long data) mod_timer(&sp->alarm_timer, jiffies + HZ / 2); } -static int s2io_chk_rx_buffers(struct ring_info *ring) -{ - if (fill_rx_buffers(ring) == -ENOMEM) { - DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); - DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); - } - return 0; -} - static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) { struct ring_info *ring = (struct ring_info *)dev_id; struct s2io_nic *sp = ring->nic; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + struct net_device *dev = sp->dev; - if (!is_s2io_card_up(sp)) + if 
(unlikely(!is_s2io_card_up(sp))) return IRQ_HANDLED; - rx_intr_handler(ring); - s2io_chk_rx_buffers(ring); + if (sp->config.napi) { + u8 __iomem *addr = NULL; + u8 val8 = 0; + + addr = (u8 __iomem *)&bar0->xmsi_mask_reg; + addr += (7 - ring->ring_no); + val8 = (ring->ring_no == 0) ? 0x7f : 0xff; + writeb(val8, addr); + val8 = readb(addr); + netif_rx_schedule(dev, &ring->napi); + } else { + rx_intr_handler(ring, 0); + s2io_chk_rx_buffers(ring); + } return IRQ_HANDLED; } static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) { - struct fifo_info *fifo = (struct fifo_info *)dev_id; - struct s2io_nic *sp = fifo->nic; + int i; + struct fifo_info *fifos = (struct fifo_info *)dev_id; + struct s2io_nic *sp = fifos->nic; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + struct config_param *config = &sp->config; + u64 reason; - if (!is_s2io_card_up(sp)) + if (unlikely(!is_s2io_card_up(sp))) + return IRQ_NONE; + + reason = readq(&bar0->general_int_status); + if (unlikely(reason == S2IO_MINUS_ONE)) + /* Nothing much can be done. Get out */ return IRQ_HANDLED; - tx_intr_handler(fifo); + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); + + if (reason & GEN_INTR_TXTRAFFIC) + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); + + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&fifos[i]); + + writeq(sp->general_int_mask, &bar0->general_int_mask); + readl(&bar0->general_int_status); + return IRQ_HANDLED; } + static void s2io_txpic_intr_handle(struct s2io_nic *sp) { struct XENA_dev_config __iomem *bar0 = sp->bar0; @@ -4762,14 +4809,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) if (config->napi) { if (reason & GEN_INTR_RXTRAFFIC) { - if (likely(netif_rx_schedule_prep(dev, - &sp->napi))) { - __netif_rx_schedule(dev, &sp->napi); - writeq(S2IO_MINUS_ONE, - &bar0->rx_traffic_mask); - } else - writeq(S2IO_MINUS_ONE, - &bar0->rx_traffic_int); + netif_rx_schedule(dev, &sp->napi); + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); + readl(&bar0->rx_traffic_int); } } else { /* @@ -4781,7 +4824,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); for (i = 0; i < config->rx_ring_num; i++) - rx_intr_handler(&mac_control->rings[i]); + rx_intr_handler(&mac_control->rings[i], 0); } /* @@ -6836,10 +6879,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, pci_map_single( sp->pdev, (*skb)->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); - if( (rxdp1->Buffer0_ptr == 0) || - (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) { + if (pci_dma_mapping_error(rxdp1->Buffer0_ptr)) goto memalloc_failed; - } rxdp->Host_Control = (unsigned long) (*skb); } } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { @@ -6865,15 +6906,12 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, pci_map_single(sp->pdev, (*skb)->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - if( (rxdp3->Buffer2_ptr == 0) || - (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) { + if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) goto memalloc_failed; - } rxdp3->Buffer0_ptr = *temp0 = pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); - if( (rxdp3->Buffer0_ptr == 0) || - (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) { + if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { pci_unmap_single (sp->pdev, (dma_addr_t)rxdp3->Buffer2_ptr, dev->mtu + 4, PCI_DMA_FROMDEVICE); @@ -6885,8 +6923,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, rxdp3->Buffer1_ptr = *temp1 = 
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); - if( (rxdp3->Buffer1_ptr == 0) || - (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { + if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { pci_unmap_single (sp->pdev, (dma_addr_t)rxdp3->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); @@ -6984,62 +7021,62 @@ static int s2io_add_isr(struct s2io_nic * sp) /* After proper initialization of H/W, register ISR */ if (sp->config.intr_type == MSI_X) { - int i, msix_tx_cnt=0,msix_rx_cnt=0; - - for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { - if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { - sprintf(sp->desc[i], "%s:MSI-X-%d-TX", + int i, msix_rx_cnt = 0; + + for (i = 0; i < sp->num_entries; i++) { + if (sp->s2io_entries[i].in_use == MSIX_FLG) { + if (sp->s2io_entries[i].type == + MSIX_RING_TYPE) { + sprintf(sp->desc[i], "%s:MSI-X-%d-RX", + dev->name, i); + err = request_irq(sp->entries[i].vector, + s2io_msix_ring_handle, 0, + sp->desc[i], + sp->s2io_entries[i].arg); + } else if (sp->s2io_entries[i].type == + MSIX_ALARM_TYPE) { + sprintf(sp->desc[i], "%s:MSI-X-%d-TX", dev->name, i); - err = request_irq(sp->entries[i].vector, - s2io_msix_fifo_handle, 0, sp->desc[i], - sp->s2io_entries[i].arg); - /* If either data or addr is zero print it */ - if(!(sp->msix_info[i].addr && - sp->msix_info[i].data)) { - DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " - "Data:0x%llx\n",sp->desc[i], - (unsigned long long) - sp->msix_info[i].addr, - (unsigned long long) - sp->msix_info[i].data); - } else { - msix_tx_cnt++; + err = request_irq(sp->entries[i].vector, + s2io_msix_fifo_handle, 0, + sp->desc[i], + sp->s2io_entries[i].arg); + } - } else { - sprintf(sp->desc[i], "%s:MSI-X-%d-RX", - dev->name, i); - err = request_irq(sp->entries[i].vector, - s2io_msix_ring_handle, 0, sp->desc[i], - sp->s2io_entries[i].arg); - /* If either data or addr is zero print it */ - if(!(sp->msix_info[i].addr && + /* if either data or addr is zero print it. 
*/ + if (!(sp->msix_info[i].addr && sp->msix_info[i].data)) { - DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " - "Data:0x%llx\n",sp->desc[i], + DBG_PRINT(ERR_DBG, + "%s @Addr:0x%llx Data:0x%llx\n", + sp->desc[i], (unsigned long long) sp->msix_info[i].addr, (unsigned long long) - sp->msix_info[i].data); - } else { + ntohl(sp->msix_info[i].data)); + } else msix_rx_cnt++; + if (err) { + remove_msix_isr(sp); + + DBG_PRINT(ERR_DBG, + "%s:MSI-X-%d registration " + "failed\n", dev->name, i); + + DBG_PRINT(ERR_DBG, + "%s: Defaulting to INTA\n", + dev->name); + sp->config.intr_type = INTA; + break; } + sp->s2io_entries[i].in_use = + MSIX_REGISTERED_SUCCESS; } - if (err) { - remove_msix_isr(sp); - DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " - "failed\n", dev->name, i); - DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n", - dev->name); - sp->config.intr_type = INTA; - break; - } - sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; } if (!err) { - printk(KERN_INFO "MSI-X-TX %d entries enabled\n", - msix_tx_cnt); printk(KERN_INFO "MSI-X-RX %d entries enabled\n", - msix_rx_cnt); + --msix_rx_cnt); + DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" + " through alarm vector\n"); } } if (sp->config.intr_type == INTA) { @@ -7080,8 +7117,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) clear_bit(__S2IO_STATE_CARD_UP, &sp->state); /* Disable napi */ - if (config->napi) - napi_disable(&sp->napi); + if (sp->config.napi) { + int off = 0; + if (config->intr_type == MSI_X) { + for (; off < sp->config.rx_ring_num; off++) + napi_disable(&sp->mac_control.rings[off].napi); + } + else + napi_disable(&sp->napi); + } /* disable Tx and Rx traffic on the NIC */ if (do_io) @@ -7173,8 +7217,15 @@ static int s2io_card_up(struct s2io_nic * sp) } /* Initialise napi */ - if (config->napi) - napi_enable(&sp->napi); + if (config->napi) { + int i; + if (config->intr_type == MSI_X) { + for (i = 0; i < sp->config.rx_ring_num; i++) + napi_enable(&sp->mac_control.rings[i].napi); + } else { + napi_enable(&sp->napi); + } + } /* Maintain the state prior to the open */ if (sp->promisc_flg) @@ -7217,7 +7268,7 @@ static int s2io_card_up(struct s2io_nic * sp) /* Enable select interrupts */ en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); if (sp->config.intr_type != INTA) - en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); + en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); else { interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; interruptible |= TX_PIC_INTR; @@ -7615,9 +7666,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, rx_ring_num = MAX_RX_RINGS; } - if (*dev_intr_type != INTA) - napi = 0; - if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. 
" "Defaulting to INTA\n"); @@ -7918,8 +7966,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) * will use eth_mac_addr() for dev->set_mac_address * mac address will be set every time dev->open() is called */ - netif_napi_add(dev, &sp->napi, s2io_poll, 32); - #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = s2io_netpoll; #endif @@ -7963,6 +8009,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) } } + if (sp->config.intr_type == MSI_X) { + sp->num_entries = config->rx_ring_num + 1; + ret = s2io_enable_msi_x(sp); + + if (!ret) { + ret = s2io_test_msi(sp); + /* rollback MSI-X, will re-enable during add_isr() */ + remove_msix_isr(sp); + } + if (ret) { + + DBG_PRINT(ERR_DBG, + "%s: MSI-X requested but failed to enable\n", + dev->name); + sp->config.intr_type = INTA; + } + } + + if (config->intr_type == MSI_X) { + for (i = 0; i < config->rx_ring_num ; i++) + netif_napi_add(dev, &mac_control->rings[i].napi, + s2io_poll_msix, 64); + } else { + netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); + } + /* Not needed for Herc */ if (sp->device_type & XFRAME_I_DEVICE) { /* @@ -8013,6 +8085,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* store mac addresses from CAM to s2io_nic structure */ do_s2io_store_unicast_mc(sp); + /* Configure MSIX vector for number of rings configured plus one */ + if ((sp->device_type == XFRAME_II_DEVICE) && + (config->intr_type == MSI_X)) + sp->num_entries = config->rx_ring_num + 1; + /* Store the values of the MSIX table in the s2io_nic structure */ store_xmsi_data(sp); /* reset Nic and bring it to known state */ @@ -8078,8 +8155,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) break; } - if (napi) + switch (sp->config.napi) { + case 0: + DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); + break; + case 1: DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); + break; + } DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, sp->config.tx_fifo_num); diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 0709ebae913..1827b6686c9 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -75,10 +75,6 @@ static int debug_level = ERR_DBG; /* DEBUG message print. */ #define DBG_PRINT(dbg_level, args...) 
if(!(debug_level<dbg_level)) printk(args) -#ifndef DMA_ERROR_CODE -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) -#endif - /* Protocol assist features of the NIC */ #define L3_CKSUM_OK 0xFFFF #define L4_CKSUM_OK 0xFFFF @@ -706,7 +702,7 @@ struct ring_info { /* per-ring buffer counter */ u32 rx_bufs_left; - #define MAX_LRO_SESSIONS 32 +#define MAX_LRO_SESSIONS 32 struct lro lro0_n[MAX_LRO_SESSIONS]; u8 lro; @@ -725,6 +721,11 @@ struct ring_info { /* copy of sp->pdev pointer */ struct pci_dev *pdev; + /* Per ring napi struct */ + struct napi_struct napi; + + unsigned long interrupt_count; + /* * Place holders for the virtual and physical addresses of * all the Rx Blocks @@ -841,7 +842,7 @@ struct usr_addr { * Structure to keep track of the MSI-X vectors and the corresponding * argument registered against each vector */ -#define MAX_REQUESTED_MSI_X 17 +#define MAX_REQUESTED_MSI_X 9 struct s2io_msix_entry { u16 vector; @@ -849,8 +850,8 @@ struct s2io_msix_entry void *arg; u8 type; -#define MSIX_FIFO_TYPE 1 -#define MSIX_RING_TYPE 2 +#define MSIX_ALARM_TYPE 1 +#define MSIX_RING_TYPE 2 u8 in_use; #define MSIX_REGISTERED_SUCCESS 0xAA @@ -877,7 +878,6 @@ struct s2io_nic { */ int pkts_to_process; struct net_device *dev; - struct napi_struct napi; struct mac_info mac_control; struct config_param config; struct pci_dev *pdev; @@ -948,6 +948,7 @@ struct s2io_nic { */ u8 other_fifo_idx; + struct napi_struct napi; /* after blink, the adapter must be restored with original * values. */ @@ -962,6 +963,7 @@ struct s2io_nic { unsigned long long start_time; struct vlan_group *vlgrp; #define MSIX_FLG 0xA5 + int num_entries; struct msix_entry *entries; int msi_detected; wait_queue_head_t msi_wait; @@ -982,6 +984,7 @@ struct s2io_nic { u16 lro_max_aggr_per_sess; volatile unsigned long state; u64 general_int_mask; + #define VPD_STRING_LEN 80 u8 product_name[VPD_STRING_LEN]; u8 serial_num[VPD_STRING_LEN]; @@ -1103,7 +1106,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev); static int init_shared_mem(struct s2io_nic *sp); static void free_shared_mem(struct s2io_nic *sp); static int init_nic(struct s2io_nic *nic); -static void rx_intr_handler(struct ring_info *ring_data); +static int rx_intr_handler(struct ring_info *ring_data, int budget); static void tx_intr_handler(struct fifo_info *fifo_data); static void s2io_handle_errors(void * dev_id); @@ -1114,7 +1117,8 @@ static void s2io_set_multicast(struct net_device *dev); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); static void s2io_link(struct s2io_nic * sp, int link); static void s2io_reset(struct s2io_nic * sp); -static int s2io_poll(struct napi_struct *napi, int budget); +static int s2io_poll_msix(struct napi_struct *napi, int budget); +static int s2io_poll_inta(struct napi_struct *napi, int budget); static void s2io_init_pci(struct s2io_nic * sp); static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); static void s2io_alarm_handle(unsigned long data); diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 888b7dec986..33bb18f810f 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -179,8 +179,7 @@ enum sbmac_state { #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 -#define ETHER_ALIGN 2 -#define ETHER_ADDR_LEN 6 +#define ETHER_ADDR_LEN 6 #define ENET_PACKET_SIZE 1518 /*#define ENET_PACKET_SIZE 9216 */ @@ -262,8 +261,6 @@ struct sbmac_softc { spinlock_t sbm_lock; /* spin lock */ int sbm_devflags; /* current device flags */ - int sbm_buffersize; - /* * 
Controller-specific things */ @@ -305,10 +302,11 @@ struct sbmac_softc { static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, int txrx, int maxdescr); static void sbdma_channel_start(struct sbmacdma *d, int rxtx); -static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); +static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, + struct sk_buff *m); static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); static void sbdma_emptyring(struct sbmacdma *d); -static void sbdma_fillring(struct sbmacdma *d); +static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, int work_to_do, int poll); static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, @@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d) d->sbdma_remptr = NULL; } -static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) +static inline void sbdma_align_skb(struct sk_buff *skb, + unsigned int power2, unsigned int offset) { - unsigned long addr; - unsigned long newaddr; - - addr = (unsigned long) skb->data; - - newaddr = (addr + power2 - 1) & ~(power2 - 1); + unsigned char *addr = skb->data; + unsigned char *newaddr = PTR_ALIGN(addr, power2); - skb_reserve(skb,newaddr-addr+offset); + skb_reserve(skb, newaddr - addr + offset); } @@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) * this queues a buffer for inbound packets. * * Input parameters: - * d - DMA channel descriptor + * sc - softc structure + * d - DMA channel descriptor * sb - sk_buff to add, or NULL if we should allocate one * * Return value: @@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) ********************************************************************* */ -static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) +static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, + struct sk_buff *sb) { + struct net_device *dev = sc->sbm_dev; struct sbdmadscr *dsc; struct sbdmadscr *nextdsc; struct sk_buff *sb_new = NULL; @@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) */ if (sb == NULL) { - sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); + sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + + SMP_CACHE_BYTES * 2 + + NET_IP_ALIGN); if (sb_new == NULL) { pr_info("%s: sk_buff allocation failed\n", d->sbdma_eth->sbm_dev->name); return -ENOBUFS; } - sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); + sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); } else { sb_new = sb; @@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) * Do not interrupt per DMA transfer. 
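The sb1250-mac changes above replace the driver's hand-rolled pointer arithmetic with PTR_ALIGN() and switch the receive-buffer padding from the local ETHER_ALIGN constant to NET_IP_ALIGN. A minimal, self-contained sketch of that alignment idiom follows; the demo_align_rx_skb() helper name is hypothetical and not part of the patch, which does the equivalent work inside sbdma_align_skb().

#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/skbuff.h>

/*
 * Illustrative only: round the data pointer of a freshly allocated RX skb
 * up to a cache-line boundary, then shift it by NET_IP_ALIGN so the IP
 * header of a received Ethernet frame lands on a 4-byte boundary.
 */
static inline void demo_align_rx_skb(struct sk_buff *skb)
{
	unsigned char *addr = skb->data;
	unsigned char *aligned = PTR_ALIGN(addr, SMP_CACHE_BYTES);

	/* skb_reserve() only advances skb->data and skb->tail, so the skb
	 * must still be empty (straight from netdev_alloc_skb()) here. */
	skb_reserve(skb, (aligned - addr) + NET_IP_ALIGN);
}

Because skb_reserve() merely moves pointers within headroom that was already allocated, doing this immediately after allocation costs nothing beyond the extra SMP_CACHE_BYTES of headroom requested at alloc time.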
*/ dsc->dscr_a = virt_to_phys(sb_new->data) | - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; #else dsc->dscr_a = virt_to_phys(sb_new->data) | - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | M_DMA_DSCRA_INTERRUPT; #endif @@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d) * with sk_buffs * * Input parameters: - * d - DMA channel + * sc - softc structure + * d - DMA channel * * Return value: * nothing ********************************************************************* */ -static void sbdma_fillring(struct sbmacdma *d) +static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) { int idx; - for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { - if (sbdma_add_rcvbuffer(d,NULL) != 0) + for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { + if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) break; } } @@ -1159,10 +1160,11 @@ again: * packet and put it right back on the receive ring. */ - if (unlikely (sbdma_add_rcvbuffer(d,NULL) == - -ENOBUFS)) { + if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == + -ENOBUFS)) { dev->stats.rx_dropped++; - sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ + /* Re-add old buffer */ + sbdma_add_rcvbuffer(sc, d, sb); /* No point in continuing at the moment */ printk(KERN_ERR "dropped packet (1)\n"); d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); @@ -1212,7 +1214,7 @@ again: * put it back on the receive ring. */ dev->stats.rx_errors++; - sbdma_add_rcvbuffer(d,sb); + sbdma_add_rcvbuffer(sc, d, sb); } @@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s) * Fill the receive ring */ - sbdma_fillring(&(s->sbm_rxdma)); + sbdma_fillring(s, &(s->sbm_rxdma)); /* * Turn on the rest of the bits in the enable register @@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) dev->dev_addr[i] = eaddr[i]; } - - /* - * Init packet size - */ - - sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; - /* * Initialize context (get pointers to registers and stuff), then * allocate the memory for the descriptor tables. 
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index f64a860029b..61955f8d801 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned entry; u32 tx_status; - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - if (unlikely(skb->len > TX_BUF_SIZE)) { dev->stats.tx_dropped++; goto out; @@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); len = skb->len; + if (len < ETH_ZLEN) { + memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, + 0, ETH_ZLEN - len); + len = ETH_ZLEN; + } wmb(); diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile index 0f023447eaf..1d2daeec7ac 100644 --- a/drivers/net/sfc/Makefile +++ b/drivers/net/sfc/Makefile @@ -1,5 +1,5 @@ sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ - i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \ - tenxpress.o boards.o sfe4001.o + i2c-direct.o selftest.o ethtool.o xfp_phy.o \ + mdio_10g.o tenxpress.o boards.o sfe4001.o obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h index 2806201644c..2c79d27404e 100644 --- a/drivers/net/sfc/bitfield.h +++ b/drivers/net/sfc/bitfield.h @@ -483,7 +483,7 @@ typedef union efx_oword { #endif #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ - if (FALCON_REV(efx) >= FALCON_REV_B0) { \ + if (falcon_rev(efx) >= FALCON_REV_B0) { \ EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ } else { \ EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ @@ -491,7 +491,7 @@ typedef union efx_oword { } while (0) #define EFX_QWORD_FIELD_VER(efx, qword, field) \ - (FALCON_REV(efx) >= FALCON_REV_B0 ? \ + (falcon_rev(efx) >= FALCON_REV_B0 ? \ EFX_QWORD_FIELD((qword), field##_B0) : \ EFX_QWORD_FIELD((qword), field##_A1)) @@ -501,8 +501,5 @@ typedef union efx_oword { #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) #define EFX_DMA_TYPE_WIDTH(width) \ (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) -#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? 
\ - ~((u64) 0) : ~((u32) 0)) -#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK) #endif /* EFX_BITFIELD_H */ diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c index eecaa6d5858..7fc0328dc05 100644 --- a/drivers/net/sfc/boards.c +++ b/drivers/net/sfc/boards.c @@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context) struct efx_blinker *bl = &efx->board_info.blinker; efx->board_info.set_fault_led(efx, bl->state); bl->state = !bl->state; - if (bl->resubmit) { - bl->timer.expires = jiffies + BLINK_INTERVAL; - add_timer(&bl->timer); - } + if (bl->resubmit) + mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); } static void board_blink(struct efx_nic *efx, int blink) @@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink) blinker->state = 0; setup_timer(&blinker->timer, blink_led_timer, (unsigned long)efx); - blinker->timer.expires = jiffies + BLINK_INTERVAL; - add_timer(&blinker->timer); + mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); } else { blinker->resubmit = 0; if (blinker->timer.function) diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h index f56341d428e..695764dc2e6 100644 --- a/drivers/net/sfc/boards.h +++ b/drivers/net/sfc/boards.h @@ -22,5 +22,7 @@ enum efx_board_type { extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); extern int sfe4001_poweron(struct efx_nic *efx); extern void sfe4001_poweroff(struct efx_nic *efx); +/* Are we putting the PHY into flash config mode */ +extern unsigned int sfe4001_phy_flash_cfg; #endif diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 59edcf793c1..449760642e3 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota) */ static inline void efx_channel_processed(struct efx_channel *channel) { - /* Write to EVQ_RPTR_REG. If a new event arrived in a race - * with finishing processing, a new interrupt will be raised. - */ + /* The interrupt handler for this channel may set work_pending + * as soon as we acknowledge the events we've seen. Make sure + * it's cleared before then. */ channel->work_pending = 0; - smp_wmb(); /* Ensure channel updated before any new interrupt. */ + smp_wmb(); + falcon_eventq_read_ack(channel); } @@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel) napi_disable(&channel->napi_str); /* Poll the channel */ - (void) efx_process_channel(channel, efx->type->evq_size); + efx_process_channel(channel, efx->type->evq_size); /* Ack the eventq. This may cause an interrupt to be generated * when they are reenabled */ @@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel) * *************************************************************************/ -/* Setup per-NIC RX buffer parameters. - * Calculate the rx buffer allocation parameters required to support - * the current MTU, including padding for header alignment and overruns. 
- */ -static void efx_calc_rx_buffer_params(struct efx_nic *efx) -{ - unsigned int order, len; - - len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + - EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + - efx->type->rx_buffer_padding); - - /* Calculate page-order */ - for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order) - ; - - efx->rx_buffer_len = len; - efx->rx_buffer_order = order; -} - static int efx_probe_channel(struct efx_channel *channel) { struct efx_tx_queue *tx_queue; @@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx) struct efx_channel *channel; int rc = 0; - efx_calc_rx_buffer_params(efx); + /* Calculate the rx buffer allocation parameters required to + * support the current MTU, including padding for header + * alignment and overruns. + */ + efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + efx->rx_buffer_order = get_order(efx->rx_buffer_len); /* Initialise the channels */ efx_for_each_channel(channel, efx) { @@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel) netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); + /* The interrupt handler for this channel may set work_pending + * as soon as we enable it. Make sure it's cleared before + * then. Similarly, make sure it sees the enabled flag set. */ channel->work_pending = 0; channel->enabled = 1; - smp_wmb(); /* ensure channel updated before first interrupt */ + smp_wmb(); napi_enable(&channel->napi_str); @@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx) mutex_unlock(&efx->mac_lock); /* Serialise against efx_set_multicast_list() */ - if (NET_DEV_REGISTERED(efx)) { + if (efx_dev_registered(efx)) { netif_tx_lock_bh(efx->net_dev); netif_tx_unlock_bh(efx->net_dev); } @@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx) efx->membase = ioremap_nocache(efx->membase_phys, efx->type->mem_map_size); if (!efx->membase) { - EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", - efx->type->mem_bar, efx->membase_phys, + EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", + efx->type->mem_bar, + (unsigned long long)efx->membase_phys, efx->type->mem_map_size); rc = -ENOMEM; goto fail4; } - EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", - efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, - efx->membase); + EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", + efx->type->mem_bar, (unsigned long long)efx->membase_phys, + efx->type->mem_map_size, efx->membase); return 0; fail4: release_mem_region(efx->membase_phys, efx->type->mem_map_size); fail3: - efx->membase_phys = 0UL; + efx->membase_phys = 0; fail2: pci_disable_device(efx->pci_dev); fail1: @@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx) if (efx->membase_phys) { pci_release_region(efx->pci_dev, efx->type->mem_bar); - efx->membase_phys = 0UL; + efx->membase_phys = 0; } pci_disable_device(efx->pci_dev); @@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx) return; if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) return; - if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) + if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) return; /* Mark the port as enabled so port reconfigurations can start, then @@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx) cancel_delayed_work_sync(&efx->monitor_work); /* Ensure that all RX slow refills are complete. 
*/ - efx_for_each_rx_queue(rx_queue, efx) { + efx_for_each_rx_queue(rx_queue, efx) cancel_delayed_work_sync(&rx_queue->work); - } /* Stop scheduled port reconfigurations */ cancel_work_sync(&efx->reconfigure_work); @@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx) falcon_disable_interrupts(efx); if (efx->legacy_irq) synchronize_irq(efx->legacy_irq); - efx_for_each_channel_with_interrupt(channel, efx) + efx_for_each_channel_with_interrupt(channel, efx) { if (channel->irq) synchronize_irq(channel->irq); + } /* Stop all NAPI processing and synchronous rx refills */ efx_for_each_channel(channel, efx) @@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx) /* Stop the kernel transmit interface late, so the watchdog * timer isn't ticking over the flush */ efx_stop_queue(efx); - if (NET_DEV_REGISTERED(efx)) { + if (efx_dev_registered(efx)) { netif_tx_lock_bh(efx->net_dev); netif_tx_unlock_bh(efx->net_dev); } @@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev) return 0; } -/* Context: process, dev_base_lock held, non-blocking. */ +/* Context: process, dev_base_lock or RTNL held, non-blocking. */ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) { struct efx_nic *efx = net_dev->priv; struct efx_mac_stats *mac_stats = &efx->mac_stats; struct net_device_stats *stats = &net_dev->stats; + /* Update stats if possible, but do not wait if another thread + * is updating them (or resetting the NIC); slightly stale + * stats are acceptable. + */ if (!spin_trylock(&efx->stats_lock)) return stats; if (efx->state == STATE_RUNNING) { @@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev) static int efx_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *net_dev = (struct net_device *)ptr; + struct net_device *net_dev = ptr; if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { struct efx_nic *efx = net_dev->priv; @@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx) efx_for_each_tx_queue(tx_queue, efx) efx_release_tx_buffers(tx_queue); - if (NET_DEV_REGISTERED(efx)) { + if (efx_dev_registered(efx)) { strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); unregister_netdev(efx->net_dev); } @@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx) if (method == RESET_TYPE_DISABLE) { /* Reinitialise the device anyway so the driver unload sequence * can talk to the external SRAM */ - (void) falcon_init_nic(efx); + falcon_init_nic(efx); rc = -EIO; goto fail4; } @@ -1873,6 +1869,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, tx_queue->queue = i; tx_queue->buffer = NULL; tx_queue->channel = &efx->channel[0]; /* for safety */ + tx_queue->tso_headers_free = NULL; } for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { rx_queue = &efx->rx_queue[i]; @@ -2071,7 +2068,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, net_dev = alloc_etherdev(sizeof(*efx)); if (!net_dev) return -ENOMEM; - net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; + net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_TSO); if (lro) net_dev->features |= NETIF_F_LRO; efx = net_dev->priv; diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h index 43663a4619d..c53290d08e2 100644 --- a/drivers/net/sfc/enum.h +++ b/drivers/net/sfc/enum.h @@ -10,6 +10,55 @@ #ifndef EFX_ENUM_H #define EFX_ENUM_H +/** + * enum efx_loopback_mode - loopback modes + * @LOOPBACK_NONE: no 
loopback + * @LOOPBACK_XGMII: loopback within MAC at XGMII level + * @LOOPBACK_XGXS: loopback within MAC at XGXS level + * @LOOPBACK_XAUI: loopback within MAC at XAUI level + * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level + * @LOOPBACK_PCS: loopback within PHY at PCS level + * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level + * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) + */ +/* Please keep in order and up-to-date w.r.t the following two #defines */ +enum efx_loopback_mode { + LOOPBACK_NONE = 0, + LOOPBACK_MAC = 1, + LOOPBACK_XGMII = 2, + LOOPBACK_XGXS = 3, + LOOPBACK_XAUI = 4, + LOOPBACK_PHY = 5, + LOOPBACK_PHYXS = 6, + LOOPBACK_PCS = 7, + LOOPBACK_PMAPMD = 8, + LOOPBACK_NETWORK = 9, + LOOPBACK_MAX +}; + +#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD + +extern const char *efx_loopback_mode_names[]; +#define LOOPBACK_MODE_NAME(mode) \ + STRING_TABLE_LOOKUP(mode, efx_loopback_mode) +#define LOOPBACK_MODE(efx) \ + LOOPBACK_MODE_NAME(efx->loopback_mode) + +/* These loopbacks occur within the controller */ +#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \ + (1 << LOOPBACK_XGXS) | \ + (1 << LOOPBACK_XAUI)) + +#define LOOPBACK_MASK(_efx) \ + (1 << (_efx)->loopback_mode) + +#define LOOPBACK_INTERNAL(_efx) \ + ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0) + +#define LOOPBACK_OUT_OF(_from, _to, _mask) \ + (((LOOPBACK_MASK(_from) & (_mask)) && \ + ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0) + /*****************************************************************************/ /** diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index ad541badbd9..e2c75d10161 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -12,12 +12,26 @@ #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include "net_driver.h" +#include "selftest.h" #include "efx.h" #include "ethtool.h" #include "falcon.h" #include "gmii.h" #include "mac.h" +const char *efx_loopback_mode_names[] = { + [LOOPBACK_NONE] = "NONE", + [LOOPBACK_MAC] = "MAC", + [LOOPBACK_XGMII] = "XGMII", + [LOOPBACK_XGXS] = "XGXS", + [LOOPBACK_XAUI] = "XAUI", + [LOOPBACK_PHY] = "PHY", + [LOOPBACK_PHYXS] = "PHY(XS)", + [LOOPBACK_PCS] = "PHY(PCS)", + [LOOPBACK_PMAPMD] = "PHY(PMAPMD)", + [LOOPBACK_NETWORK] = "NETWORK", +}; + static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); struct ethtool_string { @@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev, strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } +/** + * efx_fill_test - fill in an individual self-test entry + * @test_index: Index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * @test: Pointer to test result (used only if data != %NULL) + * @unit_format: Unit name format (e.g. "channel\%d") + * @unit_id: Unit id (e.g. 0 for "channel0") + * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") + * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent") + * + * Fill in an individual self-test entry. 
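
Editorial aside: the efx_fill_test() kernel-doc above depends on a fill-or-count convention, where the same walk is run once with NULL tables to count entries and again with real tables to fill in names and results, so the ethtool string table and the result array cannot drift apart. A stand-alone userspace sketch of that pattern (the demo_* names are illustrative, not driver symbols):

    #include <stdio.h>
    #include <string.h>

    struct demo_string { char name[32]; };

    /* Returns the number of entries; fills in strings and/or data only
     * when the caller passed non-NULL tables. */
    static int demo_fill_tests(struct demo_string *strings, long *data)
    {
            int n = 0;

            if (strings)
                    strncpy(strings[n].name, "core      interrupt",
                            sizeof(strings[n].name));
            if (data)
                    data[n] = 1;    /* pretend the test passed */
            n++;

            return n;               /* same count on every call */
    }

    int main(void)
    {
            int count = demo_fill_tests(NULL, NULL);        /* counting pass */
            struct demo_string strings[4];
            long results[4];

            demo_fill_tests(strings, NULL);                 /* name pass */
            demo_fill_tests(NULL, results);                 /* result pass */
            printf("%d tests: %s = %ld\n", count, strings[0].name, results[0]);
            return 0;
    }
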
+ */ +static void efx_fill_test(unsigned int test_index, + struct ethtool_string *strings, u64 *data, + int *test, const char *unit_format, int unit_id, + const char *test_format, const char *test_id) +{ + struct ethtool_string unit_str, test_str; + + /* Fill data value, if applicable */ + if (data) + data[test_index] = *test; + + /* Fill string, if applicable */ + if (strings) { + snprintf(unit_str.name, sizeof(unit_str.name), + unit_format, unit_id); + snprintf(test_str.name, sizeof(test_str.name), + test_format, test_id); + snprintf(strings[test_index].name, + sizeof(strings[test_index].name), + "%-9s%-17s", unit_str.name, test_str.name); + } +} + +#define EFX_PORT_NAME "port%d", 0 +#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel +#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue +#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue +#define EFX_LOOPBACK_NAME(_mode, _counter) \ + "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode) + +/** + * efx_fill_loopback_test - fill in a block of loopback self-test entries + * @efx: Efx NIC + * @lb_tests: Efx loopback self-test results structure + * @mode: Loopback test mode + * @test_index: Starting index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + */ +static int efx_fill_loopback_test(struct efx_nic *efx, + struct efx_loopback_self_tests *lb_tests, + enum efx_loopback_mode mode, + unsigned int test_index, + struct ethtool_string *strings, u64 *data) +{ + struct efx_tx_queue *tx_queue; + + efx_for_each_tx_queue(tx_queue, efx) { + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_sent[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_sent")); + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_done[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_done")); + } + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_good, + EFX_PORT_NAME, + EFX_LOOPBACK_NAME(mode, "rx_good")); + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_bad, + EFX_PORT_NAME, + EFX_LOOPBACK_NAME(mode, "rx_bad")); + + return test_index; +} + +/** + * efx_ethtool_fill_self_tests - get self-test details + * @efx: Efx NIC + * @tests: Efx self-test results structure, or %NULL + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + */ +static int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + struct ethtool_string *strings, + u64 *data) +{ + struct efx_channel *channel; + unsigned int n = 0; + enum efx_loopback_mode mode; + + /* Interrupt */ + efx_fill_test(n++, strings, data, &tests->interrupt, + "core", 0, "interrupt", NULL); + + /* Event queues */ + efx_for_each_channel(channel, efx) { + efx_fill_test(n++, strings, data, + &tests->eventq_dma[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.dma", NULL); + efx_fill_test(n++, strings, data, + &tests->eventq_int[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.int", NULL); + efx_fill_test(n++, strings, data, + &tests->eventq_poll[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.poll", NULL); + } + + /* PHY presence */ + efx_fill_test(n++, strings, data, &tests->phy_ok, + EFX_PORT_NAME, "phy_ok", NULL); + + /* Loopback tests */ + efx_fill_test(n++, strings, data, &tests->loopback_speed, + EFX_PORT_NAME, "loopback.speed", NULL); + efx_fill_test(n++, strings, data, &tests->loopback_full_duplex, + EFX_PORT_NAME, "loopback.full_duplex", NULL); + for (mode = 
LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { + if (!(efx->loopback_modes & (1 << mode))) + continue; + n = efx_fill_loopback_test(efx, + &tests->loopback[mode], mode, n, + strings, data); + } + + return n; +} + static int efx_ethtool_get_stats_count(struct net_device *net_dev) { return EFX_ETHTOOL_NUM_STATS; } +static int efx_ethtool_self_test_count(struct net_device *net_dev) +{ + struct efx_nic *efx = net_dev->priv; + + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); +} + static void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, u8 *strings) { + struct efx_nic *efx = net_dev->priv; struct ethtool_string *ethtool_strings = (struct ethtool_string *)strings; int i; - if (string_set == ETH_SS_STATS) + switch (string_set) { + case ETH_SS_STATS: for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) strncpy(ethtool_strings[i].name, efx_ethtool_stats[i].name, sizeof(ethtool_strings[i].name)); + break; + case ETH_SS_TEST: + efx_ethtool_fill_self_tests(efx, NULL, + ethtool_strings, NULL); + break; + default: + /* No other string sets */ + break; + } } static void efx_ethtool_get_stats(struct net_device *net_dev, @@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, } } +static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) +{ + int rc; + + /* Our TSO requires TX checksumming, so force TX checksumming + * on when TSO is enabled. + */ + if (enable) { + rc = efx_ethtool_set_tx_csum(net_dev, 1); + if (rc) + return rc; + } + + return ethtool_op_set_tso(net_dev, enable); +} + static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) { struct efx_nic *efx = net_dev->priv; @@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) efx_flush_queues(efx); + /* Our TSO requires TX checksumming, so disable TSO when + * checksumming is disabled + */ + if (!enable) { + rc = efx_ethtool_set_tso(net_dev, 0); + if (rc) + return rc; + } + return 0; } @@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) return efx->rx_checksum_enabled; } +static void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = net_dev->priv; + struct efx_self_tests efx_tests; + int offline, already_up; + int rc; + + ASSERT_RTNL(); + if (efx->state != STATE_RUNNING) { + rc = -EIO; + goto fail1; + } + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up) { + rc = dev_open(efx->net_dev); + if (rc) { + EFX_ERR(efx, "failed opening device.\n"); + goto fail2; + } + } + + memset(&efx_tests, 0, sizeof(efx_tests)); + offline = (test->flags & ETH_TEST_FL_OFFLINE); + + /* Perform online self tests first */ + rc = efx_online_test(efx, &efx_tests); + if (rc) + goto out; + + /* Perform offline tests only if online tests passed */ + if (offline) { + /* Stop the kernel from sending packets during the test. */ + efx_stop_queue(efx); + rc = efx_flush_queues(efx); + if (!rc) + rc = efx_offline_test(efx, &efx_tests, + efx->loopback_modes); + efx_wake_queue(efx); + } + + out: + if (!already_up) + dev_close(efx->net_dev); + + EFX_LOG(efx, "%s all %sline self-tests\n", + rc == 0 ? "passed" : "failed", offline ? 
"off" : "on"); + + fail2: + fail1: + /* Fill ethtool results structures */ + efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + /* Restart autonegotiation */ static int efx_ethtool_nway_reset(struct net_device *net_dev) { @@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = { .set_tx_csum = efx_ethtool_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = efx_ethtool_set_tso, .get_flags = ethtool_op_get_flags, .set_flags = ethtool_op_set_flags, + .self_test_count = efx_ethtool_self_test_count, + .self_test = efx_ethtool_self_test, .get_strings = efx_ethtool_get_strings, .phys_id = efx_ethtool_phys_id, .get_stats_count = efx_ethtool_get_stats_count, diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 46db549ce58..790db89db34 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); ************************************************************************** */ -/* DMA address mask (up to 46-bit, avoiding compiler warnings) - * - * Note that it is possible to have a platform with 64-bit longs and - * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the - * platform DMA mask. - */ -#if BITS_PER_LONG == 64 -#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) -#else -#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) -#endif +/* DMA address mask */ +#define FALCON_DMA_MASK DMA_BIT_MASK(46) /* TX DMA length mask (13-bit) */ #define FALCON_TX_DMA_MASK (4096 - 1) @@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 #define FALCON_IS_DUAL_FUNC(efx) \ - (FALCON_REV(efx) < FALCON_REV_B0) + (falcon_rev(efx) < FALCON_REV_B0) /************************************************************************** * @@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) TX_DESCQ_TYPE, 0, TX_NON_IP_DROP_DIS_B0, 1); - if (FALCON_REV(efx) >= FALCON_REV_B0) { + if (falcon_rev(efx) >= FALCON_REV_B0) { int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); @@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, tx_queue->queue); - if (FALCON_REV(efx) < FALCON_REV_B0) { + if (falcon_rev(efx) < FALCON_REV_B0) { efx_oword_t reg; BUG_ON(tx_queue->queue >= 128); /* HW limit */ @@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue) efx_oword_t rx_desc_ptr; struct efx_nic *efx = rx_queue->efx; int rc; - int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; + int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; int iscsi_digest_en = is_b0; EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", @@ -742,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue) continue; break; } - if (rc) + if (rc) { EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); + efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); + } /* Remove RX descriptor ring from card */ EFX_ZERO_OWORD(rx_desc_ptr); @@ -822,10 +815,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel, tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); tx_queue = &efx->tx_queue[tx_ev_q_label]; - if (NET_DEV_REGISTERED(efx)) + if (efx_dev_registered(efx)) netif_tx_lock(efx->net_dev); 
falcon_notify_tx_desc(tx_queue); - if (NET_DEV_REGISTERED(efx)) + if (efx_dev_registered(efx)) netif_tx_unlock(efx->net_dev); } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && EFX_WORKAROUND_10727(efx)) { @@ -884,7 +877,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, RX_EV_TCP_UDP_CHKSUM_ERR); rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); - rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? + rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); @@ -1065,7 +1058,7 @@ static void falcon_handle_global_event(struct efx_channel *channel, EFX_QWORD_FIELD(*event, XG_PHY_INTR)) is_phy_event = 1; - if ((FALCON_REV(efx) >= FALCON_REV_B0) && + if ((falcon_rev(efx) >= FALCON_REV_B0) && EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) is_phy_event = 1; @@ -1129,6 +1122,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel, case RX_RECOVERY_EV_DECODE: EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " "Resetting.\n", channel->channel); + atomic_inc(&efx->rx_reset); efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? RESET_TYPE_RX_RECOVERY : @@ -1404,7 +1398,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx) static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; + efx_oword_t *int_ker = efx->irq_status.addr; efx_oword_t fatal_intr; int error, mem_perr; static int n_int_errors; @@ -1450,8 +1444,8 @@ out: */ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) { - struct efx_nic *efx = (struct efx_nic *)dev_id; - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; + struct efx_nic *efx = dev_id; + efx_oword_t *int_ker = efx->irq_status.addr; struct efx_channel *channel; efx_dword_t reg; u32 queues; @@ -1488,8 +1482,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) { - struct efx_nic *efx = (struct efx_nic *)dev_id; - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; + struct efx_nic *efx = dev_id; + efx_oword_t *int_ker = efx->irq_status.addr; struct efx_channel *channel; int syserr; int queues; @@ -1541,9 +1535,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) */ static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) { - struct efx_channel *channel = (struct efx_channel *)dev_id; + struct efx_channel *channel = dev_id; struct efx_nic *efx = channel->efx; - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; + efx_oword_t *int_ker = efx->irq_status.addr; int syserr; efx->last_irq_cpu = raw_smp_processor_id(); @@ -1571,7 +1565,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx) unsigned long offset; efx_dword_t dword; - if (FALCON_REV(efx) < FALCON_REV_B0) + if (falcon_rev(efx) < FALCON_REV_B0) return; for (offset = RX_RSS_INDIR_TBL_B0; @@ -1594,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx) if (!EFX_INT_MODE_USE_MSI(efx)) { irq_handler_t handler; - if (FALCON_REV(efx) >= FALCON_REV_B0) + if (falcon_rev(efx) >= FALCON_REV_B0) handler = falcon_legacy_interrupt_b0; else handler = falcon_legacy_interrupt_a1; @@ -1635,12 +1629,13 @@ void falcon_fini_interrupt(struct efx_nic *efx) efx_oword_t reg; /* Disable MSI/MSI-X interrupts */ - 
efx_for_each_channel_with_interrupt(channel, efx) + efx_for_each_channel_with_interrupt(channel, efx) { if (channel->irq) free_irq(channel->irq, channel); + } /* ACK legacy interrupt */ - if (FALCON_REV(efx) >= FALCON_REV_B0) + if (falcon_rev(efx) >= FALCON_REV_B0) falcon_read(efx, ®, INT_ISR0_B0); else falcon_irq_ack_a1(efx); @@ -1731,7 +1726,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx) efx_oword_t temp; int count; - if (FALCON_REV(efx) < FALCON_REV_B0) + if ((falcon_rev(efx) < FALCON_REV_B0) || + (efx->loopback_mode != LOOPBACK_NONE)) return; falcon_read(efx, &temp, MAC0_CTRL_REG_KER); @@ -1783,7 +1779,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) { efx_oword_t temp; - if (FALCON_REV(efx) < FALCON_REV_B0) + if (falcon_rev(efx) < FALCON_REV_B0) return; /* Isolate the MAC -> RX */ @@ -1821,7 +1817,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) MAC_SPEED, link_speed); /* On B0, MAC backpressure can be disabled and packets get * discarded. */ - if (FALCON_REV(efx) >= FALCON_REV_B0) { + if (falcon_rev(efx) >= FALCON_REV_B0) { EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, !efx->link_up); } @@ -1839,7 +1835,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); /* Unisolate the MAC -> RX */ - if (FALCON_REV(efx) >= FALCON_REV_B0) + if (falcon_rev(efx) >= FALCON_REV_B0) EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); falcon_write(efx, ®, RX_CFG_REG_KER); } @@ -1854,7 +1850,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) return 0; /* Statistics fetch will fail if the MAC is in TX drain */ - if (FALCON_REV(efx) >= FALCON_REV_B0) { + if (falcon_rev(efx) >= FALCON_REV_B0) { efx_oword_t temp; falcon_read(efx, &temp, MAC0_CTRL_REG_KER); if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) @@ -1938,7 +1934,7 @@ static int falcon_gmii_wait(struct efx_nic *efx) static void falcon_mdio_write(struct net_device *net_dev, int phy_id, int addr, int value) { - struct efx_nic *efx = (struct efx_nic *)net_dev->priv; + struct efx_nic *efx = net_dev->priv; unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; efx_oword_t reg; @@ -2006,7 +2002,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id, * could be read, -1 will be returned. */ static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) { - struct efx_nic *efx = (struct efx_nic *)net_dev->priv; + struct efx_nic *efx = net_dev->priv; unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; efx_oword_t reg; int value = -1; @@ -2091,6 +2087,8 @@ static int falcon_probe_phy(struct efx_nic *efx) efx->phy_type); return -1; } + + efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks; return 0; } @@ -2109,7 +2107,7 @@ int falcon_probe_port(struct efx_nic *efx) falcon_init_mdio(&efx->mii); /* Hardware flow ctrl. 
FalconA RX FIFO too small for pause generation */ - if (FALCON_REV(efx) >= FALCON_REV_B0) + if (falcon_rev(efx) >= FALCON_REV_B0) efx->flow_control = EFX_FC_RX | EFX_FC_TX; else efx->flow_control = EFX_FC_RX; @@ -2369,7 +2367,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) return -ENODEV; } - switch (FALCON_REV(efx)) { + switch (falcon_rev(efx)) { case FALCON_REV_A0: case 0xff: EFX_ERR(efx, "Falcon rev A0 not supported\n"); @@ -2395,7 +2393,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) break; default: - EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); + EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx)); return -ENODEV; } @@ -2415,7 +2413,7 @@ int falcon_probe_nic(struct efx_nic *efx) /* Allocate storage for hardware specific data */ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); - efx->nic_data = (void *) nic_data; + efx->nic_data = nic_data; /* Determine number of ports etc. */ rc = falcon_probe_nic_variant(efx); @@ -2468,14 +2466,12 @@ int falcon_probe_nic(struct efx_nic *efx) fail5: falcon_free_buffer(efx, &efx->irq_status); fail4: - /* fall-thru */ fail3: if (nic_data->pci_dev2) { pci_dev_put(nic_data->pci_dev2); nic_data->pci_dev2 = NULL; } fail2: - /* fall-thru */ fail1: kfree(efx->nic_data); return rc; @@ -2487,13 +2483,10 @@ int falcon_probe_nic(struct efx_nic *efx) */ int falcon_init_nic(struct efx_nic *efx) { - struct falcon_nic_data *data; efx_oword_t temp; unsigned thresh; int rc; - data = (struct falcon_nic_data *)efx->nic_data; - /* Set up the address region register. This is only needed * for the B0 FPGA, but since we are just pushing in the * reset defaults this may as well be unconditional. */ @@ -2560,7 +2553,7 @@ int falcon_init_nic(struct efx_nic *efx) /* Set number of RSS queues for receive path. 
*/ falcon_read(efx, &temp, RX_FILTER_CTL_REG); - if (FALCON_REV(efx) >= FALCON_REV_B0) + if (falcon_rev(efx) >= FALCON_REV_B0) EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); else EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); @@ -2598,7 +2591,7 @@ int falcon_init_nic(struct efx_nic *efx) /* Prefetch threshold 2 => fetch when descriptor cache half empty */ EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); /* Squash TX of packets of 16 bytes or less */ - if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) + if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); falcon_write(efx, &temp, TX_CFG2_REG_KER); @@ -2615,7 +2608,7 @@ int falcon_init_nic(struct efx_nic *efx) if (EFX_WORKAROUND_7575(efx)) EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, (3 * 4096) / 32); - if (FALCON_REV(efx) >= FALCON_REV_B0) + if (falcon_rev(efx) >= FALCON_REV_B0) EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); /* RX FIFO flow control thresholds */ @@ -2631,7 +2624,7 @@ int falcon_init_nic(struct efx_nic *efx) falcon_write(efx, &temp, RX_CFG_REG_KER); /* Set destination of both TX and RX Flush events */ - if (FALCON_REV(efx) >= FALCON_REV_B0) { + if (falcon_rev(efx) >= FALCON_REV_B0) { EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); falcon_write(efx, &temp, DP_CTRL_REG); } @@ -2645,7 +2638,7 @@ void falcon_remove_nic(struct efx_nic *efx) falcon_free_buffer(efx, &efx->irq_status); - (void) falcon_reset_hw(efx, RESET_TYPE_ALL); + falcon_reset_hw(efx, RESET_TYPE_ALL); /* Release the second function after the reset */ if (nic_data->pci_dev2) { diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h index 6117403b0c0..492f9bc2884 100644 --- a/drivers/net/sfc/falcon.h +++ b/drivers/net/sfc/falcon.h @@ -23,7 +23,10 @@ enum falcon_revision { FALCON_REV_B0 = 2, }; -#define FALCON_REV(efx) ((efx)->pci_dev->revision) +static inline int falcon_rev(struct efx_nic *efx) +{ + return efx->pci_dev->revision; +} extern struct efx_nic_type falcon_a_nic_type; extern struct efx_nic_type falcon_b_nic_type; diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h index 0485a63eaff..6d003114eea 100644 --- a/drivers/net/sfc/falcon_hwdefs.h +++ b/drivers/net/sfc/falcon_hwdefs.h @@ -636,6 +636,14 @@ #define XX_HIDRVA_WIDTH 1 #define XX_LODRVA_LBN 8 #define XX_LODRVA_WIDTH 1 +#define XX_LPBKD_LBN 3 +#define XX_LPBKD_WIDTH 1 +#define XX_LPBKC_LBN 2 +#define XX_LPBKC_WIDTH 1 +#define XX_LPBKB_LBN 1 +#define XX_LPBKB_WIDTH 1 +#define XX_LPBKA_LBN 0 +#define XX_LPBKA_WIDTH 1 #define XX_TXDRV_CTL_REG_MAC 0x12 #define XX_DEQD_LBN 28 @@ -656,8 +664,14 @@ #define XX_DTXA_WIDTH 4 /* XAUI XGXS core status register */ -#define XX_FORCE_SIG_DECODE_FORCED 0xff #define XX_CORE_STAT_REG_MAC 0x16 +#define XX_FORCE_SIG_LBN 24 +#define XX_FORCE_SIG_WIDTH 8 +#define XX_FORCE_SIG_DECODE_FORCED 0xff +#define XX_XGXS_LB_EN_LBN 23 +#define XX_XGXS_LB_EN_WIDTH 1 +#define XX_XGMII_LB_EN_LBN 22 +#define XX_XGMII_LB_EN_WIDTH 1 #define XX_ALIGN_DONE_LBN 20 #define XX_ALIGN_DONE_WIDTH 1 #define XX_SYNC_STAT_LBN 16 @@ -1111,7 +1125,7 @@ struct falcon_nvconfig_board_v2 { u8 port1_phy_type; __le16 asic_sub_revision; __le16 board_revision; -} __attribute__ ((packed)); +} __packed; #define NVCONFIG_BASE 0x300 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C @@ -1130,6 +1144,6 @@ struct falcon_nvconfig { __le16 board_struct_ver; __le16 board_checksum; struct falcon_nvconfig_board_v2 board_v2; -} __attribute__ ((packed)); +} __packed; #endif /* EFX_FALCON_HWDEFS_H */ diff --git 
a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h index ea08184ddfa..6670cdfc41a 100644 --- a/drivers/net/sfc/falcon_io.h +++ b/drivers/net/sfc/falcon_io.h @@ -56,14 +56,27 @@ #define FALCON_USE_QWORD_IO 1 #endif -#define _falcon_writeq(efx, value, reg) \ - __raw_writeq((__force u64) (value), (efx)->membase + (reg)) -#define _falcon_writel(efx, value, reg) \ - __raw_writel((__force u32) (value), (efx)->membase + (reg)) -#define _falcon_readq(efx, reg) \ - ((__force __le64) __raw_readq((efx)->membase + (reg))) -#define _falcon_readl(efx, reg) \ - ((__force __le32) __raw_readl((efx)->membase + (reg))) +#ifdef FALCON_USE_QWORD_IO +static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, + unsigned int reg) +{ + __raw_writeq((__force u64)value, efx->membase + reg); +} +static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) +{ + return (__force __le64)__raw_readq(efx->membase + reg); +} +#endif + +static inline void _falcon_writel(struct efx_nic *efx, __le32 value, + unsigned int reg) +{ + __raw_writel((__force u32)value, efx->membase + reg); +} +static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) +{ + return (__force __le32)__raw_readl(efx->membase + reg); +} /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index aa7521b24a5..55c0d9760be 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c @@ -32,7 +32,7 @@ (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) void falcon_xmac_writel(struct efx_nic *efx, - efx_dword_t *value, unsigned int mac_reg) + efx_dword_t *value, unsigned int mac_reg) { efx_oword_t temp; @@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx) udelay(10); } + /* This often fails when DSP is disabled, ignore it */ + if (sfe4001_phy_flash_cfg != 0) + return 0; + EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); return -ETIMEDOUT; } @@ -217,13 +221,13 @@ static int falcon_xgmii_status(struct efx_nic *efx) { efx_dword_t reg; - if (FALCON_REV(efx) < FALCON_REV_B0) + if (falcon_rev(efx) < FALCON_REV_B0) return 1; /* The ISR latches, so clear it and re-read */ falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); - + if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || EFX_DWORD_FIELD(reg, XM_RMTFLT)) { EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); @@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable) { efx_dword_t reg; - if (FALCON_REV(efx) < FALCON_REV_B0) + if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) return; /* Flush the ISR */ @@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx) efx_dword_t reg; int align_done, sync_status, link_ok = 0; + if (LOOPBACK_INTERNAL(efx)) + return 1; + /* Read link status */ falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); @@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx) falcon_xmac_writel(efx, ®, XM_ADR_HI_REG_MAC); } +static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) +{ + efx_dword_t reg; + int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; + int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; + int xgmii_loopback = + (efx->loopback_mode == LOOPBACK_XGMII) ? 
1 : 0; + + /* XGXS block is flaky and will need to be reset if moving + * into our out of XGMII, XGXS or XAUI loopbacks. */ + if (EFX_WORKAROUND_5147(efx)) { + int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; + int reset_xgxs; + + falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); + old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); + old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); + + falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); + old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); + + /* The PHY driver may have turned XAUI off */ + reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || + (xaui_loopback != old_xaui_loopback) || + (xgmii_loopback != old_xgmii_loopback)); + if (reset_xgxs) { + falcon_xmac_readl(efx, ®, XX_PWR_RST_REG_MAC); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); + falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); + udelay(1); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0); + falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); + udelay(1); + } + } + + falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); + EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, + (xgxs_loopback || xaui_loopback) ? + XX_FORCE_SIG_DECODE_FORCED : 0); + EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); + EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); + falcon_xmac_writel(efx, ®, XX_CORE_STAT_REG_MAC); + + falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); + EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); + EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); + EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); + EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); + falcon_xmac_writel(efx, ®, XX_SD_CTL_REG_MAC); +} + + /* Try and bring the Falcon side of the Falcon-Phy XAUI link fails * to come back up. Bash it until it comes back up */ static int falcon_check_xaui_link_up(struct efx_nic *efx) @@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) tries = EFX_WORKAROUND_5147(efx) ? 
5 : 1; max_tries = tries; - if (efx->phy_type == PHY_TYPE_NONE) + if ((efx->loopback_mode == LOOPBACK_NETWORK) || + (efx->phy_type == PHY_TYPE_NONE)) return 0; while (tries) { @@ -391,12 +454,12 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", __func__, tries); - (void) falcon_reset_xaui(efx); + falcon_reset_xaui(efx); udelay(200); tries--; } - EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n", + EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n", max_tries); return 0; } @@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx) falcon_mask_status_intr(efx, 0); falcon_deconfigure_mac_wrapper(efx); + + efx->tx_disabled = LOOPBACK_INTERNAL(efx); efx->phy_op->reconfigure(efx); + + falcon_reconfigure_xgxs_core(efx); falcon_reconfigure_xmac_core(efx); + falcon_reconfigure_mac_wrapper(efx); /* Ensure XAUI link is up */ @@ -491,18 +559,20 @@ void falcon_update_stats_xmac(struct efx_nic *efx) (mac_stats->rx_bytes - mac_stats->rx_good_bytes); } -#define EFX_XAUI_RETRAIN_MAX 8 - int falcon_check_xmac(struct efx_nic *efx) { unsigned xaui_link_ok; int rc; + if ((efx->loopback_mode == LOOPBACK_NETWORK) || + (efx->phy_type == PHY_TYPE_NONE)) + return 0; + falcon_mask_status_intr(efx, 0); xaui_link_ok = falcon_xaui_link_ok(efx); if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) - (void) falcon_reset_xaui(efx); + falcon_reset_xaui(efx); /* Call the PHY check_hw routine */ rc = efx->phy_op->check_hw(efx); @@ -569,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) reset = ((flow_control & EFX_FC_TX) && !(efx->flow_control & EFX_FC_TX)); if (EFX_WORKAROUND_11482(efx) && reset) { - if (FALCON_REV(efx) >= FALCON_REV_B0) { + if (falcon_rev(efx) >= FALCON_REV_B0) { /* Recover by resetting the EM block */ if (efx->link_up) falcon_drain_tx_fifo(efx); diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index dc06bb0aa57..c4f540e93b7 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c @@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd, int status; int phy_id = efx->mii.phy_id; + if (LOOPBACK_INTERNAL(efx)) + return 0; + /* Read MMD STATUS2 to check it is responding. 
*/ status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & @@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) int mmd = 0; int good; + /* If the port is in loopback, then we should only consider a subset + * of mmd's */ + if (LOOPBACK_INTERNAL(efx)) + return 1; + else if (efx->loopback_mode == LOOPBACK_NETWORK) + return 0; + else if (efx->loopback_mode == LOOPBACK_PHYXS) + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | + MDIO_MMDREG_DEVS0_PCS | + MDIO_MMDREG_DEVS0_PMAPMD); + else if (efx->loopback_mode == LOOPBACK_PCS) + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS | + MDIO_MMDREG_DEVS0_PMAPMD); + else if (efx->loopback_mode == LOOPBACK_PMAPMD) + mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD; + while (mmd_mask) { if (mmd_mask & 1) { /* Double reads because link state is latched, and a @@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) return ok; } +void mdio_clause45_transmit_disable(struct efx_nic *efx) +{ + int phy_id = efx->mii.phy_id; + int ctrl1, ctrl2; + + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_TXDIS); + if (efx->tx_disabled) + ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); + else + ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_TXDIS, ctrl2); +} + +void mdio_clause45_phy_reconfigure(struct efx_nic *efx) +{ + int phy_id = efx->mii.phy_id; + int ctrl1, ctrl2; + + /* Handle (with debouncing) PMA/PMD loopback */ + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1); + + if (efx->loopback_mode == LOOPBACK_PMAPMD) + ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); + else + ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); + + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1, ctrl2); + + /* Handle (with debouncing) PCS loopback */ + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS, + MDIO_MMDREG_CTRL1); + if (efx->loopback_mode == LOOPBACK_PCS) + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + else + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS, + MDIO_MMDREG_CTRL1, ctrl2); + + /* Handle (with debouncing) PHYXS network loopback */ + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, + MDIO_MMDREG_CTRL1); + if (efx->loopback_mode == LOOPBACK_NETWORK) + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + else + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, + MDIO_MMDREG_CTRL1, ctrl2); +} + /** * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. 
* @efx: Efx NIC diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 2214b6d820a..cb99f3f4491 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h @@ -44,11 +44,16 @@ #define MDIO_MMDREG_DEVS1 (6) #define MDIO_MMDREG_CTRL2 (7) #define MDIO_MMDREG_STAT2 (8) +#define MDIO_MMDREG_TXDIS (9) /* Bits in MMDREG_CTRL1 */ /* Reset */ #define MDIO_MMDREG_CTRL1_RESET_LBN (15) #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) +/* Loopback */ +/* Loopback bit for WIS, PCS, PHYSX and DTEXS */ +#define MDIO_MMDREG_CTRL1_LBACK_LBN (14) +#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1) /* Bits in MMDREG_STAT1 */ #define MDIO_MMDREG_STAT1_FAULT_LBN (7) @@ -56,6 +61,9 @@ /* Link state */ #define MDIO_MMDREG_STAT1_LINK_LBN (2) #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) +/* Low power ability */ +#define MDIO_MMDREG_STAT1_LPABLE_LBN (1) +#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1) /* Bits in ID reg */ #define MDIO_ID_REV(_id32) (_id32 & 0xf) @@ -76,6 +84,14 @@ #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) +/* Bits in MMDREG_TXDIS */ +#define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0) +#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1) + +/* MMD-specific bits, ordered by MMD, then register */ +#define MDIO_PMAPMD_CTRL1_LBACK_LBN (0) +#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1) + /* PMA type (4 bits) */ #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) @@ -95,7 +111,7 @@ #define MDIO_PMAPMD_CTRL2_10_BT (0xf) #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) -/* /\* PHY XGXS lane state *\/ */ +/* PHY XGXS lane state */ #define MDIO_PHYXS_LANE_STATE (0x18) #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) @@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx, extern int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask); +/* Generic transmit disable support though PMAPMD */ +extern void mdio_clause45_transmit_disable(struct efx_nic *efx); + +/* Generic part of reconfigure: set/clear loopback bits */ +extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx); + /* Read (some of) the PHY settings over MDIO */ extern void mdio_clause45_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index c505482c252..5e20e7551da 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -42,7 +42,7 @@ #ifndef EFX_DRIVER_NAME #define EFX_DRIVER_NAME "sfc" #endif -#define EFX_DRIVER_VERSION "2.2.0136" +#define EFX_DRIVER_VERSION "2.2" #ifdef EFX_ENABLE_DEBUG #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) @@ -52,28 +52,19 @@ #define EFX_WARN_ON_PARANOID(x) do {} while (0) #endif -#define NET_DEV_REGISTERED(efx) \ - ((efx)->net_dev->reg_state == NETREG_REGISTERED) - -/* Include net device name in log messages if it has been registered. - * Use efx->name not efx->net_dev->name so that races with (un)registration - * are harmless. - */ -#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") - /* Un-rate-limited logging */ #define EFX_ERR(efx, fmt, args...) \ -dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) +dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) #define EFX_INFO(efx, fmt, args...) \ -dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) +dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) #ifdef EFX_ENABLE_DEBUG #define EFX_LOG(efx, fmt, args...) 
\ -dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) +dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) #else #define EFX_LOG(efx, fmt, args...) \ -dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) +dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) #endif #define EFX_TRACE(efx, fmt, args...) do {} while (0) @@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) #define EFX_LOG_RL(efx, fmt, args...) \ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) -/* Kernel headers may redefine inline anyway */ -#ifndef inline -#define inline inline __attribute__ ((always_inline)) -#endif - /************************************************************************** * * Efx data structures @@ -134,6 +120,8 @@ struct efx_special_buffer { * Set only on the final fragment of a packet; %NULL for all other * fragments. When this fragment completes, then we can free this * skb. + * @tsoh: The associated TSO header structure, or %NULL if this + * buffer is not a TSO header. * @dma_addr: DMA address of the fragment. * @len: Length of this fragment. * This field is zero when the queue slot is empty. @@ -144,6 +132,7 @@ struct efx_special_buffer { */ struct efx_tx_buffer { const struct sk_buff *skb; + struct efx_tso_header *tsoh; dma_addr_t dma_addr; unsigned short len; unsigned char continuation; @@ -187,6 +176,13 @@ struct efx_tx_buffer { * variable indicates that the queue is full. This is to * avoid cache-line ping-pong between the xmit path and the * completion path. + * @tso_headers_free: A list of TSO headers allocated for this TX queue + * that are not in use, and so available for new TSO sends. The list + * is protected by the TX queue lock. 
+ * @tso_bursts: Number of times TSO xmit invoked by kernel + * @tso_long_headers: Number of packets with headers too long for standard + * blocks + * @tso_packets: Number of packets via the TSO xmit path */ struct efx_tx_queue { /* Members which don't change on the fast path */ @@ -206,6 +202,10 @@ struct efx_tx_queue { unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; unsigned int old_read_count; + struct efx_tso_header *tso_headers_free; + unsigned int tso_bursts; + unsigned int tso_long_headers; + unsigned int tso_packets; }; /** @@ -434,6 +434,9 @@ struct efx_board { struct efx_blinker blinker; }; +#define STRING_TABLE_LOOKUP(val, member) \ + member ## _names[val] + enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, @@ -506,6 +509,7 @@ enum efx_fc_type { * @check_hw: Check hardware * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) * @mmds: MMD presence mask + * @loopbacks: Supported loopback modes mask */ struct efx_phy_operations { int (*init) (struct efx_nic *efx); @@ -515,6 +519,7 @@ struct efx_phy_operations { int (*check_hw) (struct efx_nic *efx); void (*reset_xaui) (struct efx_nic *efx); int mmds; + unsigned loopbacks; }; /* @@ -653,7 +658,6 @@ union efx_multicast_hash { * @phy_op: PHY interface * @phy_data: PHY private data (including PHY-specific stats) * @mii: PHY interface - * @phy_powered: PHY power state * @tx_disabled: PHY transmitter turned off * @link_up: Link status * @link_options: Link options (MII/GMII format) @@ -662,6 +666,9 @@ union efx_multicast_hash { * @multicast_hash: Multicast hash table * @flow_control: Flow control flags - separate RX/TX so can't use link_options * @reconfigure_work: work item for dealing with PHY events + * @loopback_mode: Loopback status + * @loopback_modes: Supported loopback mode bitmask + * @loopback_selftest: Offline self-test private state * * The @priv field of the corresponding &struct net_device points to * this. @@ -674,7 +681,7 @@ struct efx_nic { struct workqueue_struct *workqueue; struct work_struct reset_work; struct delayed_work monitor_work; - unsigned long membase_phys; + resource_size_t membase_phys; void __iomem *membase; spinlock_t biu_lock; enum efx_int_mode interrupt_mode; @@ -698,7 +705,7 @@ struct efx_nic { unsigned n_rx_nodesc_drop_cnt; - void *nic_data; + struct falcon_nic_data *nic_data; struct mutex mac_lock; int port_enabled; @@ -721,6 +728,7 @@ struct efx_nic { struct efx_phy_operations *phy_op; void *phy_data; struct mii_if_info mii; + unsigned tx_disabled; int link_up; unsigned int link_options; @@ -732,8 +740,26 @@ struct efx_nic { struct work_struct reconfigure_work; atomic_t rx_reset; + enum efx_loopback_mode loopback_mode; + unsigned int loopback_modes; + + void *loopback_selftest; }; +static inline int efx_dev_registered(struct efx_nic *efx) +{ + return efx->net_dev->reg_state == NETREG_REGISTERED; +} + +/* Net device name, for inclusion in log messages if it has been registered. + * Use efx->name not efx->net_dev->name so that races with (un)registration + * are harmless. + */ +static inline const char *efx_dev_name(struct efx_nic *efx) +{ + return efx_dev_registered(efx) ? 
efx->name : ""; +} + /** * struct efx_nic_type - Efx device type definition * @mem_bar: Memory BAR number @@ -769,7 +795,7 @@ struct efx_nic_type { unsigned int txd_ring_mask; unsigned int rxd_ring_mask; unsigned int evq_size; - dma_addr_t max_dma_mask; + u64 max_dma_mask; unsigned int tx_dma_mask; unsigned bug5391_mask; diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 551299b462a..601b001437c 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -19,6 +19,7 @@ #include "rx.h" #include "efx.h" #include "falcon.h" +#include "selftest.h" #include "workarounds.h" /* Number of RX descriptors pushed at once. */ @@ -85,14 +86,17 @@ static unsigned int rx_refill_limit = 95; */ #define EFX_RXD_HEAD_ROOM 2 -/* Macros for zero-order pages (potentially) containing multiple RX buffers */ -#define RX_DATA_OFFSET(_data) \ - (((unsigned long) (_data)) & (PAGE_SIZE-1)) -#define RX_BUF_OFFSET(_rx_buf) \ - RX_DATA_OFFSET((_rx_buf)->data) - -#define RX_PAGE_SIZE(_efx) \ - (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) +static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) +{ + /* Offset is always within one page, so we don't need to consider + * the page order. + */ + return (__force unsigned long) buf->data & (PAGE_SIZE - 1); +} +static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) +{ + return PAGE_SIZE << efx->rx_buffer_order; +} /************************************************************************** @@ -105,7 +109,7 @@ static unsigned int rx_refill_limit = 95; static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, void *priv) { - struct efx_channel *channel = (struct efx_channel *)priv; + struct efx_channel *channel = priv; struct iphdr *iph; struct tcphdr *th; @@ -130,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, void *priv) { - struct efx_channel *channel = (struct efx_channel *)priv; + struct efx_channel *channel = priv; struct ethhdr *eh; struct iphdr *iph; /* We support EtherII and VLAN encapsulated IPv4 */ - eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); + eh = page_address(frag->page) + frag->page_offset; *mac_hdr = eh; if (eh->h_proto == htons(ETH_P_IP)) { @@ -268,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, return -ENOMEM; dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, - 0, RX_PAGE_SIZE(efx), + 0, efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE); if (unlikely(pci_dma_mapping_error(dma_addr))) { @@ -279,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, rx_queue->buf_page = rx_buf->page; rx_queue->buf_dma_addr = dma_addr; - rx_queue->buf_data = ((char *) page_address(rx_buf->page) + + rx_queue->buf_data = (page_address(rx_buf->page) + EFX_PAGE_IP_ALIGN); } - offset = RX_DATA_OFFSET(rx_queue->buf_data); rx_buf->len = bytes; - rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; rx_buf->data = rx_queue->buf_data; + offset = efx_rx_buf_offset(rx_buf); + rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; /* Try to pack multiple buffers per page */ if (efx->rx_buffer_order == 0) { @@ -294,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); offset += ((bytes + 0x1ff) & ~0x1ff); - space = RX_PAGE_SIZE(efx) - offset; + space = efx_rx_buf_size(efx) - offset; if (space >= bytes) { /* Refs dropped on kernel releasing each skb */ 
get_page(rx_queue->buf_page); @@ -343,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx, EFX_BUG_ON_PARANOID(rx_buf->skb); if (rx_buf->unmap_addr) { pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, - RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); + efx_rx_buf_size(efx), + PCI_DMA_FROMDEVICE); rx_buf->unmap_addr = 0; } } else if (likely(rx_buf->skb)) { @@ -399,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, return 0; /* Record minimum fill level */ - if (unlikely(fill_level < rx_queue->min_fill)) + if (unlikely(fill_level < rx_queue->min_fill)) { if (fill_level) rx_queue->min_fill = fill_level; + } /* Acquire RX add lock. If this lock is contended, then a fast * fill must already be in progress (e.g. in the refill @@ -551,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel, struct skb_frag_struct frags; frags.page = rx_buf->page; - frags.page_offset = RX_BUF_OFFSET(rx_buf); + frags.page_offset = efx_rx_buf_offset(rx_buf); frags.size = rx_buf->len; lro_receive_frags(lro_mgr, &frags, rx_buf->len, @@ -596,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, if (unlikely(rx_buf->len > hdr_len)) { struct skb_frag_struct *frag = skb_shinfo(skb)->frags; frag->page = rx_buf->page; - frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; + frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; frag->size = skb->len - hdr_len; skb_shinfo(skb)->nr_frags = 1; skb->data_len = frag->size; @@ -683,6 +689,15 @@ void __efx_rx_packet(struct efx_channel *channel, struct sk_buff *skb; int lro = efx->net_dev->features & NETIF_F_LRO; + /* If we're in loopback test, then pass the packet directly to the + * loopback layer, and free the rx_buf here + */ + if (unlikely(efx->loopback_selftest)) { + efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); + efx_free_rx_buffer(efx, rx_buf); + goto done; + } + if (rx_buf->skb) { prefetch(skb_shinfo(rx_buf->skb)); @@ -736,7 +751,6 @@ void __efx_rx_packet(struct efx_channel *channel, /* Update allocation strategy method */ channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; - /* fall-thru */ done: efx->net_dev->last_rx = jiffies; } @@ -842,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) /* For a page that is part-way through splitting into RX buffers */ if (rx_queue->buf_page != NULL) { pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, - RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); + efx_rx_buf_size(rx_queue->efx), + PCI_DMA_FROMDEVICE); __free_pages(rx_queue->buf_page, rx_queue->efx->rx_buffer_order); rx_queue->buf_page = NULL; diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c new file mode 100644 index 00000000000..3b2de9fe7f2 --- /dev/null +++ b/drivers/net/sfc/selftest.c @@ -0,0 +1,719 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2008 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include <linux/netdevice.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/kernel_stat.h> +#include <linux/pci.h> +#include <linux/ethtool.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/udp.h> +#include <linux/rtnetlink.h> +#include <asm/io.h> +#include "net_driver.h" +#include "ethtool.h" +#include "efx.h" +#include "falcon.h" +#include "selftest.h" +#include "boards.h" +#include "workarounds.h" +#include "mac.h" + +/* + * Loopback test packet structure + * + * The self-test should stress every RSS vector, and unfortunately + * Falcon only performs RSS on TCP/UDP packets. + */ +struct efx_loopback_payload { + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + const char msg[64]; +} __attribute__ ((packed)); + +/* Loopback test source MAC address */ +static const unsigned char payload_source[ETH_ALEN] = { + 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, +}; + +static const char *payload_msg = + "Hello world! This is an Efx loopback test in progress!"; + +/** + * efx_selftest_state - persistent state during a selftest + * @flush: Drop all packets in efx_loopback_rx_packet + * @packet_count: Number of packets being used in this test + * @skbs: An array of skbs transmitted + * @rx_good: RX good packet count + * @rx_bad: RX bad packet count + * @payload: Payload used in tests + */ +struct efx_selftest_state { + int flush; + int packet_count; + struct sk_buff **skbs; + atomic_t rx_good; + atomic_t rx_bad; + struct efx_loopback_payload payload; +}; + +/************************************************************************** + * + * Configurable values + * + **************************************************************************/ + +/* Level of loopback testing + * + * The maximum packet burst length is 16**(n-1), i.e. + * + * - Level 0 : no packets + * - Level 1 : 1 packet + * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) + * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets) + * + */ +static unsigned int loopback_test_level = 3; + +/************************************************************************** + * + * Interrupt and event queue testing + * + **************************************************************************/ + +/* Test generation and receipt of interrupts */ +static int efx_test_interrupts(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + struct efx_channel *channel; + + EFX_LOG(efx, "testing interrupts\n"); + tests->interrupt = -1; + + /* Reset interrupt flag */ + efx->last_irq_cpu = -1; + smp_wmb(); + + /* ACK each interrupting event queue. Receiving an interrupt due to + * traffic before a test event is raised is considered a pass */ + efx_for_each_channel_with_interrupt(channel, efx) { + if (channel->work_pending) + efx_process_channel_now(channel); + if (efx->last_irq_cpu >= 0) + goto success; + } + + falcon_generate_interrupt(efx); + + /* Wait for arrival of test interrupt. 
*/ + EFX_LOG(efx, "waiting for test interrupt\n"); + schedule_timeout_uninterruptible(HZ / 10); + if (efx->last_irq_cpu >= 0) + goto success; + + EFX_ERR(efx, "timed out waiting for interrupt\n"); + return -ETIMEDOUT; + + success: + EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", + efx->interrupt_mode, efx->last_irq_cpu); + tests->interrupt = 1; + return 0; +} + +/* Test generation and receipt of non-interrupting events */ +static int efx_test_eventq(struct efx_channel *channel, + struct efx_self_tests *tests) +{ + unsigned int magic; + + /* Channel specific code, limited to 20 bits */ + magic = (0x00010150 + channel->channel); + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", + channel->channel, magic); + + tests->eventq_dma[channel->channel] = -1; + tests->eventq_int[channel->channel] = 1; /* fake pass */ + tests->eventq_poll[channel->channel] = 1; /* fake pass */ + + /* Reset flag and zero magic word */ + channel->efx->last_irq_cpu = -1; + channel->eventq_magic = 0; + smp_wmb(); + + falcon_generate_test_event(channel, magic); + udelay(1); + + efx_process_channel_now(channel); + if (channel->eventq_magic != magic) { + EFX_ERR(channel->efx, "channel %d failed to see test event\n", + channel->channel); + return -ETIMEDOUT; + } else { + tests->eventq_dma[channel->channel] = 1; + } + + return 0; +} + +/* Test generation and receipt of interrupting events */ +static int efx_test_eventq_irq(struct efx_channel *channel, + struct efx_self_tests *tests) +{ + unsigned int magic, count; + + /* Channel specific code, limited to 20 bits */ + magic = (0x00010150 + channel->channel); + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", + channel->channel, magic); + + tests->eventq_dma[channel->channel] = -1; + tests->eventq_int[channel->channel] = -1; + tests->eventq_poll[channel->channel] = -1; + + /* Reset flag and zero magic word */ + channel->efx->last_irq_cpu = -1; + channel->eventq_magic = 0; + smp_wmb(); + + falcon_generate_test_event(channel, magic); + + /* Wait for arrival of interrupt */ + count = 0; + do { + schedule_timeout_uninterruptible(HZ / 100); + + if (channel->work_pending) + efx_process_channel_now(channel); + + if (channel->eventq_magic == magic) + goto eventq_ok; + } while (++count < 2); + + EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", + channel->channel); + + /* See if interrupt arrived */ + if (channel->efx->last_irq_cpu >= 0) { + EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " + "during event queue test\n", channel->channel, + raw_smp_processor_id()); + tests->eventq_int[channel->channel] = 1; + } + + /* Check to see if event was received even if interrupt wasn't */ + efx_process_channel_now(channel); + if (channel->eventq_magic == magic) { + EFX_ERR(channel->efx, "channel %d event was generated, but " + "failed to trigger an interrupt\n", channel->channel); + tests->eventq_dma[channel->channel] = 1; + } + + return -ETIMEDOUT; + eventq_ok: + EFX_LOG(channel->efx, "channel %d event queue passed\n", + channel->channel); + tests->eventq_dma[channel->channel] = 1; + tests->eventq_int[channel->channel] = 1; + tests->eventq_poll[channel->channel] = 1; + return 0; +} + +/************************************************************************** + * + * PHY testing + * + **************************************************************************/ + +/* Check PHY presence by reading the PHY ID registers */ +static int efx_test_phy(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + u16 
physid1, physid2; + struct mii_if_info *mii = &efx->mii; + struct net_device *net_dev = efx->net_dev; + + if (efx->phy_type == PHY_TYPE_NONE) + return 0; + + EFX_LOG(efx, "testing PHY presence\n"); + tests->phy_ok = -1; + + physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); + physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); + + if ((physid1 != 0x0000) && (physid1 != 0xffff) && + (physid2 != 0x0000) && (physid2 != 0xffff)) { + EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n", + mii->phy_id, physid1, physid2); + tests->phy_ok = 1; + return 0; + } + + EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id); + return -ENODEV; +} + +/************************************************************************** + * + * Loopback testing + * NB Only one loopback test can be executing concurrently. + * + **************************************************************************/ + +/* Loopback test RX callback + * This is called for each received packet during loopback testing. + */ +void efx_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len) +{ + struct efx_selftest_state *state = efx->loopback_selftest; + struct efx_loopback_payload *received; + struct efx_loopback_payload *payload; + + BUG_ON(!buf_ptr); + + /* If we are just flushing, then drop the packet */ + if ((state == NULL) || state->flush) + return; + + payload = &state->payload; + + received = (struct efx_loopback_payload *) buf_ptr; + received->ip.saddr = payload->ip.saddr; + received->ip.check = payload->ip.check; + + /* Check that header exists */ + if (pkt_len < sizeof(received->header)) { + EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " + "test\n", pkt_len, LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that the ethernet header exists */ + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { + EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check packet length */ + if (pkt_len != sizeof(*payload)) { + EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " + "%s loopback test\n", pkt_len, (int)sizeof(*payload), + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that IP header matches */ + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { + EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that msg and padding matches */ + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { + EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that iteration matches */ + if (received->iteration != payload->iteration) { + EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " + "%s loopback test\n", ntohs(received->iteration), + ntohs(payload->iteration), LOOPBACK_MODE(efx)); + goto err; + } + + /* Increase correct RX count */ + EFX_TRACE(efx, "got loopback RX in %s loopback test\n", + LOOPBACK_MODE(efx)); + + atomic_inc(&state->rx_good); + return; + + err: +#ifdef EFX_ENABLE_DEBUG + if (atomic_read(&state->rx_bad) == 0) { + EFX_ERR(efx, "received packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + buf_ptr, pkt_len, 0); + EFX_ERR(efx, "expected packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + &state->payload, sizeof(state->payload), 0); + } +#endif + atomic_inc(&state->rx_bad); +} + +/* Initialise an efx_selftest_state for a new iteration */ +static void efx_iterate_state(struct efx_nic 
*efx) +{ + struct efx_selftest_state *state = efx->loopback_selftest; + struct net_device *net_dev = efx->net_dev; + struct efx_loopback_payload *payload = &state->payload; + + /* Initialise the layerII header */ + memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); + memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); + payload->header.h_proto = htons(ETH_P_IP); + + /* saddr set later and used as incrementing count */ + payload->ip.daddr = htonl(INADDR_LOOPBACK); + payload->ip.ihl = 5; + payload->ip.check = htons(0xdead); + payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); + payload->ip.version = IPVERSION; + payload->ip.protocol = IPPROTO_UDP; + + /* Initialise udp header */ + payload->udp.source = 0; + payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - + sizeof(struct iphdr)); + payload->udp.check = 0; /* checksum ignored */ + + /* Fill out payload */ + payload->iteration = htons(ntohs(payload->iteration) + 1); + memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); + + /* Fill out remaining state members */ + atomic_set(&state->rx_good, 0); + atomic_set(&state->rx_bad, 0); + smp_wmb(); +} + +static int efx_tx_loopback(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_selftest_state *state = efx->loopback_selftest; + struct efx_loopback_payload *payload; + struct sk_buff *skb; + int i, rc; + + /* Transmit N copies of buffer */ + for (i = 0; i < state->packet_count; i++) { + /* Allocate an skb, holding an extra reference for + * transmit completion counting */ + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); + if (!skb) + return -ENOMEM; + state->skbs[i] = skb; + skb_get(skb); + + /* Copy the payload in, incrementing the source address to + * exercise the rss vectors */ + payload = ((struct efx_loopback_payload *) + skb_put(skb, sizeof(state->payload))); + memcpy(payload, &state->payload, sizeof(state->payload)); + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); + + /* Ensure everything we've written is visible to the + * interrupt handler. */ + smp_wmb(); + + if (efx_dev_registered(efx)) + netif_tx_lock_bh(efx->net_dev); + rc = efx_xmit(efx, tx_queue, skb); + if (efx_dev_registered(efx)) + netif_tx_unlock_bh(efx->net_dev); + + if (rc != NETDEV_TX_OK) { + EFX_ERR(efx, "TX queue %d could not transmit packet %d " + "of %d in %s loopback test\n", tx_queue->queue, + i + 1, state->packet_count, LOOPBACK_MODE(efx)); + + /* Defer cleaning up the other skbs for the caller */ + kfree_skb(skb); + return -EPIPE; + } + } + + return 0; +} + +static int efx_rx_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_selftest_state *state = efx->loopback_selftest; + struct sk_buff *skb; + int tx_done = 0, rx_good, rx_bad; + int i, rc = 0; + + if (efx_dev_registered(efx)) + netif_tx_lock_bh(efx->net_dev); + + /* Count the number of tx completions, and decrement the refcnt. 
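/*
 * The accounting described above relies on a reference-counting trick:
 * efx_tx_loopback() takes an extra reference (skb_get()) on every skb it
 * transmits, so once the TX completion path has dropped its own reference,
 * skb_shared() returns false and the packet is known to have completed.
 * A minimal sketch of the idea; the function name is illustrative, not
 * driver API.
 */
#include <linux/skbuff.h>

static bool loopback_skb_completed(struct sk_buff *skb)
{
	/* Still shared => the TX completion path has not dropped its ref */
	bool done = skb && !skb_shared(skb);

	/* Drop the extra reference taken with skb_get() at transmit time */
	if (skb)
		dev_kfree_skb_any(skb);
	return done;
}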
Any + * skbs not already completed will be free'd when the queue is flushed */ + for (i=0; i < state->packet_count; i++) { + skb = state->skbs[i]; + if (skb && !skb_shared(skb)) + ++tx_done; + dev_kfree_skb_any(skb); + } + + if (efx_dev_registered(efx)) + netif_tx_unlock_bh(efx->net_dev); + + /* Check TX completion and received packet counts */ + rx_good = atomic_read(&state->rx_good); + rx_bad = atomic_read(&state->rx_bad); + if (tx_done != state->packet_count) { + /* Don't free the skbs; they will be picked up on TX + * overflow or channel teardown. + */ + EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " + "TX completion events in %s loopback test\n", + tx_queue->queue, tx_done, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Allow to fall through so we see the RX errors as well */ + } + + /* We may always be up to a flush away from our desired packet total */ + if (rx_good != state->packet_count) { + EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " + "received packets in %s loopback test\n", + tx_queue->queue, rx_good, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Fall through */ + } + + /* Update loopback test structure */ + lb_tests->tx_sent[tx_queue->queue] += state->packet_count; + lb_tests->tx_done[tx_queue->queue] += tx_done; + lb_tests->rx_good += rx_good; + lb_tests->rx_bad += rx_bad; + + return rc; +} + +static int +efx_test_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_selftest_state *state = efx->loopback_selftest; + struct efx_channel *channel; + int i, rc = 0; + + for (i = 0; i < loopback_test_level; i++) { + /* Determine how many packets to send */ + state->packet_count = (efx->type->txd_ring_mask + 1) / 3; + state->packet_count = min(1 << (i << 2), state->packet_count); + state->skbs = kzalloc(sizeof(state->skbs[0]) * + state->packet_count, GFP_KERNEL); + if (!state->skbs) + return -ENOMEM; + state->flush = 0; + + EFX_LOG(efx, "TX queue %d testing %s loopback with %d " + "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + efx_iterate_state(efx); + rc = efx_tx_loopback(tx_queue); + + /* NAPI polling is not enabled, so process channels synchronously */ + schedule_timeout_uninterruptible(HZ / 50); + efx_for_each_channel_with_interrupt(channel, efx) { + if (channel->work_pending) + efx_process_channel_now(channel); + } + + rc |= efx_rx_loopback(tx_queue, lb_tests); + kfree(state->skbs); + + if (rc) { + /* Wait a while to ensure there are no packets + * floating around after a failure. 
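/*
 * As an illustrative aside (a standalone sketch, not driver code): burst i
 * of the loop above sends min(16^i, txd_ring_size / 3) packets, so with the
 * default loopback_test_level of 3 and a sufficiently large TX ring the
 * per-level totals are the 1, 17 and 273 packets quoted next to
 * loopback_test_level.
 */
#include <stdio.h>

static unsigned int loopback_packets(unsigned int level)
{
	unsigned int i, total = 0;

	for (i = 0; i < level; i++)
		total += 1u << (i << 2);	/* 16^i packets in burst i */
	return total;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n <= 3; n++)
		printf("level %u: %u packets\n", n, loopback_packets(n));
	return 0;
}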
*/ + schedule_timeout_uninterruptible(HZ / 10); + return rc; + } + } + + EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " + "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + return rc; +} + +static int efx_test_loopbacks(struct efx_nic *efx, + struct efx_self_tests *tests, + unsigned int loopback_modes) +{ + struct efx_selftest_state *state = efx->loopback_selftest; + struct ethtool_cmd ecmd, ecmd_loopback; + struct efx_tx_queue *tx_queue; + enum efx_loopback_mode old_mode, mode; + int count, rc = 0, link_up; + + rc = efx_ethtool_get_settings(efx->net_dev, &ecmd); + if (rc) { + EFX_ERR(efx, "could not get GMII settings\n"); + return rc; + } + old_mode = efx->loopback_mode; + + /* Disable autonegotiation for the purposes of loopback */ + memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback)); + if (ecmd_loopback.autoneg == AUTONEG_ENABLE) { + ecmd_loopback.autoneg = AUTONEG_DISABLE; + ecmd_loopback.duplex = DUPLEX_FULL; + ecmd_loopback.speed = SPEED_10000; + } + + rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); + if (rc) { + EFX_ERR(efx, "could not disable autonegotiation\n"); + goto out; + } + tests->loopback_speed = ecmd_loopback.speed; + tests->loopback_full_duplex = ecmd_loopback.duplex; + + /* Test all supported loopback modes */ + for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { + if (!(loopback_modes & (1 << mode))) + continue; + + /* Move the port into the specified loopback mode. */ + state->flush = 1; + efx->loopback_mode = mode; + efx_reconfigure_port(efx); + + /* Wait for the PHY to signal the link is up */ + count = 0; + do { + struct efx_channel *channel = &efx->channel[0]; + + falcon_check_xmac(efx); + schedule_timeout_uninterruptible(HZ / 10); + if (channel->work_pending) + efx_process_channel_now(channel); + /* Wait for PHY events to be processed */ + flush_workqueue(efx->workqueue); + rmb(); + + /* efx->link_up can be 1 even if the XAUI link is down, + * (bug5762). Usually, it's not worth bothering with the + * difference, but for selftests, we need that extra + * guarantee that the link is really, really, up. + */ + link_up = efx->link_up; + if (!falcon_xaui_link_ok(efx)) + link_up = 0; + + } while ((++count < 20) && !link_up); + + /* The link should now be up. If it isn't, there is no point + * in attempting a loopback test */ + if (!link_up) { + EFX_ERR(efx, "loopback %s never came up\n", + LOOPBACK_MODE(efx)); + rc = -EIO; + goto out; + } + + EFX_LOG(efx, "link came up in %s loopback in %d iterations\n", + LOOPBACK_MODE(efx), count); + + /* Test every TX queue */ + efx_for_each_tx_queue(tx_queue, efx) { + rc |= efx_test_loopback(tx_queue, + &tests->loopback[mode]); + if (rc) + goto out; + } + } + + out: + /* Take out of loopback and restore PHY settings */ + state->flush = 1; + efx->loopback_mode = old_mode; + efx_ethtool_set_settings(efx->net_dev, &ecmd); + + return rc; +} + +/************************************************************************** + * + * Entry points + * + *************************************************************************/ + +/* Online (i.e. non-disruptive) testing + * This checks interrupt generation, event delivery and PHY presence. 
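/*
 * A hypothetical caller, shown only to illustrate the result convention
 * documented in selftest.h (non-counter fields: 1 = pass, -1 = fail).
 * In the driver itself the results are presumably consumed by the ethtool
 * self-test path rather than by code like this.
 */
static int example_run_online_test(struct efx_nic *efx)
{
	struct efx_self_tests *tests;
	int rc;

	tests = kzalloc(sizeof(*tests), GFP_KERNEL);
	if (!tests)
		return -ENOMEM;

	rc = efx_online_test(efx, tests);
	EFX_LOG(efx, "interrupt %d, phy %d (1 = pass, -1 = fail)\n",
		tests->interrupt, tests->phy_ok);

	kfree(tests);
	return rc;
}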
*/ +int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) +{ + struct efx_channel *channel; + int rc = 0; + + EFX_LOG(efx, "performing online self-tests\n"); + + rc |= efx_test_interrupts(efx, tests); + efx_for_each_channel(channel, efx) { + if (channel->has_interrupt) + rc |= efx_test_eventq_irq(channel, tests); + else + rc |= efx_test_eventq(channel, tests); + } + rc |= efx_test_phy(efx, tests); + + if (rc) + EFX_ERR(efx, "failed online self-tests\n"); + + return rc; +} + +/* Offline (i.e. disruptive) testing + * This checks MAC and PHY loopback on the specified port. */ +int efx_offline_test(struct efx_nic *efx, + struct efx_self_tests *tests, unsigned int loopback_modes) +{ + struct efx_selftest_state *state; + int rc = 0; + + EFX_LOG(efx, "performing offline self-tests\n"); + + /* Create a selftest_state structure to hold state for the test */ + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) { + rc = -ENOMEM; + goto out; + } + + /* Set the port loopback_selftest member. From this point on + * all received packets will be dropped. Mark the state as + * "flushing" so all inflight packets are dropped */ + BUG_ON(efx->loopback_selftest); + state->flush = 1; + efx->loopback_selftest = state; + + rc = efx_test_loopbacks(efx, tests, loopback_modes); + + efx->loopback_selftest = NULL; + wmb(); + kfree(state); + + out: + if (rc) + EFX_ERR(efx, "failed offline self-tests\n"); + + return rc; +} + diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h new file mode 100644 index 00000000000..f6999c2b622 --- /dev/null +++ b/drivers/net/sfc/selftest.h @@ -0,0 +1,50 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2008 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_SELFTEST_H +#define EFX_SELFTEST_H + +#include "net_driver.h" + +/* + * Self tests + */ + +struct efx_loopback_self_tests { + int tx_sent[EFX_MAX_TX_QUEUES]; + int tx_done[EFX_MAX_TX_QUEUES]; + int rx_good; + int rx_bad; +}; + +/* Efx self test results + * For fields which are not counters, 1 indicates success and -1 + * indicates failure. 
+ */ +struct efx_self_tests { + int interrupt; + int eventq_dma[EFX_MAX_CHANNELS]; + int eventq_int[EFX_MAX_CHANNELS]; + int eventq_poll[EFX_MAX_CHANNELS]; + int phy_ok; + int loopback_speed; + int loopback_full_duplex; + struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; +}; + +extern void efx_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len); +extern int efx_online_test(struct efx_nic *efx, + struct efx_self_tests *tests); +extern int efx_offline_test(struct efx_nic *efx, + struct efx_self_tests *tests, + unsigned int loopback_modes); + +#endif /* EFX_SELFTEST_H */ diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 11fa9fb8f48..66a0d1442ab 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c @@ -116,20 +116,29 @@ void sfe4001_poweroff(struct efx_nic *efx) /* Turn off all power rails */ out = 0xff; - (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); + efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); /* Disable port 1 outputs on IO expander */ cfg = 0xff; - (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); + efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); /* Disable port 0 outputs on IO expander */ cfg = 0xff; - (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); + efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); /* Clear any over-temperature alert */ - (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); + efx_i2c_read(i2c, MAX6647, RSL, &in, 1); } +/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected + * to the FLASH_CFG_1 input on the DSP. We must keep it high at power- + * up to allow writing the flash (done through MDIO from userland). + */ +unsigned int sfe4001_phy_flash_cfg; +module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); +MODULE_PARM_DESC(phy_flash_cfg, + "Force PHY to enter flash configuration mode"); + /* This board uses an I2C expander to provider power to the PHY, which needs to * be turned on before the PHY can be used. 
* Context: Process context, rtnl lock held @@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx) out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | (1 << P0_X_TRST_LBN)); + if (sfe4001_phy_flash_cfg) + out |= 1 << P0_EN_3V3X_LBN; rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); if (rc) @@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx) if (in & (1 << P1_AFE_PWD_LBN)) goto done; + /* DSP doesn't look powered in flash config mode */ + if (sfe4001_phy_flash_cfg) + goto done; } while (++count < 20); EFX_INFO(efx, "timed out waiting for power\n"); @@ -239,14 +253,14 @@ done: fail3: /* Turn off all power rails */ out = 0xff; - (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); + efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); /* Disable port 1 outputs on IO expander */ out = 0xff; - (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); + efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); fail2: /* Disable port 0 outputs on IO expander */ out = 0xff; - (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); + efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); fail1: return rc; } diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index a2e9f79e47b..c0146061c32 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c @@ -24,6 +24,11 @@ MDIO_MMDREG_DEVS0_PCS | \ MDIO_MMDREG_DEVS0_PHYXS) +#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ + (1 << LOOPBACK_PCS) | \ + (1 << LOOPBACK_PMAPMD) | \ + (1 << LOOPBACK_NETWORK)) + /* We complain if we fail to see the link partner as 10G capable this many * times in a row (must be > 1 as sampling the autoneg. registers is racy) */ @@ -72,6 +77,10 @@ #define PMA_PMD_BIST_RXD_LBN (1) #define PMA_PMD_BIST_AFE_LBN (0) +/* Special Software reset register */ +#define PMA_PMD_EXT_CTRL_REG 49152 +#define PMA_PMD_EXT_SSR_LBN 15 + #define BIST_MAX_DELAY (1000) #define BIST_POLL_DELAY (10) @@ -86,6 +95,11 @@ #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ #define CLK312_EN_LBN 3 +/* PHYXS registers */ +#define PHYXS_TEST1 (49162) +#define LOOPBACK_NEAR_LBN (8) +#define LOOPBACK_NEAR_WIDTH (1) + /* Boot status register */ #define PCS_BOOT_STATUS_REG (0xd000) #define PCS_BOOT_FATAL_ERR_LBN (0) @@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold, struct tenxpress_phy_data { enum tenxpress_state state; + enum efx_loopback_mode loopback_mode; atomic_t bad_crc_count; + int tx_disabled; int bad_lp_tries; }; @@ -195,14 +211,18 @@ static int tenxpress_phy_init(struct efx_nic *efx) int rc = 0; phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (!phy_data) + return -ENOMEM; efx->phy_data = phy_data; tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); - rc = mdio_clause45_wait_reset_mmds(efx, - TENXPRESS_REQUIRED_DEVS); - if (rc < 0) - goto fail; + if (!sfe4001_phy_flash_cfg) { + rc = mdio_clause45_wait_reset_mmds(efx, + TENXPRESS_REQUIRED_DEVS); + if (rc < 0) + goto fail; + } rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); if (rc < 0) @@ -225,6 +245,35 @@ static int tenxpress_phy_init(struct efx_nic *efx) return rc; } +static int tenxpress_special_reset(struct efx_nic *efx) +{ + int rc, reg; + + EFX_TRACE(efx, "%s\n", __func__); + + /* Initiate reset */ + reg = mdio_clause45_read(efx, efx->mii.phy_id, + MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG); + reg |= (1 << PMA_PMD_EXT_SSR_LBN); + mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, + PMA_PMD_EXT_CTRL_REG, reg); + + msleep(200); + + /* Wait for the blocks to come out of reset */ 
+ rc = mdio_clause45_wait_reset_mmds(efx, + TENXPRESS_REQUIRED_DEVS); + if (rc < 0) + return rc; + + /* Try and reconfigure the device */ + rc = tenxpress_init(efx); + if (rc < 0) + return rc; + + return 0; +} + static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) { struct tenxpress_phy_data *pd = efx->phy_data; @@ -299,11 +348,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp) return ok; } +static void tenxpress_phyxs_loopback(struct efx_nic *efx) +{ + int phy_id = efx->mii.phy_id; + int ctrl1, ctrl2; + + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, + PHYXS_TEST1); + if (efx->loopback_mode == LOOPBACK_PHYXS) + ctrl2 |= (1 << LOOPBACK_NEAR_LBN); + else + ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN); + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, + PHYXS_TEST1, ctrl2); +} + static void tenxpress_phy_reconfigure(struct efx_nic *efx) { + struct tenxpress_phy_data *phy_data = efx->phy_data; + int loop_change = LOOPBACK_OUT_OF(phy_data, efx, + TENXPRESS_LOOPBACKS); + if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) return; + /* When coming out of transmit disable, coming out of low power + * mode, or moving out of any PHY internal loopback mode, + * perform a special software reset */ + if ((phy_data->tx_disabled && !efx->tx_disabled) || + loop_change) { + tenxpress_special_reset(efx); + falcon_reset_xaui(efx); + } + + mdio_clause45_transmit_disable(efx); + mdio_clause45_phy_reconfigure(efx); + tenxpress_phyxs_loopback(efx); + + phy_data->tx_disabled = efx->tx_disabled; + phy_data->loopback_mode = efx->loopback_mode; efx->link_up = tenxpress_link_ok(efx, 0); efx->link_options = GM_LPA_10000FULL; } @@ -431,4 +515,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = { .clear_interrupt = tenxpress_phy_clear_interrupt, .reset_xaui = tenxpress_reset_xaui, .mmds = TENXPRESS_REQUIRED_DEVS, + .loopbacks = TENXPRESS_LOOPBACKS, }; diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index fbb866b2185..5cdd082ab8f 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } } +/** + * struct efx_tso_header - a DMA mapped buffer for packet headers + * @next: Linked list of free ones. + * The list is protected by the TX queue lock. + * @dma_unmap_len: Length to unmap for an oversize buffer, or 0. + * @dma_addr: The DMA address of the header below. + * + * This controls the memory used for a TSO header. Use TSOH_DATA() + * to find the packet header data. Use TSOH_SIZE() to calculate the + * total size required for a given packet header length. TSO headers + * in the free list are exactly %TSOH_STD_SIZE bytes in size. 
+ */ +struct efx_tso_header { + union { + struct efx_tso_header *next; + size_t unmap_len; + }; + dma_addr_t dma_addr; +}; + +static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb); +static void efx_fini_tso(struct efx_tx_queue *tx_queue); +static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, + struct efx_tso_header *tsoh); + +static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer) +{ + if (buffer->tsoh) { + if (likely(!buffer->tsoh->unmap_len)) { + buffer->tsoh->next = tx_queue->tso_headers_free; + tx_queue->tso_headers_free = buffer->tsoh; + } else { + efx_tsoh_heap_free(tx_queue, buffer->tsoh); + } + buffer->tsoh = NULL; + } +} + /* * Add a socket buffer to a TX queue @@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); + if (skb_shinfo((struct sk_buff *)skb)->gso_size) + return efx_enqueue_skb_tso(tx_queue, skb); + /* Get size of the initial fragment */ len = skb_headlen(skb); @@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, insert_ptr = (tx_queue->insert_count & efx->type->txd_ring_mask); buffer = &tx_queue->buffer[insert_ptr]; + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->tsoh); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->continuation != 1); @@ -342,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) if (unlikely(tx_queue->stopped)) { fill_level = tx_queue->insert_count - tx_queue->read_count; if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { - EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); + EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); /* Do this under netif_tx_lock(), to avoid racing * with efx_xmit(). */ @@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) efx_release_tx_buffers(tx_queue); + /* Free up TSO header cache */ + efx_fini_tso(tx_queue); + /* Release queue's stop on port, if any */ if (tx_queue->stopped) { tx_queue->stopped = 0; @@ -450,3 +498,622 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) } +/* Efx TCP segmentation acceleration. + * + * Why? Because by doing it here in the driver we can go significantly + * faster than the GSO. + * + * Requires TX checksum offload support. + */ + +/* Number of bytes inserted at the start of a TSO header buffer, + * similar to NET_IP_ALIGN. + */ +#if defined(__i386__) || defined(__x86_64__) +#define TSOH_OFFSET 0 +#else +#define TSOH_OFFSET NET_IP_ALIGN +#endif + +#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) + +/* Total size of struct efx_tso_header, buffer and padding */ +#define TSOH_SIZE(hdr_len) \ + (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) + +/* Size of blocks on free list. Larger blocks must be allocated from + * the heap. + */ +#define TSOH_STD_SIZE 128 + +#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) +#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) +#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) +#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) + +/** + * struct tso_state - TSO state for an SKB + * @remaining_len: Bytes of data we've yet to segment + * @seqnum: Current sequence number + * @packet_space: Remaining space in current packet + * @ifc: Input fragment cursor. + * Where we are in the current fragment of the incoming SKB. 
These + * values get updated in place when we split a fragment over + * multiple packets. + * @p: Parameters. + * These values are set once at the start of the TSO send and do + * not get changed as the routine progresses. + * + * The state used during segmentation. It is put into this data structure + * just to make it easy to pass into inline functions. + */ +struct tso_state { + unsigned remaining_len; + unsigned seqnum; + unsigned packet_space; + + struct { + /* DMA address of current position */ + dma_addr_t dma_addr; + /* Remaining length */ + unsigned int len; + /* DMA address and length of the whole fragment */ + unsigned int unmap_len; + dma_addr_t unmap_addr; + struct page *page; + unsigned page_off; + } ifc; + + struct { + /* The number of bytes of header */ + unsigned int header_length; + + /* The number of bytes to put in each outgoing segment. */ + int full_packet_size; + + /* Current IPv4 ID, host endian. */ + unsigned ipv4_id; + } p; +}; + + +/* + * Verify that our various assumptions about sk_buffs and the conditions + * under which TSO will be attempted hold true. + */ +static inline void efx_tso_check_safe(const struct sk_buff *skb) +{ + EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); + EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != + skb->protocol); + EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); + EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + + (tcp_hdr(skb)->doff << 2u)) > + skb_headlen(skb)); +} + + +/* + * Allocate a page worth of efx_tso_header structures, and string them + * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. + */ +static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) +{ + + struct pci_dev *pci_dev = tx_queue->efx->pci_dev; + struct efx_tso_header *tsoh; + dma_addr_t dma_addr; + u8 *base_kva, *kva; + + base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); + if (base_kva == NULL) { + EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" + " headers\n"); + return -ENOMEM; + } + + /* pci_alloc_consistent() allocates pages. */ + EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); + + for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { + tsoh = (struct efx_tso_header *)kva; + tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); + tsoh->next = tx_queue->tso_headers_free; + tx_queue->tso_headers_free = tsoh; + } + + return 0; +} + + +/* Free up a TSO header, and all others in the same page. 
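/*
 * Worked example of the header-pool sizing: a standard free-list block is
 * TSOH_STD_SIZE (128) bytes, which must hold struct efx_tso_header (about
 * 16 bytes on a typical 64-bit build -- an assumption, it depends on
 * dma_addr_t and the architecture), TSOH_OFFSET (0 on x86, NET_IP_ALIGN
 * elsewhere) and the packet headers.  That leaves roughly 110 bytes, which
 * comfortably covers Ethernet (14) + optionless IPv4 (20) + TCP with a full
 * 40 bytes of options (60) = 94 bytes.  Longer headers (e.g. with IP
 * options) take the efx_tsoh_heap_alloc() path instead and are counted in
 * tso_long_headers.
 */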
*/ +static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, + struct efx_tso_header *tsoh, + struct pci_dev *pci_dev) +{ + struct efx_tso_header **p; + unsigned long base_kva; + dma_addr_t base_dma; + + base_kva = (unsigned long)tsoh & PAGE_MASK; + base_dma = tsoh->dma_addr & PAGE_MASK; + + p = &tx_queue->tso_headers_free; + while (*p != NULL) { + if (((unsigned long)*p & PAGE_MASK) == base_kva) + *p = (*p)->next; + else + p = &(*p)->next; + } + + pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); +} + +static struct efx_tso_header * +efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) +{ + struct efx_tso_header *tsoh; + + tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); + if (unlikely(!tsoh)) + return NULL; + + tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, + TSOH_BUFFER(tsoh), header_len, + PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { + kfree(tsoh); + return NULL; + } + + tsoh->unmap_len = header_len; + return tsoh; +} + +static void +efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) +{ + pci_unmap_single(tx_queue->efx->pci_dev, + tsoh->dma_addr, tsoh->unmap_len, + PCI_DMA_TODEVICE); + kfree(tsoh); +} + +/** + * efx_tx_queue_insert - push descriptors onto the TX queue + * @tx_queue: Efx TX queue + * @dma_addr: DMA address of fragment + * @len: Length of fragment + * @skb: Only non-null for end of last segment + * @end_of_packet: True if last fragment in a packet + * @unmap_addr: DMA address of fragment for unmapping + * @unmap_len: Only set this in last segment of a fragment + * + * Push descriptors onto the TX queue. Return 0 on success or 1 if + * @tx_queue full. + */ +static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned len, + const struct sk_buff *skb, int end_of_packet, + dma_addr_t unmap_addr, unsigned unmap_len) +{ + struct efx_tx_buffer *buffer; + struct efx_nic *efx = tx_queue->efx; + unsigned dma_len, fill_level, insert_ptr, misalign; + int q_space; + + EFX_BUG_ON_PARANOID(len <= 0); + + fill_level = tx_queue->insert_count - tx_queue->old_read_count; + /* -1 as there is no way to represent all descriptors used */ + q_space = efx->type->txd_ring_mask - 1 - fill_level; + + while (1) { + if (unlikely(q_space-- <= 0)) { + /* It might be that completions have happened + * since the xmit path last checked. Update + * the xmit path's copy of read_count. + */ + ++tx_queue->stopped; + /* This memory barrier protects the change of + * stopped from the access of read_count. 
*/ + smp_mb(); + tx_queue->old_read_count = + *(volatile unsigned *)&tx_queue->read_count; + fill_level = (tx_queue->insert_count + - tx_queue->old_read_count); + q_space = efx->type->txd_ring_mask - 1 - fill_level; + if (unlikely(q_space-- <= 0)) + return 1; + smp_mb(); + --tx_queue->stopped; + } + + insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; + buffer = &tx_queue->buffer[insert_ptr]; + ++tx_queue->insert_count; + + EFX_BUG_ON_PARANOID(tx_queue->insert_count - + tx_queue->read_count > + efx->type->txd_ring_mask); + + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->unmap_len); + EFX_BUG_ON_PARANOID(buffer->skb); + EFX_BUG_ON_PARANOID(buffer->continuation != 1); + EFX_BUG_ON_PARANOID(buffer->tsoh); + + buffer->dma_addr = dma_addr; + + /* Ensure we do not cross a boundary unsupported by H/W */ + dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1; + + misalign = (unsigned)dma_addr & efx->type->bug5391_mask; + if (misalign && dma_len + misalign > 512) + dma_len = 512 - misalign; + + /* If there is enough space to send then do so */ + if (dma_len >= len) + break; + + buffer->len = dma_len; /* Don't set the other members */ + dma_addr += dma_len; + len -= dma_len; + } + + EFX_BUG_ON_PARANOID(!len); + buffer->len = len; + buffer->skb = skb; + buffer->continuation = !end_of_packet; + buffer->unmap_addr = unmap_addr; + buffer->unmap_len = unmap_len; + return 0; +} + + +/* + * Put a TSO header into the TX queue. + * + * This is special-cased because we know that it is small enough to fit in + * a single fragment, and we know it doesn't cross a page boundary. It + * also allows us to not worry about end-of-packet etc. + */ +static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, + struct efx_tso_header *tsoh, unsigned len) +{ + struct efx_tx_buffer *buffer; + + buffer = &tx_queue->buffer[tx_queue->insert_count & + tx_queue->efx->type->txd_ring_mask]; + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->unmap_len); + EFX_BUG_ON_PARANOID(buffer->skb); + EFX_BUG_ON_PARANOID(buffer->continuation != 1); + EFX_BUG_ON_PARANOID(buffer->tsoh); + buffer->len = len; + buffer->dma_addr = tsoh->dma_addr; + buffer->tsoh = tsoh; + + ++tx_queue->insert_count; +} + + +/* Remove descriptors put into a tx_queue. */ +static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + + /* Work backwards until we hit the original insert pointer value */ + while (tx_queue->insert_count != tx_queue->write_count) { + --tx_queue->insert_count; + buffer = &tx_queue->buffer[tx_queue->insert_count & + tx_queue->efx->type->txd_ring_mask]; + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->skb); + buffer->len = 0; + buffer->continuation = 1; + if (buffer->unmap_len) { + pci_unmap_page(tx_queue->efx->pci_dev, + buffer->unmap_addr, + buffer->unmap_len, PCI_DMA_TODEVICE); + buffer->unmap_len = 0; + } + } +} + + +/* Parse the SKB header and initialise state. */ +static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) +{ + /* All ethernet/IP/TCP headers combined size is TCP header size + * plus offset of TCP header relative to start of packet. 
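/*
 * The segmentation arithmetic used by tso_start() and
 * tso_start_new_packet() below, restated as a standalone sketch: the
 * payload (skb->len - header_length) is cut into gso_size-byte pieces, the
 * TCP sequence number advances by gso_size per segment, the IPv4 ID by one,
 * and only the final (possibly short) segment keeps the original FIN/PSH
 * flags.  The sample numbers are illustrative.
 */
#include <stdio.h>

static void tso_plan(unsigned int skb_len, unsigned int header_length,
		     unsigned int gso_size, unsigned int first_seq)
{
	unsigned int payload = skb_len - header_length;
	unsigned int nsegs = (payload + gso_size - 1) / gso_size;
	unsigned int i, seq = first_seq, left = payload;

	for (i = 0; i < nsegs; i++) {
		unsigned int seg = left < gso_size ? left : gso_size;

		printf("segment %u: seq %u, %u payload bytes%s\n",
		       i, seq, seg,
		       left <= gso_size ? " (carries FIN/PSH)" : "");
		seq += seg;
		left -= seg;
	}
}

int main(void)
{
	/* e.g. a 7300-byte skb with 54 bytes of headers and an MSS of 1448 */
	tso_plan(7300, 54, 1448, 1000);
	return 0;
}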
+ */ + st->p.header_length = ((tcp_hdr(skb)->doff << 2u) + + PTR_DIFF(tcp_hdr(skb), skb->data)); + st->p.full_packet_size = (st->p.header_length + + skb_shinfo(skb)->gso_size); + + st->p.ipv4_id = ntohs(ip_hdr(skb)->id); + st->seqnum = ntohl(tcp_hdr(skb)->seq); + + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); + + st->packet_space = st->p.full_packet_size; + st->remaining_len = skb->len - st->p.header_length; +} + + +/** + * tso_get_fragment - record fragment details and map for DMA + * @st: TSO state + * @efx: Efx NIC + * @data: Pointer to fragment data + * @len: Length of fragment + * + * Record fragment details and map for DMA. Return 0 on success, or + * -%ENOMEM if DMA mapping fails. + */ +static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, + int len, struct page *page, int page_off) +{ + + st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, + len, PCI_DMA_TODEVICE); + if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { + st->ifc.unmap_len = len; + st->ifc.len = len; + st->ifc.dma_addr = st->ifc.unmap_addr; + st->ifc.page = page; + st->ifc.page_off = page_off; + return 0; + } + return -ENOMEM; +} + + +/** + * tso_fill_packet_with_fragment - form descriptors for the current fragment + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * + * Form descriptors for the current fragment, until we reach the end + * of fragment or end-of-packet. Return 0 on success, 1 if not enough + * space in @tx_queue. + */ +static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) +{ + + int n, end_of_packet, rc; + + if (st->ifc.len == 0) + return 0; + if (st->packet_space == 0) + return 0; + + EFX_BUG_ON_PARANOID(st->ifc.len <= 0); + EFX_BUG_ON_PARANOID(st->packet_space <= 0); + + n = min(st->ifc.len, st->packet_space); + + st->packet_space -= n; + st->remaining_len -= n; + st->ifc.len -= n; + st->ifc.page_off += n; + end_of_packet = st->remaining_len == 0 || st->packet_space == 0; + + rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, + st->remaining_len ? NULL : skb, + end_of_packet, st->ifc.unmap_addr, + st->ifc.len ? 0 : st->ifc.unmap_len); + + st->ifc.dma_addr += n; + + return rc; +} + + +/** + * tso_start_new_packet - generate a new header and prepare for the new packet + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * + * Generate a new header and prepare for the new packet. Return 0 on + * success, or -1 if failed to alloc header. + */ +static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) +{ + struct efx_tso_header *tsoh; + struct iphdr *tsoh_iph; + struct tcphdr *tsoh_th; + unsigned ip_length; + u8 *header; + + /* Allocate a DMA-mapped header buffer. 
*/ + if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { + if (tx_queue->tso_headers_free == NULL) { + if (efx_tsoh_block_alloc(tx_queue)) + return -1; + } + EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); + tsoh = tx_queue->tso_headers_free; + tx_queue->tso_headers_free = tsoh->next; + tsoh->unmap_len = 0; + } else { + tx_queue->tso_long_headers++; + tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); + if (unlikely(!tsoh)) + return -1; + } + + header = TSOH_BUFFER(tsoh); + tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); + tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); + + /* Copy and update the headers. */ + memcpy(header, skb->data, st->p.header_length); + + tsoh_th->seq = htonl(st->seqnum); + st->seqnum += skb_shinfo(skb)->gso_size; + if (st->remaining_len > skb_shinfo(skb)->gso_size) { + /* This packet will not finish the TSO burst. */ + ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); + tsoh_th->fin = 0; + tsoh_th->psh = 0; + } else { + /* This packet will be the last in the TSO burst. */ + ip_length = (st->p.header_length - ETH_HDR_LEN(skb) + + st->remaining_len); + tsoh_th->fin = tcp_hdr(skb)->fin; + tsoh_th->psh = tcp_hdr(skb)->psh; + } + tsoh_iph->tot_len = htons(ip_length); + + /* Linux leaves suitable gaps in the IP ID space for us to fill. */ + tsoh_iph->id = htons(st->p.ipv4_id); + st->p.ipv4_id++; + + st->packet_space = skb_shinfo(skb)->gso_size; + ++tx_queue->tso_packets; + + /* Form a descriptor for this header. */ + efx_tso_put_header(tx_queue, tsoh, st->p.header_length); + + return 0; +} + + +/** + * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * + * Context: You must hold netif_tx_lock() to call this function. + * + * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if + * @skb was not enqueued. In all cases @skb is consumed. Return + * %NETDEV_TX_OK or %NETDEV_TX_BUSY. + */ +static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb) +{ + int frag_i, rc, rc2 = NETDEV_TX_OK; + struct tso_state state; + skb_frag_t *f; + + /* Verify TSO is safe - these checks should never fail. */ + efx_tso_check_safe(skb); + + EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); + + tso_start(&state, skb); + + /* Assume that skb header area contains exactly the headers, and + * all payload is in the frag list. + */ + if (skb_headlen(skb) == state.p.header_length) { + /* Grab the first payload fragment. */ + EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); + frag_i = 0; + f = &skb_shinfo(skb)->frags[frag_i]; + rc = tso_get_fragment(&state, tx_queue->efx, + f->size, f->page, f->page_offset); + if (rc) + goto mem_err; + } else { + /* It may look like this code fragment assumes that the + * skb->data portion does not cross a page boundary, but + * that is not the case. It is guaranteed to be direct + * mapped memory, and therefore is physically contiguous, + * and so DMA will work fine. kmap_atomic() on this region + * will just return the direct mapping, so that will work + * too. 
+ */ + int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1); + int hl = state.p.header_length; + rc = tso_get_fragment(&state, tx_queue->efx, + skb_headlen(skb) - hl, + virt_to_page(skb->data), page_off + hl); + if (rc) + goto mem_err; + frag_i = -1; + } + + if (tso_start_new_packet(tx_queue, skb, &state) < 0) + goto mem_err; + + while (1) { + rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); + if (unlikely(rc)) + goto stop; + + /* Move onto the next fragment? */ + if (state.ifc.len == 0) { + if (++frag_i >= skb_shinfo(skb)->nr_frags) + /* End of payload reached. */ + break; + f = &skb_shinfo(skb)->frags[frag_i]; + rc = tso_get_fragment(&state, tx_queue->efx, + f->size, f->page, f->page_offset); + if (rc) + goto mem_err; + } + + /* Start at new packet? */ + if (state.packet_space == 0 && + tso_start_new_packet(tx_queue, skb, &state) < 0) + goto mem_err; + } + + /* Pass off to hardware */ + falcon_push_buffers(tx_queue); + + tx_queue->tso_bursts++; + return NETDEV_TX_OK; + + mem_err: + EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" + " error\n"); + dev_kfree_skb_any((struct sk_buff *)skb); + goto unwind; + + stop: + rc2 = NETDEV_TX_BUSY; + + /* Stop the queue if it wasn't stopped before. */ + if (tx_queue->stopped == 1) + efx_stop_queue(tx_queue->efx); + + unwind: + efx_enqueue_unwind(tx_queue); + return rc2; +} + + +/* + * Free up all TSO datastructures associated with tx_queue. This + * routine should be called only once the tx_queue is both empty and + * will no longer be used. + */ +static void efx_fini_tso(struct efx_tx_queue *tx_queue) +{ + unsigned i; + + if (tx_queue->buffer) { + for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) + efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); + } + + while (tx_queue->tso_headers_free != NULL) + efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, + tx_queue->efx->pci_dev); +} diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index dca62f19019..35ab19c27f8 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h @@ -16,7 +16,7 @@ */ #define EFX_WORKAROUND_ALWAYS(efx) 1 -#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) +#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) /* XAUI resets if link not detected */ #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c index 66dd5bf1eaa..f3684ad2888 100644 --- a/drivers/net/sfc/xfp_phy.c +++ b/drivers/net/sfc/xfp_phy.c @@ -24,6 +24,10 @@ MDIO_MMDREG_DEVS0_PMAPMD | \ MDIO_MMDREG_DEVS0_PHYXS) +#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ + (1 << LOOPBACK_PMAPMD) | \ + (1 << LOOPBACK_NETWORK)) + /****************************************************************************/ /* Quake-specific MDIO registers */ #define MDIO_QUAKE_LED0_REG (0xD006) @@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode) mode); } +struct xfp_phy_data { + int tx_disabled; +}; + #define XFP_MAX_RESET_TIME 500 #define XFP_RESET_WAIT 10 @@ -72,18 +80,33 @@ static int xfp_reset_phy(struct efx_nic *efx) static int xfp_phy_init(struct efx_nic *efx) { + struct xfp_phy_data *phy_data; u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); int rc; + phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); + if (!phy_data) + return -ENOMEM; + efx->phy_data = phy_data; + EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), MDIO_ID_REV(devid)); + 
phy_data->tx_disabled = efx->tx_disabled; + rc = xfp_reset_phy(efx); EFX_INFO(efx, "XFP: PHY init %s.\n", rc ? "failed" : "successful"); + if (rc < 0) + goto fail; + return 0; + + fail: + kfree(efx->phy_data); + efx->phy_data = NULL; return rc; } @@ -110,6 +133,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx) static void xfp_phy_reconfigure(struct efx_nic *efx) { + struct xfp_phy_data *phy_data = efx->phy_data; + + /* Reset the PHY when moving from tx off to tx on */ + if (phy_data->tx_disabled && !efx->tx_disabled) + xfp_reset_phy(efx); + + mdio_clause45_transmit_disable(efx); + mdio_clause45_phy_reconfigure(efx); + + phy_data->tx_disabled = efx->tx_disabled; efx->link_up = xfp_link_ok(efx); efx->link_options = GM_LPA_10000FULL; } @@ -119,6 +152,10 @@ static void xfp_phy_fini(struct efx_nic *efx) { /* Clobber the LED if it was blinking */ efx->board_info.blink(efx, 0); + + /* Free the context block */ + kfree(efx->phy_data); + efx->phy_data = NULL; } struct efx_phy_operations falcon_xfp_phy_ops = { @@ -129,4 +166,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = { .clear_interrupt = xfp_phy_clear_interrupt, .reset_xaui = efx_port_dummy_op_void, .mmds = XFP_REQUIRED_DEVS, + .loopbacks = XFP_LOOPBACKS, }; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index f226bcac7d1..c8a5ef2d75f 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -118,6 +118,7 @@ static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ @@ -1159,17 +1160,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } #ifdef SKY2_VLAN_TAG_USED -static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) { - struct sky2_port *sky2 = netdev_priv(dev); - struct sky2_hw *hw = sky2->hw; - u16 port = sky2->port; - - netif_tx_lock_bh(dev); - napi_disable(&hw->napi); - - sky2->vlgrp = grp; - if (grp) { + if (onoff) { sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), @@ -1180,6 +1173,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); } +} + +static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 port = sky2->port; + + netif_tx_lock_bh(dev); + napi_disable(&hw->napi); + + sky2->vlgrp = grp; + sky2_set_vlan_mode(hw, port, grp != NULL); sky2_read32(hw, B0_Y2_SP_LISR); napi_enable(&hw->napi); @@ -1418,6 +1424,10 @@ static int sky2_up(struct net_device *dev) sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, TX_RING_SIZE - 1); +#ifdef SKY2_VLAN_TAG_USED + sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); +#endif + err = sky2_rx_start(sky2); if (err) goto err_out; @@ -4395,7 +4405,9 @@ static int sky2_resume(struct pci_dev *pdev) if (err) { printk(KERN_ERR PFX "%s: could not up: %d\n", dev->name, err); + rtnl_lock(); dev_close(dev); + rtnl_unlock(); goto out; } } diff --git a/drivers/net/sky2.h 
b/drivers/net/sky2.h index 7bb3ba9bcbd..c0a5eea2000 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -1966,13 +1966,13 @@ struct sky2_status_le { struct tx_ring_info { struct sk_buff *skb; DECLARE_PCI_UNMAP_ADDR(mapaddr); - DECLARE_PCI_UNMAP_ADDR(maplen); + DECLARE_PCI_UNMAP_LEN(maplen); }; struct rx_ring_info { struct sk_buff *skb; dma_addr_t data_addr; - DECLARE_PCI_UNMAP_ADDR(data_size); + DECLARE_PCI_UNMAP_LEN(data_size); dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; }; diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 4e280020518..e2ee91a6ae7 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -136,7 +136,6 @@ struct smc911x_local { /* work queue */ struct work_struct phy_configure; - int work_pending; int tx_throttle; spinlock_t lock; @@ -960,11 +959,11 @@ static void smc911x_phy_configure(struct work_struct *work) * We should not be called if phy_type is zero. */ if (lp->phy_type == 0) - goto smc911x_phy_configure_exit_nolock; + return; if (smc911x_phy_reset(dev, phyaddr)) { printk("%s: PHY reset timed out\n", dev->name); - goto smc911x_phy_configure_exit_nolock; + return; } spin_lock_irqsave(&lp->lock, flags); @@ -1033,8 +1032,6 @@ static void smc911x_phy_configure(struct work_struct *work) smc911x_phy_configure_exit: spin_unlock_irqrestore(&lp->lock, flags); -smc911x_phy_configure_exit_nolock: - lp->work_pending = 0; } /* @@ -1356,11 +1353,8 @@ static void smc911x_timeout(struct net_device *dev) * smc911x_phy_configure() calls msleep() which calls schedule_timeout() * which calls schedule(). Hence we use a work queue. */ - if (lp->phy_type != 0) { - if (schedule_work(&lp->phy_configure)) { - lp->work_pending = 1; - } - } + if (lp->phy_type != 0) + schedule_work(&lp->phy_configure); /* We can accept TX packets again */ dev->trans_start = jiffies; @@ -1531,16 +1525,8 @@ static int smc911x_close(struct net_device *dev) if (lp->phy_type != 0) { /* We need to ensure that no calls to * smc911x_phy_configure are pending. - - * flush_scheduled_work() cannot be called because we - * are running with the netlink semaphore held (from - * devinet_ioctl()) and the pending work queue - * contains linkwatch_event() (scheduled by - * netif_carrier_off() above). linkwatch_event() also - * wants the netlink semaphore. */ - while (lp->work_pending) - schedule(); + cancel_work_sync(&lp->phy_configure); smc911x_phy_powerdown(dev, lp->mii.phy_id); } diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index a188e33484e..f2051b209da 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -1016,15 +1016,8 @@ static void smc_phy_powerdown(struct net_device *dev) /* We need to ensure that no calls to smc_phy_configure are pending. - - flush_scheduled_work() cannot be called because we are - running with the netlink semaphore held (from - devinet_ioctl()) and the pending work queue contains - linkwatch_event() (scheduled by netif_carrier_off() - above). linkwatch_event() also wants the netlink semaphore. */ - while(lp->work_pending) - yield(); + cancel_work_sync(&lp->phy_configure); bmcr = smc_phy_read(dev, phy, MII_BMCR); smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); @@ -1161,7 +1154,6 @@ static void smc_phy_configure(struct work_struct *work) smc_phy_configure_exit: SMC_SELECT_BANK(lp, 2); spin_unlock_irq(&lp->lock); - lp->work_pending = 0; } /* @@ -1389,11 +1381,8 @@ static void smc_timeout(struct net_device *dev) * smc_phy_configure() calls msleep() which calls schedule_timeout() * which calls schedule(). Hence we use a work queue. 
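/*
 * The pattern adopted here, reduced to a minimal sketch (all names are
 * illustrative): the timeout path simply calls schedule_work(), and
 * teardown calls cancel_work_sync(), which removes a pending instance of
 * this one work item and waits for a running one to finish -- so neither
 * the old work_pending flag nor a flush of the whole shared workqueue is
 * needed.
 */
#include <linux/workqueue.h>

struct example_priv {
	struct work_struct phy_configure;
};

static void example_phy_configure(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, phy_configure);

	/* reconfigure the PHY; may sleep, hence the workqueue */
	(void)priv;
}

static void example_init(struct example_priv *priv)
{
	INIT_WORK(&priv->phy_configure, example_phy_configure);
}

static void example_tx_timeout(struct example_priv *priv)
{
	schedule_work(&priv->phy_configure);
}

static void example_close(struct example_priv *priv)
{
	/* Guarantees example_phy_configure() is neither pending nor running */
	cancel_work_sync(&priv->phy_configure);
}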
*/ - if (lp->phy_type != 0) { - if (schedule_work(&lp->phy_configure)) { - lp->work_pending = 1; - } - } + if (lp->phy_type != 0) + schedule_work(&lp->phy_configure); /* We can accept TX packets again */ dev->trans_start = jiffies; diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 69e97a1cb1c..8606818653f 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -93,14 +93,14 @@ #define SMC_insw(a, r, p, l) insw ((unsigned long *)((a) + (r)), p, l) # endif /* check if the mac in reg is valid */ -#define SMC_GET_MAC_ADDR(addr) \ +#define SMC_GET_MAC_ADDR(lp, addr) \ do { \ unsigned int __v; \ - __v = SMC_inw(ioaddr, ADDR0_REG); \ + __v = SMC_inw(ioaddr, ADDR0_REG(lp)); \ addr[0] = __v; addr[1] = __v >> 8; \ - __v = SMC_inw(ioaddr, ADDR1_REG); \ + __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \ addr[2] = __v; addr[3] = __v >> 8; \ - __v = SMC_inw(ioaddr, ADDR2_REG); \ + __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \ addr[4] = __v; addr[5] = __v >> 8; \ if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) { \ random_ether_addr(addr); \ diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index f8d46134dac..359452a06c6 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c @@ -250,6 +250,9 @@ struct net_device * __init sun3lance_probe(int unit) static int found; int err = -ENODEV; + if (!MACH_IS_SUN3 && !MACH_IS_SUN3X) + return ERR_PTR(-ENODEV); + /* check that this machine has an onboard lance */ switch(idprom->id_machtype) { case SM_SUN3|SM_3_50: diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index b4e7f30ea4e..1aa425be306 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -111,7 +111,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne struct hme_tx_logent *tlp; unsigned long flags; - save_and_cli(flags); + local_irq_save(flags); tlp = &tx_log[txlog_cur_entry]; tlp->tstamp = (unsigned int)jiffies; tlp->tx_new = hp->tx_new; @@ -119,7 +119,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne tlp->action = a; tlp->status = s; txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); - restore_flags(flags); + local_irq_restore(flags); } static __inline__ void tx_dump_log(void) { diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 10e4e85da3f..b07b8cbadea 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -1394,6 +1394,7 @@ tc35815_open(struct net_device *dev) tc35815_chip_init(dev); spin_unlock_irq(&lp->lock); + netif_carrier_off(dev); /* schedule a link state check */ phy_start(lp->phy_dev); @@ -1735,7 +1736,6 @@ tc35815_rx(struct net_device *dev) skb = lp->rx_skbs[cur_bd].skb; prefetch(skb->data); lp->rx_skbs[cur_bd].skb = NULL; - lp->fbl_count--; pci_unmap_single(lp->pci_dev, lp->rx_skbs[cur_bd].skb_dma, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); @@ -1791,6 +1791,7 @@ tc35815_rx(struct net_device *dev) #ifdef TC35815_USE_PACKEDBUFFER while (lp->fbl_curid != id) #else + lp->fbl_count--; while (lp->fbl_count < RX_BUF_NUM) #endif { @@ -2453,6 +2454,7 @@ static int tc35815_resume(struct pci_dev *pdev) return 0; pci_set_power_state(pdev, PCI_D0); tc35815_restart(dev); + netif_carrier_off(dev); if (lp->phy_dev) phy_start(lp->phy_dev); netif_device_attach(dev); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index b66c75e3b8a..cc4bde85254 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -64,8 +64,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.91" -#define DRV_MODULE_RELDATE "April 18, 2008" +#define 
DRV_MODULE_VERSION "3.92.1" +#define DRV_MODULE_RELDATE "June 9, 2008" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -1295,6 +1295,21 @@ static void tg3_frob_aux_power(struct tg3 *tp) GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT1), 100); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); } else { u32 no_gpio2; u32 grc_local_ctrl = 0; @@ -1656,12 +1671,76 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) return 0; } +/* tp->lock is held. */ +static void tg3_wait_for_event_ack(struct tg3 *tp) +{ + int i; + + /* Wait for up to 2.5 milliseconds */ + for (i = 0; i < 250000; i++) { + if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) + break; + udelay(10); + } +} + +/* tp->lock is held. */ +static void tg3_ump_link_report(struct tg3 *tp) +{ + u32 reg; + u32 val; + + if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + return; + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + + val = 0; + if (!tg3_readphy(tp, MII_BMCR, ®)) + val = reg << 16; + if (!tg3_readphy(tp, MII_BMSR, ®)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); + + val = 0; + if (!tg3_readphy(tp, MII_ADVERTISE, ®)) + val = reg << 16; + if (!tg3_readphy(tp, MII_LPA, ®)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); + + val = 0; + if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { + if (!tg3_readphy(tp, MII_CTRL1000, ®)) + val = reg << 16; + if (!tg3_readphy(tp, MII_STAT1000, ®)) + val |= (reg & 0xffff); + } + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); + + if (!tg3_readphy(tp, MII_PHYADDR, ®)) + val = reg << 16; + else + val = 0; + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); + + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); +} + static void tg3_link_report(struct tg3 *tp) { if (!netif_carrier_ok(tp->dev)) { if (netif_msg_link(tp)) printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name); + tg3_ump_link_report(tp); } else if (netif_msg_link(tp)) { printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", tp->dev->name, @@ -1679,6 +1758,7 @@ static void tg3_link_report(struct tg3 *tp) "on" : "off", (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ? 
"on" : "off"); + tg3_ump_link_report(tp); } } @@ -2097,9 +2177,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) MAC_STATUS_LNKSTATE_CHANGED)); udelay(40); - tp->mi_mode = MAC_MI_MODE_BASE; - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); @@ -3101,8 +3183,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) err |= tg3_readphy(tp, MII_BMCR, &bmcr); if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && - tp->link_config.flowctrl == tp->link_config.active_flowctrl) { + (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { /* do nothing, just check for link up at the end */ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { u32 adv, new_adv; @@ -5498,19 +5579,17 @@ static void tg3_stop_fw(struct tg3 *tp) if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { u32 val; - int i; + + /* Wait for RX cpu to ACK the previous event. */ + tg3_wait_for_event_ack(tp); tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); val = tr32(GRC_RX_CPU_EVENT); - val |= (1 << 14); + val |= GRC_RX_CPU_DRIVER_EVENT; tw32(GRC_RX_CPU_EVENT, val); - /* Wait for RX cpu to ACK the event. */ - for (i = 0; i < 100; i++) { - if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14))) - break; - udelay(1); - } + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); } } @@ -7102,7 +7181,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->link_config.autoneg = tp->link_config.orig_autoneg; } - tp->mi_mode = MAC_MI_MODE_BASE; + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); @@ -7400,14 +7479,16 @@ static void tg3_timer(unsigned long __opaque) if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { u32 val; + tg3_wait_for_event_ack(tp); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE3); tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); /* 5 seconds timeout */ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); val = tr32(GRC_RX_CPU_EVENT); - val |= (1 << 14); - tw32(GRC_RX_CPU_EVENT, val); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); } tp->asf_counter = tp->asf_multiplier; } @@ -8532,7 +8613,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) (cmd->speed == SPEED_1000)) return -EINVAL; else if ((cmd->speed == SPEED_1000) && - (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) + (tp->tg3_flags & TG3_FLAG_10_100_ONLY)) return -EINVAL; tg3_full_lock(tp, 0); @@ -9568,14 +9649,9 @@ static int tg3_test_loopback(struct tg3 *tp) /* Turn off link-based power management. 
*/ cpmuctrl = tr32(TG3_CPMU_CTRL); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) - tw32(TG3_CPMU_CTRL, - cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE | - CPMU_CTRL_LINK_AWARE_MODE)); - else - tw32(TG3_CPMU_CTRL, - cpmuctrl & ~CPMU_CTRL_LINK_AWARE_MODE); + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE | + CPMU_CTRL_LINK_AWARE_MODE)); } if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) @@ -9892,7 +9968,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp) return; } } - tp->nvram_size = 0x80000; + tp->nvram_size = TG3_NVRAM_SIZE_512KB; } static void __devinit tg3_get_nvram_info(struct tg3 *tp) @@ -10033,11 +10109,14 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) tp->nvram_pagesize = 264; if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) - tp->nvram_size = (protect ? 0x3e200 : 0x80000); + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) - tp->nvram_size = (protect ? 0x1f200 : 0x40000); + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); else - tp->nvram_size = (protect ? 0x1f200 : 0x20000); + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); break; case FLASH_5752VENDOR_ST_M45PE10: case FLASH_5752VENDOR_ST_M45PE20: @@ -10047,11 +10126,17 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_FLASH; tp->nvram_pagesize = 256; if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) - tp->nvram_size = (protect ? 0x10000 : 0x20000); + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) - tp->nvram_size = (protect ? 0x10000 : 0x40000); + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); else - tp->nvram_size = (protect ? 0x20000 : 0x80000); + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); break; } } @@ -10145,25 +10230,25 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) case FLASH_5761VENDOR_ATMEL_MDB161D: case FLASH_5761VENDOR_ST_A_M45PE16: case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_size = 0x100000; + tp->nvram_size = TG3_NVRAM_SIZE_2MB; break; case FLASH_5761VENDOR_ATMEL_ADB081D: case FLASH_5761VENDOR_ATMEL_MDB081D: case FLASH_5761VENDOR_ST_A_M45PE80: case FLASH_5761VENDOR_ST_M_M45PE80: - tp->nvram_size = 0x80000; + tp->nvram_size = TG3_NVRAM_SIZE_1MB; break; case FLASH_5761VENDOR_ATMEL_ADB041D: case FLASH_5761VENDOR_ATMEL_MDB041D: case FLASH_5761VENDOR_ST_A_M45PE40: case FLASH_5761VENDOR_ST_M_M45PE40: - tp->nvram_size = 0x40000; + tp->nvram_size = TG3_NVRAM_SIZE_512KB; break; case FLASH_5761VENDOR_ATMEL_ADB021D: case FLASH_5761VENDOR_ATMEL_MDB021D: case FLASH_5761VENDOR_ST_A_M45PE20: case FLASH_5761VENDOR_ST_M_M45PE20: - tp->nvram_size = 0x20000; + tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; } } @@ -11697,6 +11782,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { + /* Turn off the debug UART. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) + /* Keep VMain power. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OUTPUT0; + } + /* Force the chip into D0. 
*/ err = tg3_set_power_state(tp, PCI_D0); if (err) { @@ -11764,6 +11858,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->phy_otp = TG3_OTP_DEFAULT; } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; + else + tp->mi_mode = MAC_MI_MODE_BASE; + tp->coalesce_mode = 0; if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) @@ -12692,7 +12792,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->mac_mode = TG3_DEF_MAC_MODE; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; - tp->mi_mode = MAC_MI_MODE_BASE; + if (tg3_debug > 0) tp->msg_enable = tg3_debug; else diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index c688c3ac503..0404f93baa2 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -415,7 +415,7 @@ #define MAC_MI_MODE_CLK_10MHZ 0x00000001 #define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002 #define MAC_MI_MODE_AUTO_POLL 0x00000010 -#define MAC_MI_MODE_CORE_CLK_62MHZ 0x00008000 +#define MAC_MI_MODE_500KHZ_CONST 0x00008000 #define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */ #define MAC_AUTO_POLL_STATUS 0x00000458 #define MAC_AUTO_POLL_ERROR 0x00000001 @@ -1429,6 +1429,7 @@ #define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000 #define GRC_TIMER 0x0000680c #define GRC_RX_CPU_EVENT 0x00006810 +#define GRC_RX_CPU_DRIVER_EVENT 0x00004000 #define GRC_RX_TIMER_REF 0x00006814 #define GRC_RX_CPU_SEM 0x00006818 #define GRC_REMOTE_RX_CPU_ATTN 0x0000681c @@ -1676,6 +1677,7 @@ #define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004 #define FWCMD_NICDRV_FIX_DMAR 0x00000005 #define FWCMD_NICDRV_FIX_DMAW 0x00000006 +#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c #define FWCMD_NICDRV_ALIVE2 0x0000000d #define FWCMD_NICDRV_ALIVE3 0x0000000e #define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c @@ -2576,6 +2578,13 @@ struct tg3 { int nvram_lock_cnt; u32 nvram_size; +#define TG3_NVRAM_SIZE_64KB 0x00010000 +#define TG3_NVRAM_SIZE_128KB 0x00020000 +#define TG3_NVRAM_SIZE_256KB 0x00040000 +#define TG3_NVRAM_SIZE_512KB 0x00080000 +#define TG3_NVRAM_SIZE_1MB 0x00100000 +#define TG3_NVRAM_SIZE_2MB 0x00200000 + u32 nvram_pagesize; u32 nvram_jedecnum; @@ -2584,10 +2593,10 @@ struct tg3 { #define JEDEC_SAIFUN 0x4f #define JEDEC_SST 0xbf -#define ATMEL_AT24C64_CHIP_SIZE (64 * 1024) +#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB #define ATMEL_AT24C64_PAGE_SIZE (32) -#define ATMEL_AT24C512_CHIP_SIZE (512 * 1024) +#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB #define ATMEL_AT24C512_PAGE_SIZE (128) #define ATMEL_AT45DB0X1B_PAGE_POS 9 diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h index b880cba0f6f..74cf8e1a181 100644 --- a/drivers/net/tokenring/3c359.h +++ b/drivers/net/tokenring/3c359.h @@ -264,7 +264,7 @@ struct xl_private { u16 asb; u8 __iomem *xl_mmio; - char *xl_card_name; + const char *xl_card_name; struct pci_dev *pdev ; spinlock_t xl_lock ; diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h index c91956310fb..10fbba08978 100644 --- a/drivers/net/tokenring/olympic.h +++ b/drivers/net/tokenring/olympic.h @@ -254,7 +254,7 @@ struct olympic_private { u8 __iomem *olympic_mmio; u8 __iomem *olympic_lap; struct pci_dev *pdev ; - char *olympic_card_name ; + const char *olympic_card_name; spinlock_t olympic_lock ; diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index f9d13fa05d6..af8d2c436ef 100644 --- a/drivers/net/tulip/tulip_core.c +++ 
b/drivers/net/tulip/tulip_core.c @@ -731,7 +731,7 @@ static void tulip_down (struct net_device *dev) void __iomem *ioaddr = tp->base_addr; unsigned long flags; - flush_scheduled_work(); + cancel_work_sync(&tp->media_work); #ifdef CONFIG_TULIP_NAPI napi_disable(&tp->napi); @@ -1729,12 +1729,15 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) if (!dev) return -EINVAL; - if (netif_running(dev)) - tulip_down(dev); + if (!netif_running(dev)) + goto save_state; + + tulip_down(dev); netif_device_detach(dev); free_irq(dev->irq, dev); +save_state: pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); @@ -1754,6 +1757,9 @@ static int tulip_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); + if (!netif_running(dev)) + return 0; + if ((retval = pci_enable_device(pdev))) { printk (KERN_ERR "tulip: pci_enable_device failed in resume\n"); return retval; diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index a59c1f224aa..e9e62862163 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *); static const struct ethtool_ops netdev_ethtool_ops; static u16 read_srom_word(long, int); static irqreturn_t uli526x_interrupt(int, void *); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void uli526x_poll(struct net_device *dev); +#endif static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); static void allocate_rx_buffer(struct uli526x_board_info *); static void update_cr6(u32, unsigned long); @@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, dev->get_stats = &uli526x_get_stats; dev->set_multicast_list = &uli526x_set_filter_mode; dev->ethtool_ops = &netdev_ethtool_ops; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &uli526x_poll; +#endif spin_lock_init(&db->lock); @@ -434,10 +440,6 @@ static int uli526x_open(struct net_device *dev) ULI526X_DBUG(0, "uli526x_open", 0); - ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev); - if (ret) - return ret; - /* system variable init */ db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set; db->tx_packet_cnt = 0; @@ -456,6 +458,10 @@ static int uli526x_open(struct net_device *dev) /* Initialize ULI526X board */ uli526x_init(dev); + ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; + /* Active System Interface */ netif_wake_queue(dev); @@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) db->cr5_data = inl(ioaddr + DCR5); outl(db->cr5_data, ioaddr + DCR5); if ( !(db->cr5_data & 0x180c1) ) { - spin_unlock_irqrestore(&db->lock, flags); + /* Restore CR7 to enable interrupt mask */ outl(db->cr7_data, ioaddr + DCR7); + spin_unlock_irqrestore(&db->lock, flags); return IRQ_HANDLED; } @@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void uli526x_poll(struct net_device *dev) +{ + /* ISR grabs the irqsave lock, so this should be safe */ + uli526x_interrupt(dev->irq, dev); +} +#endif /* * Free TX resource after TX complete @@ -1368,6 +1382,12 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) * This setup frame initialize ULI526X address filter mode */ +#ifdef __BIG_ENDIAN +#define FLT_SHIFT 16 +#else +#define FLT_SHIFT 0 +#endif + static void send_filter_frame(struct net_device *dev, int mc_cnt) 
{ struct uli526x_board_info *db = netdev_priv(dev); @@ -1384,27 +1404,27 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) /* Node address */ addrptr = (u16 *) dev->dev_addr; - *suptr++ = addrptr[0]; - *suptr++ = addrptr[1]; - *suptr++ = addrptr[2]; + *suptr++ = addrptr[0] << FLT_SHIFT; + *suptr++ = addrptr[1] << FLT_SHIFT; + *suptr++ = addrptr[2] << FLT_SHIFT; /* broadcast address */ - *suptr++ = 0xffff; - *suptr++ = 0xffff; - *suptr++ = 0xffff; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; /* fit the multicast address */ for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { addrptr = (u16 *) mcptr->dmi_addr; - *suptr++ = addrptr[0]; - *suptr++ = addrptr[1]; - *suptr++ = addrptr[2]; + *suptr++ = addrptr[0] << FLT_SHIFT; + *suptr++ = addrptr[1] << FLT_SHIFT; + *suptr++ = addrptr[2] << FLT_SHIFT; } for (; i<14; i++) { - *suptr++ = 0xffff; - *suptr++ = 0xffff; - *suptr++ = 0xffff; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; } /* prepare the setup frame */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 0ce07a339c7..b9018bfa0a9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -313,6 +313,21 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: + if (tun->flags & TUN_NO_PI) { + switch (skb->data[0] & 0xf0) { + case 0x40: + pi.proto = htons(ETH_P_IP); + break; + case 0x60: + pi.proto = htons(ETH_P_IPV6); + break; + default: + tun->dev->stats.rx_dropped++; + kfree_skb(skb); + return -EINVAL; + } + } + skb_reset_mac_header(skb); skb->protocol = pi.proto; skb->dev = tun->dev; @@ -587,6 +602,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->attached = 1; get_net(dev_net(tun->dev)); + /* Make sure persistent devices do not get stuck in + * xoff state. 
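The tun_get_user() hunk above infers the protocol of a TUN_NO_PI frame from the IP version nibble of its first byte and drops anything that is neither IPv4 nor IPv6. A small user-space sketch of the same mapping (the helper and macro names are invented; the ethertype numbers are the standard ones):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_ETH_P_IP   0x0800
#define EXAMPLE_ETH_P_IPV6 0x86DD

/* Guess the protocol of a raw IP frame from its version nibble, as the
 * TUN_NO_PI path above does; return -1 for anything else. */
static int guess_proto(const uint8_t *data, uint16_t *proto)
{
        switch (data[0] & 0xf0) {
        case 0x40:
                *proto = htons(EXAMPLE_ETH_P_IP);
                return 0;
        case 0x60:
                *proto = htons(EXAMPLE_ETH_P_IPV6);
                return 0;
        default:
                return -1;      /* neither IPv4 nor IPv6: caller drops it */
        }
}

int main(void)
{
        uint8_t v4 = 0x45, v6 = 0x60;
        uint16_t proto;

        if (!guess_proto(&v4, &proto))
                printf("IPv4 -> 0x%04x\n", ntohs(proto));
        if (!guess_proto(&v6, &proto))
                printf("IPv6 -> 0x%04x\n", ntohs(proto));
        return 0;
}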
+ */ + if (netif_running(tun->dev)) + netif_wake_queue(tun->dev); + strcpy(ifr->ifr_name, tun->dev->name); return 0; diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 281ce3d3953..fb0b918e5cc 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -62,7 +62,6 @@ #endif /* UGETH_VERBOSE_DEBUG */ #define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 -void uec_set_ethtool_ops(struct net_device *netdev); static DEFINE_SPINLOCK(ugeth_lock); @@ -216,7 +215,8 @@ static struct list_head *dequeue(struct list_head *lh) } } -static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) +static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, + u8 __iomem *bd) { struct sk_buff *skb = NULL; @@ -236,21 +236,22 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) skb->dev = ugeth->dev; - out_be32(&((struct qe_bd *)bd)->buf, - dma_map_single(NULL, + out_be32(&((struct qe_bd __iomem *)bd)->buf, + dma_map_single(&ugeth->dev->dev, skb->data, ugeth->ug_info->uf_info.max_rx_buf_length + UCC_GETH_RX_DATA_BUF_ALIGNMENT, DMA_FROM_DEVICE)); - out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W))); + out_be32((u32 __iomem *)bd, + (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); return skb; } static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) { - u8 *bd; + u8 __iomem *bd; u32 bd_status; struct sk_buff *skb; int i; @@ -259,7 +260,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) i = 0; do { - bd_status = in_be32((u32*)bd); + bd_status = in_be32((u32 __iomem *)bd); skb = get_new_skb(ugeth, bd); if (!skb) /* If can not allocate data buffer, @@ -277,7 +278,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) } static int fill_init_enet_entries(struct ucc_geth_private *ugeth, - volatile u32 *p_start, + u32 *p_start, u8 num_entries, u32 thread_size, u32 thread_alignment, @@ -316,7 +317,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth, } static int return_init_enet_entries(struct ucc_geth_private *ugeth, - volatile u32 *p_start, + u32 *p_start, u8 num_entries, enum qe_risc_allocation risc, int skip_page_for_first_entry) @@ -326,21 +327,22 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth, int snum; for (i = 0; i < num_entries; i++) { + u32 val = *p_start; + /* Check that this entry was actually valid -- needed in case failed in allocations */ - if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { snum = - (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> ENET_INIT_PARAM_SNUM_SHIFT; qe_put_snum((u8) snum); if (!((i == 0) && skip_page_for_first_entry)) { /* First entry of Rx does not have page */ init_enet_offset = - (in_be32(p_start) & - ENET_INIT_PARAM_PTR_MASK); + (val & ENET_INIT_PARAM_PTR_MASK); qe_muram_free(init_enet_offset); } - *(p_start++) = 0; /* Just for cosmetics */ + *p_start++ = 0; } } @@ -349,7 +351,7 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth, #ifdef DEBUG static int dump_init_enet_entries(struct ucc_geth_private *ugeth, - volatile u32 *p_start, + u32 __iomem *p_start, u8 num_entries, u32 thread_size, enum qe_risc_allocation risc, @@ -360,11 +362,13 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth, int snum; for (i = 0; i < num_entries; i++) { + u32 val = in_be32(p_start); + /* Check that this entry was actually valid -- needed in case failed in allocations */ - if ((*p_start & 
ENET_INIT_PARAM_RISC_MASK) == risc) { + if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { snum = - (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> ENET_INIT_PARAM_SNUM_SHIFT; qe_put_snum((u8) snum); if (!((i == 0) && skip_page_for_first_entry)) { @@ -440,7 +444,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth, static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; if (!(paddr_num < NUM_OF_PADDRS)) { ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); @@ -448,7 +452,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) } p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> addressfiltering; /* Writing address ff.ff.ff.ff.ff.ff disables address @@ -463,11 +467,11 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, u8 *p_enet_addr) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; u32 cecr_subblock; p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> addressfiltering; cecr_subblock = @@ -487,7 +491,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) { struct ucc_fast_private *uccf; - struct ucc_geth *ug_regs; + struct ucc_geth __iomem *ug_regs; u32 maccfg2, uccm; uccf = ugeth->uccf; @@ -507,7 +511,7 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) { struct ucc_fast_private *uccf; - struct ucc_geth *ug_regs; + struct ucc_geth __iomem *ug_regs; u32 maccfg2, uccm; uccf = ugeth->uccf; @@ -538,13 +542,13 @@ static void get_statistics(struct ucc_geth_private *ugeth, rx_firmware_statistics, struct ucc_geth_hardware_statistics *hardware_statistics) { - struct ucc_fast *uf_regs; - struct ucc_geth *ug_regs; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth __iomem *ug_regs; struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; ug_regs = ugeth->ug_regs; - uf_regs = (struct ucc_fast *) ug_regs; + uf_regs = (struct ucc_fast __iomem *) ug_regs; p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; @@ -1132,9 +1136,9 @@ static void dump_regs(struct ucc_geth_private *ugeth) } #endif /* DEBUG */ -static void init_default_reg_vals(volatile u32 *upsmr_register, - volatile u32 *maccfg1_register, - volatile u32 *maccfg2_register) +static void init_default_reg_vals(u32 __iomem *upsmr_register, + u32 __iomem *maccfg1_register, + u32 __iomem *maccfg2_register) { out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); @@ -1148,7 +1152,7 @@ static int init_half_duplex_params(int alt_beb, u8 alt_beb_truncation, u8 max_retransmissions, u8 collision_window, - volatile u32 *hafdup_register) + u32 __iomem *hafdup_register) { u32 value = 0; @@ -1180,7 +1184,7 @@ 
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, u8 non_btb_ipg, u8 min_ifg, u8 btb_ipg, - volatile u32 *ipgifg_register) + u32 __iomem *ipgifg_register) { u32 value = 0; @@ -1215,9 +1219,9 @@ int init_flow_control_params(u32 automatic_flow_control_mode, int tx_flow_control_enable, u16 pause_period, u16 extension_field, - volatile u32 *upsmr_register, - volatile u32 *uempr_register, - volatile u32 *maccfg1_register) + u32 __iomem *upsmr_register, + u32 __iomem *uempr_register, + u32 __iomem *maccfg1_register) { u32 value = 0; @@ -1243,8 +1247,8 @@ int init_flow_control_params(u32 automatic_flow_control_mode, static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, int auto_zero_hardware_statistics, - volatile u32 *upsmr_register, - volatile u16 *uescr_register) + u32 __iomem *upsmr_register, + u16 __iomem *uescr_register) { u32 upsmr_value = 0; u16 uescr_value = 0; @@ -1270,12 +1274,12 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, static int init_firmware_statistics_gathering_mode(int enable_tx_firmware_statistics, int enable_rx_firmware_statistics, - volatile u32 *tx_rmon_base_ptr, + u32 __iomem *tx_rmon_base_ptr, u32 tx_firmware_statistics_structure_address, - volatile u32 *rx_rmon_base_ptr, + u32 __iomem *rx_rmon_base_ptr, u32 rx_firmware_statistics_structure_address, - volatile u16 *temoder_register, - volatile u32 *remoder_register) + u16 __iomem *temoder_register, + u32 __iomem *remoder_register) { /* Note: this function does not check if */ /* the parameters it receives are NULL */ @@ -1307,8 +1311,8 @@ static int init_mac_station_addr_regs(u8 address_byte_0, u8 address_byte_3, u8 address_byte_4, u8 address_byte_5, - volatile u32 *macstnaddr1_register, - volatile u32 *macstnaddr2_register) + u32 __iomem *macstnaddr1_register, + u32 __iomem *macstnaddr2_register) { u32 value = 0; @@ -1344,7 +1348,7 @@ static int init_mac_station_addr_regs(u8 address_byte_0, } static int init_check_frame_length_mode(int length_check, - volatile u32 *maccfg2_register) + u32 __iomem *maccfg2_register) { u32 value = 0; @@ -1360,7 +1364,7 @@ static int init_check_frame_length_mode(int length_check, } static int init_preamble_length(u8 preamble_length, - volatile u32 *maccfg2_register) + u32 __iomem *maccfg2_register) { u32 value = 0; @@ -1376,7 +1380,7 @@ static int init_preamble_length(u8 preamble_length, static int init_rx_parameters(int reject_broadcast, int receive_short_frames, - int promiscuous, volatile u32 *upsmr_register) + int promiscuous, u32 __iomem *upsmr_register) { u32 value = 0; @@ -1403,7 +1407,7 @@ static int init_rx_parameters(int reject_broadcast, } static int init_max_rx_buff_len(u16 max_rx_buf_len, - volatile u16 *mrblr_register) + u16 __iomem *mrblr_register) { /* max_rx_buf_len value must be a multiple of 128 */ if ((max_rx_buf_len == 0) @@ -1415,8 +1419,8 @@ static int init_max_rx_buff_len(u16 max_rx_buf_len, } static int init_min_frame_len(u16 min_frame_length, - volatile u16 *minflr_register, - volatile u16 *mrblr_register) + u16 __iomem *minflr_register, + u16 __iomem *mrblr_register) { u16 mrblr_value = 0; @@ -1431,8 +1435,8 @@ static int init_min_frame_len(u16 min_frame_length, static int adjust_enet_interface(struct ucc_geth_private *ugeth) { struct ucc_geth_info *ug_info; - struct ucc_geth *ug_regs; - struct ucc_fast *uf_regs; + struct ucc_geth __iomem *ug_regs; + struct ucc_fast __iomem *uf_regs; int ret_val; u32 upsmr, maccfg2, tbiBaseAddress; u16 value; @@ -1517,8 +1521,8 @@ static int 
adjust_enet_interface(struct ucc_geth_private *ugeth) static void adjust_link(struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); - struct ucc_geth *ug_regs; - struct ucc_fast *uf_regs; + struct ucc_geth __iomem *ug_regs; + struct ucc_fast __iomem *uf_regs; struct phy_device *phydev = ugeth->phydev; unsigned long flags; int new_state = 0; @@ -1678,9 +1682,9 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) uccf = ugeth->uccf; /* Clear acknowledge bit */ - temp = ugeth->p_rx_glbl_pram->rxgstpack; + temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; - ugeth->p_rx_glbl_pram->rxgstpack = temp; + out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); /* Keep issuing command and checking acknowledge bit until it is asserted, according to spec */ @@ -1692,7 +1696,7 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); - temp = ugeth->p_rx_glbl_pram->rxgstpack; + temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); uccf->stopped_rx = 1; @@ -1991,19 +1995,20 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * enum enet_addr_type enet_addr_type) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; struct ucc_fast_private *uccf; enum comm_dir comm_dir; struct list_head *p_lh; u16 i, num; - u32 *addr_h, *addr_l; + u32 __iomem *addr_h; + u32 __iomem *addr_l; u8 *p_counter; uccf = ugeth->uccf; p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> - addressfiltering; + (struct ucc_geth_82xx_address_filtering_pram __iomem *) + ugeth->p_rx_glbl_pram->addressfiltering; if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { addr_h = &(p_82xx_addr_filt->gaddr_h); @@ -2079,7 +2084,7 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge static void ucc_geth_memclean(struct ucc_geth_private *ugeth) { u16 i, j; - u8 *bd; + u8 __iomem *bd; if (!ugeth) return; @@ -2153,9 +2158,9 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) continue; for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { if (ugeth->tx_skbuff[i][j]) { - dma_unmap_single(NULL, - ((struct qe_bd *)bd)->buf, - (in_be32((u32 *)bd) & + dma_unmap_single(&ugeth->dev->dev, + in_be32(&((struct qe_bd __iomem *)bd)->buf), + (in_be32((u32 __iomem *)bd) & BD_LENGTH_MASK), DMA_TO_DEVICE); dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); @@ -2181,8 +2186,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) bd = ugeth->p_rx_bd_ring[i]; for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { if (ugeth->rx_skbuff[i][j]) { - dma_unmap_single(NULL, - ((struct qe_bd *)bd)->buf, + dma_unmap_single(&ugeth->dev->dev, + in_be32(&((struct qe_bd __iomem *)bd)->buf), ugeth->ug_info-> uf_info.max_rx_buf_length + UCC_GETH_RX_DATA_BUF_ALIGNMENT, @@ -2218,8 +2223,8 @@ static void ucc_geth_set_multi(struct net_device *dev) { struct ucc_geth_private *ugeth; struct dev_mc_list *dmi; - struct ucc_fast *uf_regs; - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; int i; ugeth = netdev_priv(dev); @@ -2228,14 +2233,14 @@ static void ucc_geth_set_multi(struct net_device *dev) if (dev->flags & IFF_PROMISC) { - uf_regs->upsmr |= UPSMR_PRO; + 
out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO); } else { - uf_regs->upsmr &= ~UPSMR_PRO; + out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO); p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> p_rx_glbl_pram->addressfiltering; if (dev->flags & IFF_ALLMULTI) { @@ -2270,7 +2275,7 @@ static void ucc_geth_set_multi(struct net_device *dev) static void ucc_geth_stop(struct ucc_geth_private *ugeth) { - struct ucc_geth *ug_regs = ugeth->ug_regs; + struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; struct phy_device *phydev = ugeth->phydev; u32 tempval; @@ -2419,20 +2424,20 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) return -ENOMEM; } - ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); + ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); return 0; } static int ucc_geth_startup(struct ucc_geth_private *ugeth) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; - struct ucc_geth_init_pram *p_init_enet_pram; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_geth_init_pram __iomem *p_init_enet_pram; struct ucc_fast_private *uccf; struct ucc_geth_info *ug_info; struct ucc_fast_info *uf_info; - struct ucc_fast *uf_regs; - struct ucc_geth *ug_regs; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth __iomem *ug_regs; int ret_val = -EINVAL; u32 remoder = UCC_GETH_REMODER_INIT; u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; @@ -2440,7 +2445,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) u16 temoder = UCC_GETH_TEMODER_INIT; u16 test; u8 function_code = 0; - u8 *bd, *endOfRing; + u8 __iomem *bd; + u8 __iomem *endOfRing; u8 numThreadsRxNumerical, numThreadsTxNumerical; ugeth_vdbg("%s: IN", __FUNCTION__); @@ -2602,11 +2608,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) align = UCC_GETH_TX_BD_RING_ALIGNMENT; ugeth->tx_bd_ring_offset[j] = - kmalloc((u32) (length + align), GFP_KERNEL); + (u32) kmalloc((u32) (length + align), GFP_KERNEL); if (ugeth->tx_bd_ring_offset[j] != 0) ugeth->p_tx_bd_ring[j] = - (void*)((ugeth->tx_bd_ring_offset[j] + + (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + align) & ~(align - 1)); } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { ugeth->tx_bd_ring_offset[j] = @@ -2614,7 +2620,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_TX_BD_RING_ALIGNMENT); if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) ugeth->p_tx_bd_ring[j] = - (u8 *) qe_muram_addr(ugeth-> + (u8 __iomem *) qe_muram_addr(ugeth-> tx_bd_ring_offset[j]); } if (!ugeth->p_tx_bd_ring[j]) { @@ -2626,8 +2632,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } /* Zero unused end of bd ring, according to spec */ - memset(ugeth->p_tx_bd_ring[j] + - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0, + memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + + ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); } @@ -2639,10 +2645,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) align = UCC_GETH_RX_BD_RING_ALIGNMENT; ugeth->rx_bd_ring_offset[j] = - kmalloc((u32) (length + align), GFP_KERNEL); + (u32) kmalloc((u32) (length + align), GFP_KERNEL); if (ugeth->rx_bd_ring_offset[j] != 0) ugeth->p_rx_bd_ring[j] = - 
(void*)((ugeth->rx_bd_ring_offset[j] + + (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + align) & ~(align - 1)); } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { ugeth->rx_bd_ring_offset[j] = @@ -2650,7 +2656,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_RX_BD_RING_ALIGNMENT); if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) ugeth->p_rx_bd_ring[j] = - (u8 *) qe_muram_addr(ugeth-> + (u8 __iomem *) qe_muram_addr(ugeth-> rx_bd_ring_offset[j]); } if (!ugeth->p_rx_bd_ring[j]) { @@ -2685,14 +2691,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { /* clear bd buffer */ - out_be32(&((struct qe_bd *)bd)->buf, 0); + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); /* set bd status and length */ - out_be32((u32 *)bd, 0); + out_be32((u32 __iomem *)bd, 0); bd += sizeof(struct qe_bd); } bd -= sizeof(struct qe_bd); /* set bd status and length */ - out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */ + out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ } /* Init Rx bds */ @@ -2717,14 +2723,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { /* set bd status and length */ - out_be32((u32 *)bd, R_I); + out_be32((u32 __iomem *)bd, R_I); /* clear bd buffer */ - out_be32(&((struct qe_bd *)bd)->buf, 0); + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); bd += sizeof(struct qe_bd); } bd -= sizeof(struct qe_bd); /* set bd status and length */ - out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */ + out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ } /* @@ -2744,10 +2750,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_tx_glbl_pram = - (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth-> + (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> tx_glbl_pram_offset); /* Zero out p_tx_glbl_pram */ - memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); + memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); /* Fill global PRAM */ @@ -2768,7 +2774,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_thread_data_tx = - (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth-> + (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> thread_dat_tx_offset); out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); @@ -2779,7 +2785,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* iphoffset */ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) - ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; + out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], + ug_info->iphoffset[i]); /* SQPTR */ /* Size varies with number of Tx queues */ @@ -2797,7 +2804,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_send_q_mem_reg = - (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth-> + (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> send_q_mem_reg_offset); out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); @@ -2841,25 +2848,26 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_scheduler = - (struct ucc_geth_scheduler *) qe_muram_addr(ugeth-> + (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> scheduler_offset); 
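The BD-ring setup above over-allocates each kmalloc'd ring by its alignment and rounds the pointer up with (offset + align) & ~(align - 1). The same trick in stand-alone form (sizes are arbitrary here; align must be a power of two):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t length = 256, align = 32;        /* align: power of two */
        uintptr_t raw = (uintptr_t)malloc(length + align);
        uintptr_t aligned;

        if (!raw)
                return 1;

        /* Round up to the next 'align' boundary, exactly like the
         * (offset + align) & ~(align - 1) expression in the driver. */
        aligned = (raw + align) & ~(uintptr_t)(align - 1);
        printf("raw %#lx -> aligned %#lx\n",
               (unsigned long)raw, (unsigned long)aligned);

        free((void *)raw);
        return 0;
}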
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, ugeth->scheduler_offset); /* Zero out p_scheduler */ - memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); + memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); /* Set values in scheduler */ out_be32(&ugeth->p_scheduler->mblinterval, ug_info->mblinterval); out_be16(&ugeth->p_scheduler->nortsrbytetime, ug_info->nortsrbytetime); - ugeth->p_scheduler->fracsiz = ug_info->fracsiz; - ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; - ugeth->p_scheduler->txasap = ug_info->txasap; - ugeth->p_scheduler->extrabw = ug_info->extrabw; + out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); + out_8(&ugeth->p_scheduler->strictpriorityq, + ug_info->strictpriorityq); + out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); + out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); for (i = 0; i < NUM_TX_QUEUES; i++) - ugeth->p_scheduler->weightfactor[i] = - ug_info->weightfactor[i]; + out_8(&ugeth->p_scheduler->weightfactor[i], + ug_info->weightfactor[i]); /* Set pointers to cpucount registers in scheduler */ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); @@ -2890,10 +2898,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_tx_fw_statistics_pram = - (struct ucc_geth_tx_firmware_statistics_pram *) + (struct ucc_geth_tx_firmware_statistics_pram __iomem *) qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); /* Zero out p_tx_fw_statistics_pram */ - memset(ugeth->p_tx_fw_statistics_pram, + memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); } @@ -2930,10 +2938,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_rx_glbl_pram = - (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth-> + (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> rx_glbl_pram_offset); /* Zero out p_rx_glbl_pram */ - memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); + memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); /* Fill global PRAM */ @@ -2953,7 +2961,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_thread_data_rx = - (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth-> + (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> thread_dat_rx_offset); out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); @@ -2976,10 +2984,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_rx_fw_statistics_pram = - (struct ucc_geth_rx_firmware_statistics_pram *) + (struct ucc_geth_rx_firmware_statistics_pram __iomem *) qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); /* Zero out p_rx_fw_statistics_pram */ - memset(ugeth->p_rx_fw_statistics_pram, 0, + memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, sizeof(struct ucc_geth_rx_firmware_statistics_pram)); } @@ -3000,7 +3008,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_rx_irq_coalescing_tbl = - (struct ucc_geth_rx_interrupt_coalescing_table *) + (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, ugeth->rx_irq_coalescing_tbl_offset); @@ -3069,11 +3077,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_rx_bd_qs_tbl = - (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth-> + (struct 
ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> rx_bd_qs_tbl_offset); out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); /* Zero out p_rx_bd_qs_tbl */ - memset(ugeth->p_rx_bd_qs_tbl, + memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, 0, ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + sizeof(struct ucc_geth_rx_prefetched_bds))); @@ -3133,7 +3141,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) &ugeth->p_rx_glbl_pram->remoder); /* function code register */ - ugeth->p_rx_glbl_pram->rstate = function_code; + out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); /* initialize extended filtering */ if (ug_info->rxExtendedFiltering) { @@ -3160,7 +3168,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_exf_glbl_param = - (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth-> + (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> exf_glbl_param_offset); out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, ugeth->exf_glbl_param_offset); @@ -3175,7 +3183,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> p_rx_glbl_pram->addressfiltering; ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, @@ -3307,17 +3315,21 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } p_init_enet_pram = - (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset); + (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); /* Copy shadow InitEnet command parameter structure into PRAM */ - p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; - p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; - p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; - p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; + out_8(&p_init_enet_pram->resinit1, + ugeth->p_init_enet_param_shadow->resinit1); + out_8(&p_init_enet_pram->resinit2, + ugeth->p_init_enet_param_shadow->resinit2); + out_8(&p_init_enet_pram->resinit3, + ugeth->p_init_enet_param_shadow->resinit3); + out_8(&p_init_enet_pram->resinit4, + ugeth->p_init_enet_param_shadow->resinit4); out_be16(&p_init_enet_pram->resinit5, ugeth->p_init_enet_param_shadow->resinit5); - p_init_enet_pram->largestexternallookupkeysize = - ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; + out_8(&p_init_enet_pram->largestexternallookupkeysize, + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); out_be32(&p_init_enet_pram->rgftgfrxglobal, ugeth->p_init_enet_param_shadow->rgftgfrxglobal); for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) @@ -3371,7 +3383,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef CONFIG_UGETH_TX_ON_DEMAND struct ucc_fast_private *uccf; #endif - u8 *bd; /* BD pointer */ + u8 __iomem *bd; /* BD pointer */ u32 bd_status; u8 txQ = 0; @@ -3383,7 +3395,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Start from the next BD that should be filled */ bd = ugeth->txBd[txQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); /* Save the skb pointer so we can free it later */ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; @@ -3393,15 +3405,16 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 
1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); /* set up the buffer descriptor */ - out_be32(&((struct qe_bd *)bd)->buf, - dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); + out_be32(&((struct qe_bd __iomem *)bd)->buf, + dma_map_single(&ugeth->dev->dev, skb->data, + skb->len, DMA_TO_DEVICE)); /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; /* set bd status and length */ - out_be32((u32 *)bd, bd_status); + out_be32((u32 __iomem *)bd, bd_status); dev->trans_start = jiffies; @@ -3441,7 +3454,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) { struct sk_buff *skb; - u8 *bd; + u8 __iomem *bd; u16 length, howmany = 0; u32 bd_status; u8 *bdBuffer; @@ -3454,11 +3467,11 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit /* collect received buffers */ bd = ugeth->rxBd[rxQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); /* while there are received buffers and BD is full (~R_E) */ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { - bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf); + bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; @@ -3516,7 +3529,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit else bd += sizeof(struct qe_bd); - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); } ugeth->rxBd[rxQ] = bd; @@ -3527,11 +3540,11 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) { /* Start from the next BD that should be filled */ struct ucc_geth_private *ugeth = netdev_priv(dev); - u8 *bd; /* BD pointer */ + u8 __iomem *bd; /* BD pointer */ u32 bd_status; bd = ugeth->confBd[txQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); /* Normal processing. 
*/ while ((bd_status & T_R) == 0) { @@ -3561,7 +3574,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) bd += sizeof(struct qe_bd); else bd = ugeth->p_tx_bd_ring[txQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); } ugeth->confBd[txQ] = bd; return 0; @@ -3910,7 +3923,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma return -EINVAL; } } else { - prop = of_get_property(np, "rx-clock", NULL); + prop = of_get_property(np, "tx-clock", NULL); if (!prop) { printk(KERN_ERR "ucc_geth: mising tx-clock-name property\n"); diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 9f8b7580a3a..abc0e224263 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -700,8 +700,8 @@ struct ucc_geth_82xx_address_filtering_pram { u32 iaddr_l; /* individual address filter, low */ u32 gaddr_h; /* group address filter, high */ u32 gaddr_l; /* group address filter, low */ - struct ucc_geth_82xx_enet_address taddr; - struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS]; + struct ucc_geth_82xx_enet_address __iomem taddr; + struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; u8 res0[0x40 - 0x38]; } __attribute__ ((packed)); @@ -1186,40 +1186,40 @@ struct ucc_geth_private { struct ucc_fast_private *uccf; struct net_device *dev; struct napi_struct napi; - struct ucc_geth *ug_regs; + struct ucc_geth __iomem *ug_regs; struct ucc_geth_init_pram *p_init_enet_param_shadow; - struct ucc_geth_exf_global_pram *p_exf_glbl_param; + struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param; u32 exf_glbl_param_offset; - struct ucc_geth_rx_global_pram *p_rx_glbl_pram; + struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram; u32 rx_glbl_pram_offset; - struct ucc_geth_tx_global_pram *p_tx_glbl_pram; + struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram; u32 tx_glbl_pram_offset; - struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg; + struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg; u32 send_q_mem_reg_offset; - struct ucc_geth_thread_data_tx *p_thread_data_tx; + struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx; u32 thread_dat_tx_offset; - struct ucc_geth_thread_data_rx *p_thread_data_rx; + struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx; u32 thread_dat_rx_offset; - struct ucc_geth_scheduler *p_scheduler; + struct ucc_geth_scheduler __iomem *p_scheduler; u32 scheduler_offset; - struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; + struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram; u32 tx_fw_statistics_pram_offset; - struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; + struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram; u32 rx_fw_statistics_pram_offset; - struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl; + struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl; u32 rx_irq_coalescing_tbl_offset; - struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl; + struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl; u32 rx_bd_qs_tbl_offset; - u8 *p_tx_bd_ring[NUM_TX_QUEUES]; + u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES]; u32 tx_bd_ring_offset[NUM_TX_QUEUES]; - u8 *p_rx_bd_ring[NUM_RX_QUEUES]; + u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES]; u32 rx_bd_ring_offset[NUM_RX_QUEUES]; - u8 *confBd[NUM_TX_QUEUES]; - u8 *txBd[NUM_TX_QUEUES]; - u8 *rxBd[NUM_RX_QUEUES]; + u8 __iomem *confBd[NUM_TX_QUEUES]; + u8 __iomem *txBd[NUM_TX_QUEUES]; + u8 __iomem *rxBd[NUM_RX_QUEUES]; int 
badFrame[NUM_RX_QUEUES]; u16 cpucount[NUM_TX_QUEUES]; - volatile u16 *p_cpucount[NUM_TX_QUEUES]; + u16 __iomem *p_cpucount[NUM_TX_QUEUES]; int indAddrRegUsed[NUM_OF_PADDRS]; u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ u8 numGroupAddrInHash; @@ -1251,4 +1251,12 @@ struct ucc_geth_private { int oldlink; }; +void uec_set_ethtool_ops(struct net_device *netdev); +int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, int tx_flow_control_enable, + u16 pause_period, u16 extension_field, + u32 __iomem *upsmr_register, u32 __iomem *uempr_register, + u32 __iomem *maccfg1_register); + + #endif /* __UCC_GETH_H__ */ diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index 9a9622c13e2..f5839c4a5cb 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c @@ -73,6 +73,7 @@ static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-frames-ok", "tx-excessive-differ-frames", "tx-256-511-frames", + "tx-512-1023-frames", "tx-1024-1518-frames", "tx-jumbo-frames", }; @@ -108,12 +109,6 @@ static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { #define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) -extern int init_flow_control_params(u32 automatic_flow_control_mode, - int rx_flow_control_enable, - int tx_flow_control_enable, u16 pause_period, - u16 extension_field, volatile u32 *upsmr_register, - volatile u32 *uempr_register, volatile u32 *maccfg1_register); - static int uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { @@ -314,7 +309,7 @@ static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; } if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) - memcpy(buf, tx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * + memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * ETH_GSTRING_LEN); } diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index 2af49078100..94047473692 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c @@ -104,7 +104,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum) } /* Reset the MIIM registers, and wait for the bus to free */ -int uec_mdio_reset(struct mii_bus *bus) +static int uec_mdio_reset(struct mii_bus *bus) { struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv; unsigned int timeout = PHY_INIT_TIMEOUT; @@ -240,7 +240,7 @@ reg_map_fail: return err; } -int uec_mdio_remove(struct of_device *ofdev) +static int uec_mdio_remove(struct of_device *ofdev) { struct device *device = &ofdev->dev; struct mii_bus *bus = dev_get_drvdata(device); diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 6f245cfb662..37ecf845edf 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -1381,6 +1381,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x0411, 0x003d), .driver_info = (unsigned long) &ax8817x_info, }, { + // Buffalo LUA-U2-GT 10/100/1000 + USB_DEVICE (0x0411, 0x006e), + .driver_info = (unsigned long) &ax88178_info, +}, { // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter" USB_DEVICE (0x6189, 0x182d), .driver_info = (unsigned long) &ax8817x_info, @@ -1436,6 +1440,10 @@ static const struct usb_device_id products [] = { // Belkin F5D5055 USB_DEVICE(0x050d, 0x5055), .driver_info = (unsigned long) &ax88178_info, +}, { + // Apple USB Ethernet Adapter + USB_DEVICE(0x05ac, 0x1402), + .driver_info = (unsigned long) 
&ax88772_info, }, { }, // END }; diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 76752d84a30..22c17bbacb6 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -423,7 +423,10 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; - *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); + if (catc->is_f5u011) + *(__be16 *)tx_buf = cpu_to_be16(skb->len); + else + *(__le16 *)tx_buf = cpu_to_le16(skb->len); skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c index 0ec7936cbe2..c66b9c324f5 100644 --- a/drivers/net/usb/cdc_subset.c +++ b/drivers/net/usb/cdc_subset.c @@ -218,7 +218,7 @@ static const struct driver_info blob_info = { /*-------------------------------------------------------------------------*/ #ifndef HAVE_HARDWARE -#error You need to configure some hardware for this driver +#warning You need to configure some hardware for this driver #endif /* diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 0dcfc031026..7c66b052f55 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -706,7 +706,7 @@ static void kaweth_kill_urbs(struct kaweth_device *kaweth) usb_kill_urb(kaweth->rx_urb); usb_kill_urb(kaweth->tx_urb); - flush_scheduled_work(); + cancel_delayed_work_sync(&kaweth->lowmem_work); /* a scheduled work may have resubmitted, we hit them again */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 21a7785cb8b..ae467f182c4 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf) dev_dbg(&info->control->dev, "rndis response error, code %d\n", retval); } - msleep(2); + msleep(20); } dev_dbg(&info->control->dev, "rndis response timeout\n"); return -ETIMEDOUT; @@ -283,8 +283,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) struct rndis_set_c *set_c; struct rndis_halt *halt; } u; - u32 tmp, phym_unspec; - __le32 *phym; + u32 tmp; + __le32 phym_unspec, *phym; int reply_len; unsigned char *bp; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 555b70c8b86..4452306d532 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -41,9 +41,19 @@ struct virtnet_info struct net_device *dev; struct napi_struct napi; + /* The skb we couldn't send because buffers were full. */ + struct sk_buff *last_xmit_skb; + + /* If we need to free in a timer, this is it. */ + struct timer_list xmit_free_timer; + /* Number of input buffers, and max we've ever had. */ unsigned int num, max; + /* For cleaning up after transmission. */ + struct tasklet_struct tasklet; + bool free_in_tasklet; + /* Receive & send queues. */ struct sk_buff_head recv; struct sk_buff_head send; @@ -65,8 +75,13 @@ static void skb_xmit_done(struct virtqueue *svq) /* Suppress further interrupts. */ svq->vq_ops->disable_cb(svq); - /* We were waiting for more output buffers. */ + + /* We were probably waiting for more output buffers. */ netif_wake_queue(vi->dev); + + /* Make sure we re-xmit last_xmit_skb: if there are no more packets + * queued, start_xmit won't be called. 
*/ + tasklet_schedule(&vi->tasklet); } static void receive_skb(struct net_device *dev, struct sk_buff *skb, @@ -83,9 +98,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, BUG_ON(len > MAX_PACKET_LEN); skb_trim(skb, len); - skb->protocol = eth_type_trans(skb, dev); - pr_debug("Receiving skb proto 0x%04x len %i type %i\n", - ntohs(skb->protocol), skb->len, skb->pkt_type); + dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; @@ -95,6 +108,10 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, goto frame_err; } + skb->protocol = eth_type_trans(skb, dev); + pr_debug("Receiving skb proto 0x%04x len %i type %i\n", + ntohs(skb->protocol), skb->len, skb->pkt_type); + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -142,10 +159,10 @@ drop: static void try_fill_recv(struct virtnet_info *vi) { struct sk_buff *skb; - struct scatterlist sg[1+MAX_SKB_FRAGS]; + struct scatterlist sg[2+MAX_SKB_FRAGS]; int num, err; - sg_init_table(sg, 1+MAX_SKB_FRAGS); + sg_init_table(sg, 2+MAX_SKB_FRAGS); for (;;) { skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN); if (unlikely(!skb)) @@ -221,23 +238,38 @@ static void free_old_xmit_skbs(struct virtnet_info *vi) while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); __skb_unlink(skb, &vi->send); - vi->dev->stats.tx_bytes += len; + vi->dev->stats.tx_bytes += skb->len; vi->dev->stats.tx_packets++; kfree_skb(skb); } } -static int start_xmit(struct sk_buff *skb, struct net_device *dev) +/* If the virtio transport doesn't always notify us when all in-flight packets + * are consumed, we fall back to using this function on a timer to free them. */ +static void xmit_free(unsigned long data) +{ + struct virtnet_info *vi = (void *)data; + + netif_tx_lock(vi->dev); + + free_old_xmit_skbs(vi); + + if (!skb_queue_empty(&vi->send)) + mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); + + netif_tx_unlock(vi->dev); +} + +static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) { - struct virtnet_info *vi = netdev_priv(dev); int num, err; - struct scatterlist sg[1+MAX_SKB_FRAGS]; + struct scatterlist sg[2+MAX_SKB_FRAGS]; struct virtio_net_hdr *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; - sg_init_table(sg, 1+MAX_SKB_FRAGS); + sg_init_table(sg, 2+MAX_SKB_FRAGS); - pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb, + pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb, dest[0], dest[1], dest[2], dest[3], dest[4], dest[5]); @@ -272,30 +304,73 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev) vnet_hdr_to_sg(sg, skb); num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; - __skb_queue_head(&vi->send, skb); + + err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); + if (!err && !vi->free_in_tasklet) + mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); + + return err; +} + +static void xmit_tasklet(unsigned long data) +{ + struct virtnet_info *vi = (void *)data; + + netif_tx_lock_bh(vi->dev); + if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) { + vi->svq->vq_ops->kick(vi->svq); + vi->last_xmit_skb = NULL; + } + if (vi->free_in_tasklet) + free_old_xmit_skbs(vi); + netif_tx_unlock_bh(vi->dev); +} + +static int start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); again: /* Free up any pending old buffers before queueing new ones. 
*/ free_old_xmit_skbs(vi); - err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); - if (err) { - pr_debug("%s: virtio not prepared to send\n", dev->name); - netif_stop_queue(dev); - - /* Activate callback for using skbs: if this returns false it - * means some were used in the meantime. */ - if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { - vi->svq->vq_ops->disable_cb(vi->svq); - netif_start_queue(dev); - goto again; + + /* If we has a buffer left over from last time, send it now. */ + if (unlikely(vi->last_xmit_skb)) { + if (xmit_skb(vi, vi->last_xmit_skb) != 0) { + /* Drop this skb: we only queue one. */ + vi->dev->stats.tx_dropped++; + kfree_skb(skb); + skb = NULL; + goto stop_queue; } - __skb_unlink(skb, &vi->send); + vi->last_xmit_skb = NULL; + } - return NETDEV_TX_BUSY; + /* Put new one in send queue and do transmit */ + if (likely(skb)) { + __skb_queue_head(&vi->send, skb); + if (xmit_skb(vi, skb) != 0) { + vi->last_xmit_skb = skb; + skb = NULL; + goto stop_queue; + } } +done: vi->svq->vq_ops->kick(vi->svq); - - return 0; + return NETDEV_TX_OK; + +stop_queue: + pr_debug("%s: virtio not prepared to send\n", dev->name); + netif_stop_queue(dev); + + /* Activate callback for using skbs: if this returns false it + * means some were used in the meantime. */ + if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { + vi->svq->vq_ops->disable_cb(vi->svq); + netif_start_queue(dev); + goto again; + } + goto done; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -355,17 +430,26 @@ static int virtnet_probe(struct virtio_device *vdev) SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ - if (csum && vdev->config->feature(vdev, VIRTIO_NET_F_CSUM)) { + if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; - if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_GSO)) { + if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } + /* Individual feature bits: what can host handle? */ + if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) + dev->features |= NETIF_F_TSO; + if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) + dev->features |= NETIF_F_TSO6; + if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) + dev->features |= NETIF_F_TSO_ECN; + if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) + dev->features |= NETIF_F_UFO; } /* Configuration may specify what MAC to use. Otherwise random. */ - if (vdev->config->feature(vdev, VIRTIO_NET_F_MAC)) { + if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->get(vdev, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len); @@ -379,6 +463,10 @@ static int virtnet_probe(struct virtio_device *vdev) vi->vdev = vdev; vdev->priv = vi; + /* If they give us a callback when all buffers are done, we don't need + * the timer. */ + vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY); + /* We expect two virtqueues, receive then send. 
*/ vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); if (IS_ERR(vi->rvq)) { @@ -396,6 +484,11 @@ static int virtnet_probe(struct virtio_device *vdev) skb_queue_head_init(&vi->recv); skb_queue_head_init(&vi->send); + tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi); + + if (!vi->free_in_tasklet) + setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi); + err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); @@ -433,13 +526,15 @@ static void virtnet_remove(struct virtio_device *vdev) /* Stop all the virtqueues. */ vdev->config->reset(vdev); + if (!vi->free_in_tasklet) + del_timer_sync(&vi->xmit_free_timer); + /* Free our skbs in send and recv queues, if any. */ while ((skb = __skb_dequeue(&vi->recv)) != NULL) { kfree_skb(skb); vi->num--; } - while ((skb = __skb_dequeue(&vi->send)) != NULL) - kfree_skb(skb); + __skb_queue_purge(&vi->send); BUG_ON(vi->num != 0); @@ -454,7 +549,15 @@ static struct virtio_device_id id_table[] = { { 0 }, }; +static unsigned int features[] = { + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, +}; + static struct virtio_driver virtio_net = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 8005dd16fb4..d5140aed7b7 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -150,11 +150,9 @@ config HDLC_FR config HDLC_PPP tristate "Synchronous Point-to-Point Protocol (PPP) support" - depends on HDLC && BROKEN + depends on HDLC help Generic HDLC driver supporting PPP over WAN connections. - This module is currently broken and will cause a kernel panic - when a device configured in PPP mode is activated. It will be replaced by new PPP implementation in Linux 2.6.26. 
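For reference, the virtio_net hunks above replace the old vdev->config->feature() probing with virtio_has_feature() and advertise the bits the driver understands through an explicit feature table on the virtio_driver. A minimal sketch of that registration pattern, using a hypothetical "virtio-example" driver rather than anything in this patch set, could look like:

#include <linux/module.h>
#include <linux/if_ether.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

/* Feature bits this driver claims to understand; virtio core masks off
 * anything the host offers beyond this list before probe runs. */
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_MAC, VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static int example_probe(struct virtio_device *vdev)
{
	u8 mac[ETH_ALEN];

	/* A positive test here means both sides agreed on the bit,
	 * because it was listed in .feature_table above. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->get(vdev, offsetof(struct virtio_net_config, mac),
				  mac, sizeof(mac));
	return 0;
}

static void example_remove(struct virtio_device *vdev)
{
	/* Nothing to tear down in this sketch. */
}

static struct virtio_driver example_driver = {
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= "virtio-example",
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= example_probe,
	.remove			= example_remove,
};

static int __init example_init(void)
{
	return register_virtio_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_virtio_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");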
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 45ddfc9763c..b0fce1387ea 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -629,7 +629,7 @@ static void sppp_channel_init(struct channel_data *chan) d->base_addr = chan->cosa->datareg; d->irq = chan->cosa->irq; d->dma = chan->cosa->dma; - d->priv = chan; + d->ml_priv = chan; sppp_attach(&chan->pppdev); if (register_netdev(d)) { printk(KERN_WARNING "%s: register_netdev failed.\n", d->name); @@ -650,7 +650,7 @@ static void sppp_channel_delete(struct channel_data *chan) static int cosa_sppp_open(struct net_device *d) { - struct channel_data *chan = d->priv; + struct channel_data *chan = d->ml_priv; int err; unsigned long flags; @@ -690,7 +690,7 @@ static int cosa_sppp_open(struct net_device *d) static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) { - struct channel_data *chan = dev->priv; + struct channel_data *chan = dev->ml_priv; netif_stop_queue(dev); @@ -701,7 +701,7 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) static void cosa_sppp_timeout(struct net_device *dev) { - struct channel_data *chan = dev->priv; + struct channel_data *chan = dev->ml_priv; if (test_bit(RXBIT, &chan->cosa->rxtx)) { chan->stats.rx_errors++; @@ -720,7 +720,7 @@ static void cosa_sppp_timeout(struct net_device *dev) static int cosa_sppp_close(struct net_device *d) { - struct channel_data *chan = d->priv; + struct channel_data *chan = d->ml_priv; unsigned long flags; netif_stop_queue(d); @@ -800,7 +800,7 @@ static int sppp_tx_done(struct channel_data *chan, int size) static struct net_device_stats *cosa_net_stats(struct net_device *dev) { - struct channel_data *chan = dev->priv; + struct channel_data *chan = dev->ml_priv; return &chan->stats; } @@ -1217,7 +1217,7 @@ static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int rv; - struct channel_data *chan = dev->priv; + struct channel_data *chan = dev->ml_priv; rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); if (rv == -ENOIOCTLCMD) { return sppp_do_ioctl(dev, ifr, cmd); diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 9a83c9d5b8c..7f984895b0d 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22"; #undef DEBUG_LINK -static struct hdlc_proto *first_proto = NULL; - +static struct hdlc_proto *first_proto; static int hdlc_change_mtu(struct net_device *dev, int new_mtu) { @@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev) void register_hdlc_protocol(struct hdlc_proto *proto) { + rtnl_lock(); proto->next = first_proto; first_proto = proto; + rtnl_unlock(); } void unregister_hdlc_protocol(struct hdlc_proto *proto) { - struct hdlc_proto **p = &first_proto; - while (*p) { - if (*p == proto) { - *p = proto->next; - return; - } + struct hdlc_proto **p; + + rtnl_lock(); + p = &first_proto; + while (*p != proto) { + BUG_ON(!*p); p = &((*p)->next); } + *p = proto->next; + rtnl_unlock(); } diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 7133c688cf2..762d21c1c70 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -56,6 +56,7 @@ struct cisco_state { cisco_proto settings; struct timer_list timer; + spinlock_t lock; unsigned long last_poll; int up; int request_sent; @@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb) { struct net_device *dev = skb->dev; hdlc_device *hdlc = dev_to_hdlc(dev); + struct cisco_state 
*st = state(hdlc); struct hdlc_header *data = (struct hdlc_header*)skb->data; struct cisco_packet *cisco_data; struct in_device *in_dev; @@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb) goto rx_error; case CISCO_KEEPALIVE_REQ: - state(hdlc)->rxseq = ntohl(cisco_data->par1); - if (state(hdlc)->request_sent && - ntohl(cisco_data->par2) == state(hdlc)->txseq) { - state(hdlc)->last_poll = jiffies; - if (!state(hdlc)->up) { + spin_lock(&st->lock); + st->rxseq = ntohl(cisco_data->par1); + if (st->request_sent && + ntohl(cisco_data->par2) == st->txseq) { + st->last_poll = jiffies; + if (!st->up) { u32 sec, min, hrs, days; sec = ntohl(cisco_data->time) / 1000; min = sec / 60; sec -= min * 60; @@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb) days = hrs / 24; hrs -= days * 24; printk(KERN_INFO "%s: Link up (peer " "uptime %ud%uh%um%us)\n", - dev->name, days, hrs, - min, sec); + dev->name, days, hrs, min, sec); netif_dormant_off(dev); - state(hdlc)->up = 1; + st->up = 1; } } + spin_unlock(&st->lock); dev_kfree_skb_any(skb); return NET_RX_SUCCESS; @@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg) { struct net_device *dev = (struct net_device *)arg; hdlc_device *hdlc = dev_to_hdlc(dev); + struct cisco_state *st = state(hdlc); - if (state(hdlc)->up && - time_after(jiffies, state(hdlc)->last_poll + - state(hdlc)->settings.timeout * HZ)) { - state(hdlc)->up = 0; + spin_lock(&st->lock); + if (st->up && + time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) { + st->up = 0; printk(KERN_INFO "%s: Link down\n", dev->name); netif_dormant_on(dev); } - cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, - htonl(++state(hdlc)->txseq), - htonl(state(hdlc)->rxseq)); - state(hdlc)->request_sent = 1; - state(hdlc)->timer.expires = jiffies + - state(hdlc)->settings.interval * HZ; - state(hdlc)->timer.function = cisco_timer; - state(hdlc)->timer.data = arg; - add_timer(&state(hdlc)->timer); + cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), + htonl(st->rxseq)); + st->request_sent = 1; + spin_unlock(&st->lock); + + st->timer.expires = jiffies + st->settings.interval * HZ; + st->timer.function = cisco_timer; + st->timer.data = arg; + add_timer(&st->timer); } @@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg) static void cisco_start(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); - state(hdlc)->up = 0; - state(hdlc)->request_sent = 0; - state(hdlc)->txseq = state(hdlc)->rxseq = 0; - - init_timer(&state(hdlc)->timer); - state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ - state(hdlc)->timer.function = cisco_timer; - state(hdlc)->timer.data = (unsigned long)dev; - add_timer(&state(hdlc)->timer); + struct cisco_state *st = state(hdlc); + unsigned long flags; + + spin_lock_irqsave(&st->lock, flags); + st->up = 0; + st->request_sent = 0; + st->txseq = st->rxseq = 0; + spin_unlock_irqrestore(&st->lock, flags); + + init_timer(&st->timer); + st->timer.expires = jiffies + HZ; /* First poll after 1 s */ + st->timer.function = cisco_timer; + st->timer.data = (unsigned long)dev; + add_timer(&st->timer); } @@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev) static void cisco_stop(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); - del_timer_sync(&state(hdlc)->timer); + struct cisco_state *st = state(hdlc); + unsigned long flags; + + del_timer_sync(&st->timer); + + spin_lock_irqsave(&st->lock, flags); netif_dormant_on(dev); - state(hdlc)->up = 0; - state(hdlc)->request_sent = 0; + st->up = 0; + 
st->request_sent = 0; + spin_unlock_irqrestore(&st->lock, flags); } @@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) return result; memcpy(&state(hdlc)->settings, &new_settings, size); + spin_lock_init(&state(hdlc)->lock); dev->hard_start_xmit = hdlc->xmit; dev->header_ops = &cisco_header_ops; dev->type = ARPHRD_CISCO; diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 520bb0b1a9a..6d35155c714 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1008,6 +1008,7 @@ static int fr_rx(struct sk_buff *skb) stats->rx_bytes += skb->len; if (pvc->state.becn) stats->rx_compressed++; + skb->dev = dev; netif_rx(skb); return NET_RX_SUCCESS; } else { diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 10396d9686f..00308337928 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -45,7 +45,7 @@ static int ppp_open(struct net_device *dev) int (*old_ioctl)(struct net_device *, struct ifreq *, int); int result; - dev->priv = &state(hdlc)->syncppp_ptr; + dev->ml_priv = &state(hdlc)->syncppp_ptr; state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev; state(hdlc)->pppdev.dev = dev; diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index 83dbc924fcb..f3065d3473f 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -75,7 +75,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) static int hostess_open(struct net_device *d) { - struct sv11_device *sv11=d->priv; + struct sv11_device *sv11=d->ml_priv; int err = -1; /* @@ -128,7 +128,7 @@ static int hostess_open(struct net_device *d) static int hostess_close(struct net_device *d) { - struct sv11_device *sv11=d->priv; + struct sv11_device *sv11=d->ml_priv; /* * Discard new frames */ @@ -159,14 +159,14 @@ static int hostess_close(struct net_device *d) static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) { - /* struct sv11_device *sv11=d->priv; + /* struct sv11_device *sv11=d->ml_priv; z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ return sppp_do_ioctl(d, ifr,cmd); } static struct net_device_stats *hostess_get_stats(struct net_device *d) { - struct sv11_device *sv11=d->priv; + struct sv11_device *sv11=d->ml_priv; if(sv11) return z8530_get_stats(&sv11->sync.chanA); else @@ -179,7 +179,7 @@ static struct net_device_stats *hostess_get_stats(struct net_device *d) static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) { - struct sv11_device *sv11=d->priv; + struct sv11_device *sv11=d->ml_priv; return z8530_queue_xmit(&sv11->sync.chanA, skb); } @@ -325,6 +325,7 @@ static struct sv11_device *sv11_init(int iobase, int irq) /* * Initialise the PPP components */ + d->ml_priv = sv; sppp_attach(&sv->netdev); /* @@ -333,7 +334,6 @@ static struct sv11_device *sv11_init(int iobase, int irq) d->base_addr = iobase; d->irq = irq; - d->priv = sv; if(register_netdev(d)) { diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index b5860b97a93..24fd613466b 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -459,6 +459,7 @@ static void __exit lapbeth_cleanup_driver(void) list_for_each_safe(entry, tmp, &lapbeth_devices) { lapbeth = list_entry(entry, struct lapbethdev, node); + dev_put(lapbeth->ethdev); unregister_netdevice(lapbeth->axdev); } rtnl_unlock(); diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 6635ecef36e..62133cee446 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ 
b/drivers/net/wan/lmc/lmc_main.c @@ -891,6 +891,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, /* Initialize the sppp layer */ /* An ioctl can cause a subsequent detach for raw frame interface */ + dev->ml_priv = sc; sc->if_type = LMC_PPP; sc->check = 0xBEAFCAFE; dev->base_addr = pci_resource_start(pdev, 0); diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 11276bf3149..44a89df1b8b 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -241,6 +241,7 @@ static inline struct slvl_device *slvl_alloc(int iobase, int irq) return NULL; sv = d->priv; + d->ml_priv = sv; sv->if_ptr = &sv->pppdev; sv->pppdev.dev = d; d->base_addr = iobase; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 249e18053d5..069f8bb0a99 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -32,6 +32,7 @@ #include <linux/x25.h> #include <linux/lapb.h> #include <linux/init.h> +#include <linux/rtnetlink.h> #include "x25_asy.h" #include <net/x25device.h> @@ -601,8 +602,10 @@ static void x25_asy_close_tty(struct tty_struct *tty) if (!sl || sl->magic != X25_ASY_MAGIC) return; + rtnl_lock(); if (sl->dev->flags & IFF_UP) dev_close(sl->dev); + rtnl_unlock(); tty->disc_data = NULL; sl->tty = NULL; diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 45f47c1c0a3..32019fb878d 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -2668,6 +2668,7 @@ static struct net_device *init_wifidev(struct airo_info *ai, dev->irq = ethdev->irq; dev->base_addr = ethdev->base_addr; dev->wireless_data = ethdev->wireless_data; + SET_NETDEV_DEV(dev, ethdev->dev.parent); memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len); err = register_netdev(dev); if (err<0) { @@ -2904,7 +2905,7 @@ EXPORT_SYMBOL(init_airo_card); static int waitbusy (struct airo_info *ai) { int delay = 0; - while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) & (delay < 10000)) { + while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) { udelay (10); if ((++delay % 20) == 0) OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 4e5c8fc3520..635b9ac9aaa 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -1787,6 +1787,8 @@ ath5k_tasklet_rx(unsigned long data) spin_lock(&sc->rxbuflock); do { + rxs.flag = 0; + if (unlikely(list_empty(&sc->rxbuf))) { ATH5K_WARN(sc, "empty rx buf pool\n"); break; diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c index 5fb1ae6ad3e..77990b56860 100644 --- a/drivers/net/wireless/ath5k/hw.c +++ b/drivers/net/wireless/ath5k/hw.c @@ -4119,6 +4119,7 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); rs->rs_status = 0; + rs->rs_phyerr = 0; /* * Key table status @@ -4145,7 +4146,7 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { rs->rs_status |= AR5K_RXERR_PHY; - rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1, + rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1, AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); } @@ -4193,6 +4194,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); rs->rs_status = 0; + rs->rs_phyerr = 0; /* * Key table status @@ -4215,7 +4217,7 @@ static int 
ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { rs->rs_status |= AR5K_RXERR_PHY; - rs->rs_phyerr = AR5K_REG_MS(rx_err->rx_error_1, + rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); } diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index ef2da4023d6..438e63ecccf 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -47,6 +47,7 @@ #include <linux/string.h> #include <linux/ctype.h> #include <linux/timer.h> +#include <asm/byteorder.h> #include <asm/io.h> #include <asm/system.h> #include <asm/uaccess.h> @@ -60,7 +61,6 @@ #include <linux/delay.h> #include <linux/wireless.h> #include <net/iw_handler.h> -#include <linux/byteorder/generic.h> #include <linux/crc32.h> #include <linux/proc_fs.h> #include <linux/device.h> diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index f51b2d9b085..1fa043d1802 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig @@ -1,6 +1,6 @@ config B43 tristate "Broadcom 43xx wireless support (mac80211 stack)" - depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 + depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA select SSB select FW_LOADER select HW_RANDOM diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index eff2a158a41..d3db298c05f 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -630,7 +630,6 @@ struct b43_pio { /* Context information for a noise calculation (Link Quality). */ struct b43_noise_calculation { - u8 channel_at_start; bool calculation_running; u8 nr_samples; s8 samples[8][4]; @@ -691,6 +690,10 @@ struct b43_wl { struct mutex mutex; spinlock_t irq_lock; + /* R/W lock for data transmission. + * Transmissions on 2+ queues can run concurrently, but somebody else + * might sync with TX by write_lock_irqsave()'ing. */ + rwlock_t tx_lock; /* Lock for LEDs access. */ spinlock_t leds_lock; /* Lock for SHM access. */ @@ -733,6 +736,7 @@ struct b43_wl { struct ieee80211_tx_control beacon_txctl; bool beacon0_uploaded; bool beacon1_uploaded; + bool beacon_templates_virgin; /* Never wrote the templates? */ struct work_struct beacon_update_trigger; /* The current QOS parameters for the 4 queues. 
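The b43 header change above adds wl->tx_lock, an rwlock: transmissions on separate queues take the read side and may run concurrently, while paths that must quiesce TX (dummy transmissions, core stop) take the write side, as the main.c hunks further down show. A stripped-down sketch of that discipline, with hypothetical names and no real hardware access, might be:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct example_wl {
	rwlock_t tx_lock;	/* readers: per-queue TX; writer: paths quiescing TX */
	int status;		/* > 0 means the core is started */
};

static void example_init_locks(struct example_wl *wl)
{
	rwlock_init(&wl->tx_lock);
}

/* Hot path: several queues may transmit at once, so only the read lock is
 * taken; interrupts are blocked because TX may also run from IRQ context. */
static int example_tx(struct example_wl *wl)
{
	unsigned long flags;
	int err = -ENODEV;

	read_lock_irqsave(&wl->tx_lock, flags);
	if (likely(wl->status > 0))
		err = 0;	/* hand the frame to DMA or PIO here */
	read_unlock_irqrestore(&wl->tx_lock, flags);

	return err;
}

/* Teardown: the write lock excludes every concurrent transmitter before the
 * status flips, so no TX can race with the shutdown. */
static void example_stop(struct example_wl *wl)
{
	unsigned long flags;

	write_lock_irqsave(&wl->tx_lock, flags);
	wl->status = 0;
	write_unlock_irqrestore(&wl->tx_lock, flags);
}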
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 6dcbb3c87e7..e23f2f172bd 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -795,24 +795,49 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, { struct b43_dmaring *ring; int err; - int nr_slots; dma_addr_t dma_test; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) goto out; - ring->type = type; - nr_slots = B43_RXRING_SLOTS; + ring->nr_slots = B43_RXRING_SLOTS; if (for_tx) - nr_slots = B43_TXRING_SLOTS; + ring->nr_slots = B43_TXRING_SLOTS; - ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta), + ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta), GFP_KERNEL); if (!ring->meta) goto err_kfree_ring; + + ring->type = type; + ring->dev = dev; + ring->mmio_base = b43_dmacontroller_base(type, controller_index); + ring->index = controller_index; + if (type == B43_DMA_64BIT) + ring->ops = &dma64_ops; + else + ring->ops = &dma32_ops; if (for_tx) { - ring->txhdr_cache = kcalloc(nr_slots, + ring->tx = 1; + ring->current_slot = -1; + } else { + if (ring->index == 0) { + ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; + ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; + } else if (ring->index == 3) { + ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE; + ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET; + } else + B43_WARN_ON(1); + } + spin_lock_init(&ring->lock); +#ifdef CONFIG_B43_DEBUG + ring->last_injected_overflow = jiffies; +#endif + + if (for_tx) { + ring->txhdr_cache = kcalloc(ring->nr_slots, b43_txhdr_size(dev), GFP_KERNEL); if (!ring->txhdr_cache) @@ -828,7 +853,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, b43_txhdr_size(dev), 1)) { /* ugh realloc */ kfree(ring->txhdr_cache); - ring->txhdr_cache = kcalloc(nr_slots, + ring->txhdr_cache = kcalloc(ring->nr_slots, b43_txhdr_size(dev), GFP_KERNEL | GFP_DMA); if (!ring->txhdr_cache) @@ -853,32 +878,6 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, DMA_TO_DEVICE); } - ring->dev = dev; - ring->nr_slots = nr_slots; - ring->mmio_base = b43_dmacontroller_base(type, controller_index); - ring->index = controller_index; - if (type == B43_DMA_64BIT) - ring->ops = &dma64_ops; - else - ring->ops = &dma32_ops; - if (for_tx) { - ring->tx = 1; - ring->current_slot = -1; - } else { - if (ring->index == 0) { - ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; - ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; - } else if (ring->index == 3) { - ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE; - ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET; - } else - B43_WARN_ON(1); - } - spin_lock_init(&ring->lock); -#ifdef CONFIG_B43_DEBUG - ring->last_injected_overflow = jiffies; -#endif - err = alloc_ringmemory(ring); if (err) goto err_kfree_txhdr_cache; diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c index 36a9c42df83..76f4c7bad8b 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/b43/leds.c @@ -72,6 +72,9 @@ static void b43_led_brightness_set(struct led_classdev *led_dev, struct b43_wldev *dev = led->dev; bool radio_enabled; + if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) + return; + /* Checking the radio-enabled status here is slightly racy, * but we want to avoid the locking overhead and we don't care * whether the LED has the wrong state for a second. 
*/ diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 8c24cd72aac..a7082779308 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -729,6 +729,7 @@ static void b43_synchronize_irq(struct b43_wldev *dev) */ void b43_dummy_transmission(struct b43_wldev *dev) { + struct b43_wl *wl = dev->wl; struct b43_phy *phy = &dev->phy; unsigned int i, max_loop; u16 value; @@ -755,6 +756,9 @@ void b43_dummy_transmission(struct b43_wldev *dev) return; } + spin_lock_irq(&wl->irq_lock); + write_lock(&wl->tx_lock); + for (i = 0; i < 5; i++) b43_ram_write(dev, i * 4, buffer[i]); @@ -795,6 +799,9 @@ void b43_dummy_transmission(struct b43_wldev *dev) } if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) b43_radio_write16(dev, 0x0051, 0x0037); + + write_unlock(&wl->tx_lock); + spin_unlock_irq(&wl->irq_lock); } static void key_write(struct b43_wldev *dev, @@ -1138,7 +1145,6 @@ static void b43_generate_noise_sample(struct b43_wldev *dev) b43_jssi_write(dev, 0x7F7F7F7F); b43_write32(dev, B43_MMIO_MACCMD, b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_BGNOISE); - B43_WARN_ON(dev->noisecalc.channel_at_start != dev->phy.channel); } static void b43_calculate_link_quality(struct b43_wldev *dev) @@ -1147,7 +1153,6 @@ static void b43_calculate_link_quality(struct b43_wldev *dev) if (dev->noisecalc.calculation_running) return; - dev->noisecalc.channel_at_start = dev->phy.channel; dev->noisecalc.calculation_running = 1; dev->noisecalc.nr_samples = 0; @@ -1164,9 +1169,16 @@ static void handle_irq_noise(struct b43_wldev *dev) /* Bottom half of Link Quality calculation. */ + /* Possible race condition: It might be possible that the user + * changed to a different channel in the meantime since we + * started the calculation. We ignore that fact, since it's + * not really that much of a problem. The background noise is + * an estimation only anyway. Slightly wrong results will get damped + * by the averaging of the 8 sample rounds. Additionally the + * value is shortlived. So it will be replaced by the next noise + * calculation round soon. */ + B43_WARN_ON(!dev->noisecalc.calculation_running); - if (dev->noisecalc.channel_at_start != phy->channel) - goto drop_calculation; *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); if (noise[0] == 0x7F || noise[1] == 0x7F || noise[2] == 0x7F || noise[3] == 0x7F) @@ -1207,11 +1219,10 @@ static void handle_irq_noise(struct b43_wldev *dev) average -= 48; dev->stats.link_noise = average; - drop_calculation: dev->noisecalc.calculation_running = 0; return; } - generate_new: +generate_new: b43_generate_noise_sample(dev); } @@ -1537,6 +1548,30 @@ static void b43_write_probe_resp_template(struct b43_wldev *dev, kfree(probe_resp_data); } +static void b43_upload_beacon0(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (wl->beacon0_uploaded) + return; + b43_write_beacon_template(dev, 0x68, 0x18); + /* FIXME: Probe resp upload doesn't really belong here, + * but we don't use that feature anyway. 
*/ + b43_write_probe_resp_template(dev, 0x268, 0x4A, + &__b43_ratetable[3]); + wl->beacon0_uploaded = 1; +} + +static void b43_upload_beacon1(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (wl->beacon1_uploaded) + return; + b43_write_beacon_template(dev, 0x468, 0x1A); + wl->beacon1_uploaded = 1; +} + static void handle_irq_beacon(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; @@ -1561,24 +1596,27 @@ static void handle_irq_beacon(struct b43_wldev *dev) return; } - if (!beacon0_valid) { - if (!wl->beacon0_uploaded) { - b43_write_beacon_template(dev, 0x68, 0x18); - b43_write_probe_resp_template(dev, 0x268, 0x4A, - &__b43_ratetable[3]); - wl->beacon0_uploaded = 1; - } + if (unlikely(wl->beacon_templates_virgin)) { + /* We never uploaded a beacon before. + * Upload both templates now, but only mark one valid. */ + wl->beacon_templates_virgin = 0; + b43_upload_beacon0(dev); + b43_upload_beacon1(dev); cmd = b43_read32(dev, B43_MMIO_MACCMD); cmd |= B43_MACCMD_BEACON0_VALID; b43_write32(dev, B43_MMIO_MACCMD, cmd); - } else if (!beacon1_valid) { - if (!wl->beacon1_uploaded) { - b43_write_beacon_template(dev, 0x468, 0x1A); - wl->beacon1_uploaded = 1; + } else { + if (!beacon0_valid) { + b43_upload_beacon0(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON0_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); + } else if (!beacon1_valid) { + b43_upload_beacon1(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON1_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); } - cmd = b43_read32(dev, B43_MMIO_MACCMD); - cmd |= B43_MACCMD_BEACON1_VALID; - b43_write32(dev, B43_MMIO_MACCMD, cmd); } } @@ -2840,26 +2878,37 @@ static int b43_op_tx(struct ieee80211_hw *hw, { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev = wl->current_dev; - int err = -ENODEV; + unsigned long flags; + int err; if (unlikely(skb->len < 2 + 2 + 6)) { /* Too short, this can't be a valid frame. */ - return -EINVAL; + goto drop_packet; } B43_WARN_ON(skb_shinfo(skb)->nr_frags); - if (unlikely(!dev)) - goto out; - if (unlikely(b43_status(dev) < B43_STAT_STARTED)) - goto out; - /* TX is done without a global lock. */ - if (b43_using_pio_transfers(dev)) - err = b43_pio_tx(dev, skb, ctl); - else - err = b43_dma_tx(dev, skb, ctl); -out: + goto drop_packet; + + /* Transmissions on seperate queues can run concurrently. */ + read_lock_irqsave(&wl->tx_lock, flags); + + err = -ENODEV; + if (likely(b43_status(dev) >= B43_STAT_STARTED)) { + if (b43_using_pio_transfers(dev)) + err = b43_pio_tx(dev, skb, ctl); + else + err = b43_dma_tx(dev, skb, ctl); + } + + read_unlock_irqrestore(&wl->tx_lock, flags); + if (unlikely(err)) - return NETDEV_TX_BUSY; + goto drop_packet; + return NETDEV_TX_OK; + +drop_packet: + /* We can not transmit this packet. Drop it. */ + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3476,7 +3525,9 @@ static void b43_wireless_core_stop(struct b43_wldev *dev) spin_unlock_irqrestore(&wl->irq_lock, flags); b43_synchronize_irq(dev); + write_lock_irqsave(&wl->tx_lock, flags); b43_set_status(dev, B43_STAT_INITIALIZED); + write_unlock_irqrestore(&wl->tx_lock, flags); b43_pio_stop(dev); mutex_unlock(&wl->mutex); @@ -3485,8 +3536,6 @@ static void b43_wireless_core_stop(struct b43_wldev *dev) cancel_delayed_work_sync(&dev->periodic_work); mutex_lock(&wl->mutex); - ieee80211_stop_queues(wl->hw); //FIXME this could cause a deadlock, as mac80211 seems buggy. 
- b43_mac_suspend(dev); free_irq(dev->dev->irq, dev); b43dbg(wl, "Wireless interface stopped\n"); @@ -4059,6 +4108,9 @@ static int b43_op_start(struct ieee80211_hw *hw) wl->filter_flags = 0; wl->radiotap_enabled = 0; b43_qos_clear(wl); + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + wl->beacon_templates_virgin = 1; /* First register RFkill. * LEDs that are registered later depend on it. */ @@ -4227,7 +4279,9 @@ static void b43_chip_reset(struct work_struct *work) goto out; } } - out: +out: + if (err) + wl->current_dev = NULL; /* Failed to init the dev. */ mutex_unlock(&wl->mutex); if (err) b43err(wl, "Controller restart FAILED\n"); @@ -4326,6 +4380,14 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) err = -EOPNOTSUPP; goto err_powerdown; } + if (1 /* disable A-PHY */) { + /* FIXME: For now we disable the A-PHY on multi-PHY devices. */ + if (dev->phy.type != B43_PHYTYPE_N) { + have_2ghz_phy = 1; + have_5ghz_phy = 0; + } + } + dev->phy.gmode = have_2ghz_phy; tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; b43_wireless_core_reset(dev, tmp); @@ -4360,9 +4422,11 @@ static void b43_one_core_detach(struct ssb_device *dev) struct b43_wldev *wldev; struct b43_wl *wl; + /* Do not cancel ieee80211-workqueue based work here. + * See comment in b43_remove(). */ + wldev = ssb_get_drvdata(dev); wl = wldev->wl; - cancel_work_sync(&wldev->restart_work); b43_debugfs_remove_device(wldev); b43_wireless_core_detach(wldev); list_del(&wldev->list); @@ -4490,6 +4554,7 @@ static int b43_wireless_init(struct ssb_device *dev) memset(wl, 0, sizeof(*wl)); wl->hw = hw; spin_lock_init(&wl->irq_lock); + rwlock_init(&wl->tx_lock); spin_lock_init(&wl->leds_lock); spin_lock_init(&wl->shm_lock); mutex_init(&wl->mutex); @@ -4546,6 +4611,10 @@ static void b43_remove(struct ssb_device *dev) struct b43_wl *wl = ssb_get_devtypedata(dev); struct b43_wldev *wldev = ssb_get_drvdata(dev); + /* We must cancel any work here before unregistering from ieee80211, + * as the ieee80211 unreg will destroy the workqueue. 
*/ + cancel_work_sync(&wldev->restart_work); + B43_WARN_ON(!wl); if (wl->current_dev == wldev) ieee80211_unregister_hw(wl->hw); diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig index 13c65faf024..aef2298d37a 100644 --- a/drivers/net/wireless/b43legacy/Kconfig +++ b/drivers/net/wireless/b43legacy/Kconfig @@ -1,6 +1,6 @@ config B43LEGACY tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)" - depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 + depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA select SSB select FW_LOADER select HW_RANDOM diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index c990f87b107..93ddc1cbcc8 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c @@ -876,6 +876,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, if (!ring) goto out; ring->type = type; + ring->dev = dev; nr_slots = B43legacy_RXRING_SLOTS; if (for_tx) @@ -922,7 +923,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, DMA_TO_DEVICE); } - ring->dev = dev; ring->nr_slots = nr_slots; ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); ring->index = controller_index; diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 14a5eea2573..3e612d0a13e 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -2378,8 +2378,10 @@ static int b43legacy_op_tx(struct ieee80211_hw *hw, } else err = b43legacy_dma_tx(dev, skb, ctl); out: - if (unlikely(err)) - return NETDEV_TX_BUSY; + if (unlikely(err)) { + /* Drop the packet. */ + dev_kfree_skb_any(skb); + } return NETDEV_TX_OK; } @@ -3039,7 +3041,6 @@ static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev) /* Locking: wl->mutex */ static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) { - struct b43legacy_wl *wl = dev->wl; struct b43legacy_phy *phy = &dev->phy; u32 macctl; @@ -3054,12 +3055,6 @@ static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) macctl |= B43legacy_MACCTL_PSM_JMP0; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); - mutex_unlock(&wl->mutex); - /* Must unlock as it would otherwise deadlock. No races here. - * Cancel possibly pending workqueues. */ - cancel_work_sync(&dev->restart_work); - mutex_lock(&wl->mutex); - b43legacy_leds_exit(dev); b43legacy_rng_exit(dev->wl); b43legacy_pio_free(dev); @@ -3486,6 +3481,8 @@ static void b43legacy_chip_reset(struct work_struct *work) } } out: + if (err) + wl->current_dev = NULL; /* Failed to init the dev. */ mutex_unlock(&wl->mutex); if (err) b43legacyerr(wl, "Controller restart FAILED\n"); @@ -3618,9 +3615,11 @@ static void b43legacy_one_core_detach(struct ssb_device *dev) struct b43legacy_wldev *wldev; struct b43legacy_wl *wl; + /* Do not cancel ieee80211-workqueue based work here. + * See comment in b43legacy_remove(). */ + wldev = ssb_get_drvdata(dev); wl = wldev->wl; - cancel_work_sync(&wldev->restart_work); b43legacy_debugfs_remove_device(wldev); b43legacy_wireless_core_detach(wldev); list_del(&wldev->list); @@ -3789,6 +3788,10 @@ static void b43legacy_remove(struct ssb_device *dev) struct b43legacy_wl *wl = ssb_get_devtypedata(dev); struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + /* We must cancel any work here before unregistering from ieee80211, + * as the ieee80211 unreg will destroy the workqueue. 
*/ + cancel_work_sync(&wldev->restart_work); + B43legacy_WARN_ON(!wl); if (wl->current_dev == wldev) ieee80211_unregister_hw(wl->hw); diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 4fd73809602..020f450e9db 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -64,7 +64,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb, int hdrlen, phdrlen, head_need, tail_need; u16 fc; int prism_header, ret; - struct ieee80211_hdr_4addr *hdr; + struct ieee80211_hdr_4addr *fhdr; iface = netdev_priv(dev); local = iface->local; @@ -83,8 +83,8 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb, phdrlen = 0; } - hdr = (struct ieee80211_hdr_4addr *) skb->data; - fc = le16_to_cpu(hdr->frame_ctl); + fhdr = (struct ieee80211_hdr_4addr *) skb->data; + fc = le16_to_cpu(fhdr->frame_ctl); if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) { printk(KERN_DEBUG "%s: dropped management frame with header " @@ -551,7 +551,7 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff || hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) { /* RA (or BSSID) is not ours - drop */ - PDEBUG(DEBUG_EXTRA, "%s: received WDS frame with " + PDEBUG(DEBUG_EXTRA2, "%s: received WDS frame with " "not own or broadcast %s=%s\n", local->dev->name, fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID", diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 0acd9589c48..ab981afd481 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -1930,7 +1930,7 @@ static void handle_pspoll(local_info_t *local, PDEBUG(DEBUG_PS, " PSPOLL and AID[15:14] not set\n"); return; } - aid &= ~BIT(15) & ~BIT(14); + aid &= ~(BIT(15) | BIT(14)); if (aid == 0 || aid > MAX_AID_TABLE_SIZE) { PDEBUG(DEBUG_PS, " invalid aid=%d\n", aid); return; diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 437a9bcc9bd..3b4e55cf33c 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -533,10 +533,10 @@ static void prism2_detach(struct pcmcia_device *link) do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) #define CFG_CHECK2(fn, retf) \ -do { int ret = (retf); \ -if (ret != 0) { \ - PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", ret); \ - cs_error(link, fn, ret); \ +do { int _ret = (retf); \ +if (_ret != 0) { \ + PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", _ret); \ + cs_error(link, fn, _ret); \ goto next_entry; \ } \ } while (0) @@ -777,8 +777,10 @@ static int hostap_cs_suspend(struct pcmcia_device *link) int dev_open = 0; struct hostap_interface *iface = NULL; - if (dev) - iface = netdev_priv(dev); + if (!dev) + return -ENODEV; + + iface = netdev_priv(dev); PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info); if (iface && iface->local) @@ -798,8 +800,10 @@ static int hostap_cs_resume(struct pcmcia_device *link) int dev_open = 0; struct hostap_interface *iface = NULL; - if (dev) - iface = netdev_priv(dev); + if (!dev) + return -ENODEV; + + iface = netdev_priv(dev); PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info); @@ -833,6 +837,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), 
conflict with pcnet_cs */ + PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 7be68db6f30..936f52e3d95 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -2835,7 +2835,7 @@ static void hostap_passive_scan(unsigned long data) { local_info_t *local = (local_info_t *) data; struct net_device *dev = local->dev; - u16 channel; + u16 chan; if (local->passive_scan_interval <= 0) return; @@ -2872,11 +2872,11 @@ static void hostap_passive_scan(unsigned long data) printk(KERN_DEBUG "%s: passive scan channel %d\n", dev->name, local->passive_scan_channel); - channel = local->passive_scan_channel; + chan = local->passive_scan_channel; local->passive_scan_state = PASSIVE_SCAN_WAIT; local->passive_scan_timer.expires = jiffies + HZ / 10; } else { - channel = local->channel; + chan = local->channel; local->passive_scan_state = PASSIVE_SCAN_LISTEN; local->passive_scan_timer.expires = jiffies + local->passive_scan_interval * HZ; @@ -2884,9 +2884,9 @@ static void hostap_passive_scan(unsigned long data) if (hfa384x_cmd_callback(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_CHANGE_CHANNEL << 8), - channel, NULL, 0)) + chan, NULL, 0)) printk(KERN_ERR "%s: passive scan channel set %d " - "failed\n", dev->name, channel); + "failed\n", dev->name, chan); add_timer(&local->passive_scan_timer); } @@ -3276,11 +3276,6 @@ while (0) } printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name); -#ifndef PRISM2_NO_PROCFS_DEBUG - create_proc_read_entry("registers", 0, local->proc, - prism2_registers_proc_read, local); -#endif /* PRISM2_NO_PROCFS_DEBUG */ - hostap_init_data(local); return dev; @@ -3307,6 +3302,10 @@ static int hostap_hw_ready(struct net_device *dev) netif_carrier_off(local->ddev); } hostap_init_proc(local); +#ifndef PRISM2_NO_PROCFS_DEBUG + create_proc_read_entry("registers", 0, local->proc, + prism2_registers_proc_read, local); +#endif /* PRISM2_NO_PROCFS_DEBUG */ hostap_init_ap_proc(local); return 0; } diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 20d387f6658..a38e85f334d 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -594,7 +594,8 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) } -int hostap_80211_header_parse(const struct sk_buff *skb, unsigned char *haddr) +static int hostap_80211_header_parse(const struct sk_buff *skb, + unsigned char *haddr) { struct hostap_interface *iface = netdev_priv(skb->dev); local_info_t *local = iface->local; @@ -682,7 +683,13 @@ static int prism2_close(struct net_device *dev) netif_device_detach(dev); } - flush_scheduled_work(); + cancel_work_sync(&local->reset_queue); + cancel_work_sync(&local->set_multicast_list_queue); + cancel_work_sync(&local->set_tim_queue); +#ifndef PRISM2_NO_STATION_MODES + cancel_work_sync(&local->info_queue); +#endif + cancel_work_sync(&local->comms_qual_update); module_put(local->hw_module); @@ -851,7 +858,6 @@ const struct header_ops hostap_80211_ops = { .rebuild = eth_rebuild_header, .cache = eth_header_cache, .cache_update = eth_header_cache_update, - .parse = hostap_80211_header_parse, }; EXPORT_SYMBOL(hostap_80211_ops); @@ -1144,7 +1150,6 @@ EXPORT_SYMBOL(hostap_set_roaming); EXPORT_SYMBOL(hostap_set_auth_algs); 
EXPORT_SYMBOL(hostap_dump_rx_header); EXPORT_SYMBOL(hostap_dump_tx_header); -EXPORT_SYMBOL(hostap_80211_header_parse); EXPORT_SYMBOL(hostap_80211_get_hdrlen); EXPORT_SYMBOL(hostap_get_stats); EXPORT_SYMBOL(hostap_setup_dev); diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index fa87c5c2ae0..6e704608947 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -1753,6 +1753,8 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) if (priv->workqueue) { cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); cancel_delayed_work(&priv->scan_event); } queue_work(priv->workqueue, &priv->down); @@ -2005,6 +2007,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) wake_up_interruptible(&priv->wait_command_queue); priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); cancel_delayed_work(&priv->scan_event); schedule_work(&priv->link_down); queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); @@ -4712,6 +4716,12 @@ static void ipw_rx_notification(struct ipw_priv *priv, priv->status &= ~STATUS_SCAN_FORCED; #endif /* CONFIG_IPW2200_MONITOR */ + /* Do queued direct scans first */ + if (priv->status & STATUS_DIRECT_SCAN_PENDING) { + queue_delayed_work(priv->workqueue, + &priv->request_direct_scan, 0); + } + if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING | STATUS_ROAMING | @@ -6267,7 +6277,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, } } -static int ipw_request_scan_helper(struct ipw_priv *priv, int type) +static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) { struct ipw_scan_request_ext scan; int err = 0, scan_type; @@ -6278,22 +6288,31 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) mutex_lock(&priv->mutex); + if (direct && (priv->direct_scan_ssid_len == 0)) { + IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); + priv->status &= ~STATUS_DIRECT_SCAN_PENDING; + goto done; + } + if (priv->status & STATUS_SCANNING) { - IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); - priv->status |= STATUS_SCAN_PENDING; + IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); + priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : + STATUS_SCAN_PENDING; goto done; } if (!(priv->status & STATUS_SCAN_FORCED) && priv->status & STATUS_SCAN_ABORTING) { IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); - priv->status |= STATUS_SCAN_PENDING; + priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : + STATUS_SCAN_PENDING; goto done; } if (priv->status & STATUS_RF_KILL_MASK) { - IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n"); - priv->status |= STATUS_SCAN_PENDING; + IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); + priv->status |= direct ? 
STATUS_DIRECT_SCAN_PENDING : + STATUS_SCAN_PENDING; goto done; } @@ -6321,6 +6340,7 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) cpu_to_le16(20); scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); + scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); #ifdef CONFIG_IPW2200_MONITOR if (priv->ieee->iw_mode == IW_MODE_MONITOR) { @@ -6360,13 +6380,23 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) cpu_to_le16(2000); } else { #endif /* CONFIG_IPW2200_MONITOR */ - /* If we are roaming, then make this a directed scan for the - * current network. Otherwise, ensure that every other scan - * is a fast channel hop scan */ - if ((priv->status & STATUS_ROAMING) - || (!(priv->status & STATUS_ASSOCIATED) - && (priv->config & CFG_STATIC_ESSID) - && (le32_to_cpu(scan.full_scan_index) % 2))) { + /* Honor direct scans first, otherwise if we are roaming make + * this a direct scan for the current network. Finally, + * ensure that every other scan is a fast channel hop scan */ + if (direct) { + err = ipw_send_ssid(priv, priv->direct_scan_ssid, + priv->direct_scan_ssid_len); + if (err) { + IPW_DEBUG_HC("Attempt to send SSID command " + "failed\n"); + goto done; + } + + scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; + } else if ((priv->status & STATUS_ROAMING) + || (!(priv->status & STATUS_ASSOCIATED) + && (priv->config & CFG_STATIC_ESSID) + && (le32_to_cpu(scan.full_scan_index) % 2))) { err = ipw_send_ssid(priv, priv->essid, priv->essid_len); if (err) { IPW_DEBUG_HC("Attempt to send SSID command " @@ -6391,7 +6421,12 @@ send_request: } priv->status |= STATUS_SCANNING; - priv->status &= ~STATUS_SCAN_PENDING; + if (direct) { + priv->status &= ~STATUS_DIRECT_SCAN_PENDING; + priv->direct_scan_ssid_len = 0; + } else + priv->status &= ~STATUS_SCAN_PENDING; + queue_delayed_work(priv->workqueue, &priv->scan_check, IPW_SCAN_CHECK_WATCHDOG); done: @@ -6402,15 +6437,22 @@ done: static void ipw_request_passive_scan(struct work_struct *work) { struct ipw_priv *priv = - container_of(work, struct ipw_priv, request_passive_scan); - ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); + container_of(work, struct ipw_priv, request_passive_scan.work); + ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); } static void ipw_request_scan(struct work_struct *work) { struct ipw_priv *priv = container_of(work, struct ipw_priv, request_scan.work); - ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); + ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); +} + +static void ipw_request_direct_scan(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, request_direct_scan.work); + ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); } static void ipw_bg_abort_scan(struct work_struct *work) @@ -7558,8 +7600,31 @@ static int ipw_associate(void *data) priv->ieee->iw_mode == IW_MODE_ADHOC && priv->config & CFG_ADHOC_CREATE && priv->config & CFG_STATIC_ESSID && - priv->config & CFG_STATIC_CHANNEL && - !list_empty(&priv->ieee->network_free_list)) { + priv->config & CFG_STATIC_CHANNEL) { + /* Use oldest network if the free list is empty */ + if (list_empty(&priv->ieee->network_free_list)) { + struct ieee80211_network *oldest = NULL; + struct ieee80211_network *target; + DECLARE_MAC_BUF(mac); + + list_for_each_entry(target, &priv->ieee->network_list, list) { + if ((oldest == NULL) || + (target->last_scanned < oldest->last_scanned)) + oldest = target; + } + + /* If there are no more slots, expire the oldest */ + 
list_del(&oldest->list); + target = oldest; + IPW_DEBUG_ASSOC("Expired '%s' (%s) from " + "network list.\n", + escape_essid(target->ssid, + target->ssid_len), + print_mac(mac, target->bssid)); + list_add_tail(&target->list, + &priv->ieee->network_free_list); + } + element = priv->ieee->network_free_list.next; network = list_entry(element, struct ieee80211_network, list); ipw_adhoc_create(priv, network); @@ -9454,99 +9519,38 @@ static int ipw_wx_get_retry(struct net_device *dev, return 0; } -static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, - int essid_len) -{ - struct ipw_scan_request_ext scan; - int err = 0, scan_type; - - if (!(priv->status & STATUS_INIT) || - (priv->status & STATUS_EXIT_PENDING)) - return 0; - - mutex_lock(&priv->mutex); - - if (priv->status & STATUS_RF_KILL_MASK) { - IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); - priv->status |= STATUS_SCAN_PENDING; - goto done; - } - - IPW_DEBUG_HC("starting request direct scan!\n"); - - if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { - /* We should not sleep here; otherwise we will block most - * of the system (for instance, we hold rtnl_lock when we - * get here). - */ - err = -EAGAIN; - goto done; - } - memset(&scan, 0, sizeof(scan)); - - if (priv->config & CFG_SPEED_SCAN) - scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = - cpu_to_le16(30); - else - scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = - cpu_to_le16(20); - - scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = - cpu_to_le16(20); - scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); - scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); - - scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee)); - - err = ipw_send_ssid(priv, essid, essid_len); - if (err) { - IPW_DEBUG_HC("Attempt to send SSID command failed\n"); - goto done; - } - scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; - - ipw_add_scan_channels(priv, &scan, scan_type); - - err = ipw_send_scan_request_ext(priv, &scan); - if (err) { - IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); - goto done; - } - - priv->status |= STATUS_SCANNING; - - done: - mutex_unlock(&priv->mutex); - return err; -} - static int ipw_wx_set_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw_priv *priv = ieee80211_priv(dev); struct iw_scan_req *req = (struct iw_scan_req *)extra; + struct delayed_work *work = NULL; mutex_lock(&priv->mutex); + priv->user_requested_scan = 1; - mutex_unlock(&priv->mutex); if (wrqu->data.length == sizeof(struct iw_scan_req)) { if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { - ipw_request_direct_scan(priv, req->essid, - req->essid_len); - return 0; - } - if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { - queue_work(priv->workqueue, - &priv->request_passive_scan); - return 0; + int len = min((int)req->essid_len, + (int)sizeof(priv->direct_scan_ssid)); + memcpy(priv->direct_scan_ssid, req->essid, len); + priv->direct_scan_ssid_len = len; + work = &priv->request_direct_scan; + } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { + work = &priv->request_passive_scan; } + } else { + /* Normal active broadcast scan */ + work = &priv->request_scan; } + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("Start scan\n"); - queue_delayed_work(priv->workqueue, &priv->request_scan, 0); + queue_delayed_work(priv->workqueue, work, 0); return 0; } @@ -10708,6 +10712,8 @@ static void ipw_link_up(struct ipw_priv *priv) } cancel_delayed_work(&priv->request_scan); + 
cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); cancel_delayed_work(&priv->scan_event); ipw_reset_stats(priv); /* Ensure the rate is updated immediately */ @@ -10738,6 +10744,8 @@ static void ipw_link_down(struct ipw_priv *priv) /* Cancel any queued work ... */ cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); cancel_delayed_work(&priv->adhoc_check); cancel_delayed_work(&priv->gather_stats); @@ -10777,8 +10785,9 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) INIT_WORK(&priv->up, ipw_bg_up); INIT_WORK(&priv->down, ipw_bg_down); INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); + INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); + INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); - INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); INIT_WORK(&priv->roam, ipw_bg_roam); @@ -11584,6 +11593,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv) priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit; priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; + SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev); rc = register_netdev(priv->prom_net_dev); if (rc) { @@ -11811,6 +11821,8 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) cancel_delayed_work(&priv->adhoc_check); cancel_delayed_work(&priv->gather_stats); cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); cancel_delayed_work(&priv->scan_event); cancel_delayed_work(&priv->rf_kill); cancel_delayed_work(&priv->scan_check); diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index cd3295b66dd..d4ab28b73b3 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h @@ -1037,6 +1037,7 @@ struct ipw_cmd { /* XXX */ #define STATUS_DISASSOC_PENDING (1<<12) #define STATUS_STATE_PENDING (1<<13) +#define STATUS_DIRECT_SCAN_PENDING (1<<19) #define STATUS_SCAN_PENDING (1<<20) #define STATUS_SCANNING (1<<21) #define STATUS_SCAN_ABORTING (1<<22) @@ -1292,6 +1293,8 @@ struct ipw_priv { struct iw_public_data wireless_data; int user_requested_scan; + u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; + u8 direct_scan_ssid_len; struct workqueue_struct *workqueue; @@ -1301,8 +1304,9 @@ struct ipw_priv { struct work_struct system_config; struct work_struct rx_replenish; struct delayed_work request_scan; + struct delayed_work request_direct_scan; + struct delayed_work request_passive_scan; struct delayed_work scan_event; - struct work_struct request_passive_scan; struct work_struct adapter_restart; struct delayed_work rf_kill; struct work_struct up; diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index d5b7a76fcaa..62fb89d8231 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -1,6 +1,5 @@ config IWLWIFI - bool - default n + tristate config IWLCORE tristate "Intel Wireless Wifi Core" diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c index d200d08fb08..8b1528e52d4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c @@ -229,14 +229,15 
@@ static int iwl3945_led_register_led(struct iwl3945_priv *priv, led->led_dev.brightness_set = iwl3945_led_brightness_set; led->led_dev.default_trigger = trigger; + led->priv = priv; + led->type = type; + ret = led_classdev_register(device, &led->led_dev); if (ret) { IWL_ERROR("Error: failed to register led handler.\n"); return ret; } - led->priv = priv; - led->type = type; led->registered = 1; if (set_led && led->led_on) diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index d3406830c8e..55ac850744b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -449,7 +449,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv, if (print_summary) { char *title; - u32 rate; + int rate; if (hundred) title = "100Frames"; @@ -487,7 +487,7 @@ static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv, * but you can hack it to show more, if you'd like to. */ if (dataframe) IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " - "len=%u, rssi=%d, chnl=%d, rate=%u, \n", + "len=%u, rssi=%d, chnl=%d, rate=%d, \n", title, fc, header->addr1[5], length, rssi, channel, rate); else { @@ -588,8 +588,12 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv, if (rate == -1) iwl3945_rt->rt_rate = 0; - else + else { + if (stats->band == IEEE80211_BAND_5GHZ) + rate += IWL_FIRST_OFDM_RATE; + iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee; + } /* antenna number */ antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK; @@ -666,7 +670,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv, rx_status.flag = 0; rx_status.mactime = le64_to_cpu(rx_end->timestamp); rx_status.freq = - ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel)); + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel)); rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h index 45c1c5533bf..c7695a215a3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h @@ -742,7 +742,6 @@ struct iwl3945_priv { u8 direct_ssid_len; u8 direct_ssid[IW_ESSID_MAX_SIZE]; struct iwl3945_scan_cmd *scan; - u8 only_active_channel; /* spinlock */ spinlock_t lock; /* protect general shared data */ diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c index b608e1ca8b4..3a7f0cb710e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c @@ -163,8 +163,8 @@ struct iwl4965_lq_sta { struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; #endif struct iwl4965_rate dbg_fixed; - struct iwl_priv *drv; #endif + struct iwl_priv *drv; }; static void rs_rate_scale_perform(struct iwl_priv *priv, @@ -1162,7 +1162,6 @@ static s32 rs_get_best_rate(struct iwl_priv *priv, /* Higher rate not available, use the original */ } else { - new_rate = rate; break; } } @@ -2009,7 +2008,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, * 2) Not just finishing up a search * 3) Allowing a new search */ - if (!update_lq && !done_search && !lq_sta->stay_in_tbl) { + if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) { /* Save current throughput to compare with "search" throughput*/ lq_sta->last_tpt = current_tpt; diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 17f629fb96f..de330ae0ca9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -3528,8 +3528,12 @@ static void iwl4965_add_radiotap(struct iwl_priv *priv, if (rate == -1) iwl4965_rt->rt_rate = 0; - else + else { + if (stats->band == IEEE80211_BAND_5GHZ) + rate += IWL_FIRST_OFDM_RATE; + iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee; + } /* * "antenna number" @@ -3978,7 +3982,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv, rx_status.mactime = le64_to_cpu(rx_start->timestamp); rx_status.freq = - ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel)); + ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel)); rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; rx_status.rate_idx = diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-4965.h index 9ed13cb0a2a..581b98556c8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.h +++ b/drivers/net/wireless/iwlwifi/iwl-4965.h @@ -996,7 +996,6 @@ struct iwl_priv { u8 direct_ssid_len; u8 direct_ssid[IW_ESSID_MAX_SIZE]; struct iwl4965_scan_cmd *scan; - u8 only_active_channel; /* spinlock */ spinlock_t lock; /* protect general shared data */ diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index a1a0b3c581f..6027e1119c3 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -2227,7 +2227,10 @@ static int iwl3945_scan_initiate(struct iwl3945_priv *priv) } IWL_DEBUG_INFO("Starting scan...\n"); - priv->scan_bands = 2; + if (priv->cfg->sku & IWL_SKU_G) + priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); + if (priv->cfg->sku & IWL_SKU_A) + priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ); set_bit(STATUS_SCANNING, &priv->status); priv->scan_start = jiffies; priv->scan_pass_start = priv->scan_start; @@ -3352,13 +3355,18 @@ static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv, cancel_delayed_work(&priv->scan_check); IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", - (priv->scan_bands == 2) ? "2.4" : "5.2", + (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? + "2.4" : "5.2", jiffies_to_msecs(elapsed_jiffies (priv->scan_pass_start, jiffies))); - /* Remove this scanned band from the list - * of pending bands to scan */ - priv->scan_bands--; + /* Remove this scanned band from the list of pending + * bands to scan, band G precedes A in order of scanning + * as seen in iwl3945_bg_request_scan */ + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) + priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ); + else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) + priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ); /* If a request to abort was given, or the scan did not succeed * then we reset the scan state machine and terminate, @@ -4968,22 +4976,11 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, if (channels[i].flags & IEEE80211_CHAN_DISABLED) continue; - if (channels[i].hw_value == - le16_to_cpu(priv->active_rxon.channel)) { - if (iwl3945_is_associated(priv)) { - IWL_DEBUG_SCAN - ("Skipping current channel %d\n", - le16_to_cpu(priv->active_rxon.channel)); - continue; - } - } else if (priv->only_active_channel) - continue; - scan_ch->channel = channels[i].hw_value; ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel); if (!is_channel_valid(ch_info)) { - IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", + IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n", scan_ch->channel); continue; } @@ -6303,12 +6300,17 @@ static void iwl3945_bg_request_scan(struct work_struct *data) priv->direct_ssid, priv->direct_ssid_len); direct_mask = 1; } else if (!iwl3945_is_associated(priv) && priv->essid_len) { + IWL_DEBUG_SCAN + ("Kicking off one direct scan for '%s' when not associated\n", + iwl3945_escape_essid(priv->essid, priv->essid_len)); scan->direct_scan[0].id = WLAN_EID_SSID; scan->direct_scan[0].len = priv->essid_len; memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); direct_mask = 1; - } else + } else { + IWL_DEBUG_SCAN("Kicking off one indirect scan.\n"); direct_mask = 0; + } /* We don't build a direct scan probe request; the uCode will do * that based on the direct_mask added to each channel entry 
*/ @@ -6321,21 +6323,16 @@ static void iwl3945_bg_request_scan(struct work_struct *data) /* flags + rate selection */ - switch (priv->scan_bands) { - case 2: + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; scan->tx_cmd.rate = IWL_RATE_1M_PLCP; scan->good_CRC_th = 0; band = IEEE80211_BAND_2GHZ; - break; - - case 1: + } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { scan->tx_cmd.rate = IWL_RATE_6M_PLCP; scan->good_CRC_th = IWL_GOOD_CRC_TH; band = IEEE80211_BAND_5GHZ; - break; - - default: + } else { IWL_WARNING("Invalid scan band count\n"); goto done; } @@ -6346,23 +6343,18 @@ static void iwl3945_bg_request_scan(struct work_struct *data) if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) scan->filter_flags = RXON_FILTER_PROMISC_MSK; - if (direct_mask) { - IWL_DEBUG_SCAN - ("Initiating direct scan for %s.\n", - iwl3945_escape_essid(priv->essid, priv->essid_len)); + if (direct_mask) scan->channel_count = iwl3945_get_channels_for_scan( priv, band, 1, /* active */ direct_mask, (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); - } else { - IWL_DEBUG_SCAN("Initiating indirect scan.\n"); + else scan->channel_count = iwl3945_get_channels_for_scan( priv, band, 0, /* passive */ direct_mask, (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); - } cmd.len += le16_to_cpu(scan->tx_cmd.len) + scan->channel_count * sizeof(struct iwl3945_scan_channel); @@ -6695,7 +6687,8 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { IWL_DEBUG_MAC80211("leave - monitor\n"); - return -1; + dev_kfree_skb_any(skb); + return 0; } IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, @@ -6781,7 +6774,7 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co ch_info = iwl3945_get_channel_info(priv, conf->channel->band, conf->channel->hw_value); if (!is_channel_valid(ch_info)) { - IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", + IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n", conf->channel->hw_value, conf->channel->band); IWL_DEBUG_MAC80211("leave - invalid channel\n"); spin_unlock_irqrestore(&priv->lock, flags); @@ -7314,8 +7307,6 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw) return; } - priv->only_active_channel = 0; - iwl3945_set_rate(priv); mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c index d0bbcaaeb94..0bd55bb1973 100644 --- a/drivers/net/wireless/iwlwifi/iwl4965-base.c +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c @@ -1774,7 +1774,10 @@ static int iwl4965_scan_initiate(struct iwl_priv *priv) } IWL_DEBUG_INFO("Starting scan...\n"); - priv->scan_bands = 2; + if (priv->cfg->sku & IWL_SKU_G) + priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); + if (priv->cfg->sku & IWL_SKU_A) + priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ); set_bit(STATUS_SCANNING, &priv->status); priv->scan_start = jiffies; priv->scan_pass_start = priv->scan_start; @@ -3023,8 +3026,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); if (index != -1) { - int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); #ifdef CONFIG_IWL4965_HT + int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); + if (tid != MAX_TID_COUNT) priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; if (iwl4965_queue_space(&txq->q) > txq->q.low_mark && @@ -3276,13 +3280,18 @@ static void 
iwl4965_rx_scan_complete_notif(struct iwl_priv *priv, cancel_delayed_work(&priv->scan_check); IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", - (priv->scan_bands == 2) ? "2.4" : "5.2", + (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? + "2.4" : "5.2", jiffies_to_msecs(elapsed_jiffies (priv->scan_pass_start, jiffies))); - /* Remove this scanned band from the list - * of pending bands to scan */ - priv->scan_bands--; + /* Remove this scanned band from the list of pending + * bands to scan, band G precedes A in order of scanning + * as seen in iwl_bg_request_scan */ + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) + priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ); + else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) + priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ); /* If a request to abort was given, or the scan did not succeed * then we reset the scan state machine and terminate, @@ -3292,7 +3301,7 @@ static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv, clear_bit(STATUS_SCAN_ABORTING, &priv->status); } else { /* If there are more bands on this scan pass reschedule */ - if (priv->scan_bands > 0) + if (priv->scan_bands) goto reschedule; } @@ -4633,23 +4642,11 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv, if (channels[i].flags & IEEE80211_CHAN_DISABLED) continue; - if (ieee80211_frequency_to_channel(channels[i].center_freq) == - le16_to_cpu(priv->active_rxon.channel)) { - if (iwl_is_associated(priv)) { - IWL_DEBUG_SCAN - ("Skipping current channel %d\n", - le16_to_cpu(priv->active_rxon.channel)); - continue; - } - } else if (priv->only_active_channel) - continue; - scan_ch->channel = ieee80211_frequency_to_channel(channels[i].center_freq); - ch_info = iwl_get_channel_info(priv, band, - scan_ch->channel); + ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); if (!is_channel_valid(ch_info)) { - IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", + IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n", scan_ch->channel); continue; } @@ -5824,11 +5821,15 @@ static void iwl4965_bg_request_scan(struct work_struct *data) priv->direct_ssid, priv->direct_ssid_len); direct_mask = 1; } else if (!iwl_is_associated(priv) && priv->essid_len) { + IWL_DEBUG_SCAN + ("Kicking off one direct scan for '%s' when not associated\n", + iwl4965_escape_essid(priv->essid, priv->essid_len)); scan->direct_scan[0].id = WLAN_EID_SSID; scan->direct_scan[0].len = priv->essid_len; memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); direct_mask = 1; } else { + IWL_DEBUG_SCAN("Kicking off one indirect scan.\n"); direct_mask = 0; } @@ -5837,8 +5838,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data) scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - switch (priv->scan_bands) { - case 2: + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP, @@ -5846,17 +5846,13 @@ static void iwl4965_bg_request_scan(struct work_struct *data) scan->good_CRC_th = 0; band = IEEE80211_BAND_2GHZ; - break; - - case 1: + } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP, RATE_MCS_ANT_B_MSK); scan->good_CRC_th = IWL_GOOD_CRC_TH; band = IEEE80211_BAND_5GHZ; - break; - - default: + } else { IWL_WARNING("Invalid scan band count\n"); goto done; } @@ -5881,23 +5877,18 @@ static void iwl4965_bg_request_scan(struct work_struct *data) if (priv->iw_mode == 
IEEE80211_IF_TYPE_MNTR) scan->filter_flags = RXON_FILTER_PROMISC_MSK; - if (direct_mask) { - IWL_DEBUG_SCAN - ("Initiating direct scan for %s.\n", - iwl4965_escape_essid(priv->essid, priv->essid_len)); + if (direct_mask) scan->channel_count = iwl4965_get_channels_for_scan( priv, band, 1, /* active */ direct_mask, (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); - } else { - IWL_DEBUG_SCAN("Initiating indirect scan.\n"); + else scan->channel_count = iwl4965_get_channels_for_scan( priv, band, 0, /* passive */ direct_mask, (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); - } cmd.len += le16_to_cpu(scan->tx_cmd.len) + scan->channel_count * sizeof(struct iwl4965_scan_channel); @@ -6246,7 +6237,8 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { IWL_DEBUG_MAC80211("leave - monitor\n"); - return -1; + dev_kfree_skb_any(skb); + return 0; } IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, @@ -7061,8 +7053,6 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw) return; } - priv->only_active_channel = 0; - iwl4965_set_rate(priv); mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 6328b959387..8124fd9b135 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -1842,6 +1842,9 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) spin_lock_irqsave(&priv->driver_lock, flags); + /* We don't get a response on the sleep-confirmation */ + priv->dnld_sent = DNLD_RES_RECEIVED; + /* If nothing to do, go back to sleep (?) */ if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) priv->psstate = PS_STATE_SLEEP; @@ -1904,12 +1907,12 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_HOST); + spin_lock_irqsave(&priv->driver_lock, flags); if (priv->dnld_sent) { allowed = 0; lbs_deb_host("dnld_sent was set\n"); } - spin_lock_irqsave(&priv->driver_lock, flags); /* In-progress command? 
*/ if (priv->cur_cmd) { allowed = 0; diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index ad2fabca911..0aa0ce3b2c4 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -312,8 +312,8 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask, if (tlv_type != TLV_TYPE_BCNMISS) tlv->freq = freq; - /* The command header, the event mask, and the one TLV */ - events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 2 + sizeof(*tlv)); + /* The command header, the action, the event mask, and one TLV */ + events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv)); ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c index dcfdb404678..688d60de55c 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/libertas/ethtool.c @@ -73,8 +73,8 @@ out: return ret; } -static void lbs_ethtool_get_stats(struct net_device * dev, - struct ethtool_stats * stats, u64 * data) +static void lbs_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, uint64_t *data) { struct lbs_private *priv = dev->priv; struct cmd_ds_mesh_access mesh_access; @@ -83,12 +83,12 @@ static void lbs_ethtool_get_stats(struct net_device * dev, lbs_deb_enter(LBS_DEB_ETHTOOL); /* Get Mesh Statistics */ - ret = lbs_prepare_and_send_command(priv, - CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS, - CMD_OPTION_WAITFORRSP, 0, &mesh_access); + ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access); - if (ret) + if (ret) { + memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t))); return; + } priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); @@ -111,19 +111,18 @@ static void lbs_ethtool_get_stats(struct net_device * dev, lbs_deb_enter(LBS_DEB_ETHTOOL); } -static int lbs_ethtool_get_sset_count(struct net_device * dev, int sset) +static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset) { - switch (sset) { - case ETH_SS_STATS: + struct lbs_private *priv = dev->priv; + + if (sset == ETH_SS_STATS && dev == priv->mesh_dev) return MESH_STATS_NUM; - default: - return -EOPNOTSUPP; - } + + return -EOPNOTSUPP; } static void lbs_ethtool_get_strings(struct net_device *dev, - u32 stringset, - u8 * s) + uint32_t stringset, uint8_t *s) { int i; diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 8032df72aaa..36288b29abf 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -925,6 +925,7 @@ static struct usb_driver if_usb_driver = { .id_table = if_usb_table, .suspend = if_usb_suspend, .resume = if_usb_resume, + .reset_resume = if_usb_resume, }; static int __init if_usb_init_module(void) diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 406f54d4095..acfc4bfcc26 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -732,8 +732,8 @@ static int lbs_thread(void *data) lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n", priv->currenttxskb, priv->dnld_sent); - spin_lock_irq(&priv->driver_lock); /* Process any pending command response */ + spin_lock_irq(&priv->driver_lock); resp_idx = priv->resp_idx; if (priv->resp_len[resp_idx]) { spin_unlock_irq(&priv->driver_lock); @@ -756,6 +756,7 @@ static int lbs_thread(void *data) priv->nr_retries = 
0; } else { priv->cur_cmd = NULL; + priv->dnld_sent = DNLD_RES_RECEIVED; lbs_pr_info("requeueing command %x due to timeout (#%d)\n", le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries); @@ -1564,6 +1565,7 @@ static int lbs_add_rtap(struct lbs_private *priv) rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; rtap_dev->set_multicast_list = lbs_set_multicast_list; rtap_dev->priv = priv; + SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); ret = register_netdev(rtap_dev); if (ret) { diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index 1a409fcc80d..387d4878af2 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c @@ -298,7 +298,8 @@ static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype, uint8_t *tlv; /* pointer into our current, growing TLV storage area */ lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d", - bsstype, chan_list[0].channumber, chan_count); + bsstype, chan_list ? chan_list[0].channumber : -1, + chan_count); /* create the fixed part for scan command */ scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL); @@ -566,11 +567,11 @@ static int lbs_process_bss(struct bss_descriptor *bss, pos += 8; /* beacon interval is 2 bytes long */ - bss->beaconperiod = le16_to_cpup((void *) pos); + bss->beaconperiod = get_unaligned_le16(pos); pos += 2; /* capability information is 2 bytes long */ - bss->capability = le16_to_cpup((void *) pos); + bss->capability = get_unaligned_le16(pos); lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability); pos += 2; diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index 8b7f5768a10..1c216e015f6 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c @@ -461,6 +461,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */ PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ + PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */ PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 98ddbb3b327..1610a7308c1 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -49,6 +49,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ /* Version 2 devices (3887) */ + {USB_DEVICE(0x0471, 0x1230)}, /* Philips CPWUA054/00 */ {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 04c2638d75a..9196825ed1b 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -388,8 +388,15 @@ islpci_open(struct net_device *ndev) netif_start_queue(ndev); - /* Turn off carrier unless we know we have associated */ - netif_carrier_off(ndev); + /* Turn off carrier if in STA or Ad-hoc mode. 
It will be turned on + * once the firmware receives a trap of being associated + * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we + * should just leave the carrier on as its expected the firmware + * won't send us a trigger. */ + if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC) + netif_carrier_off(ndev); + else + netif_carrier_on(ndev); return 0; } diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index 762e85bef55..e43bae97ed8 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c @@ -290,7 +290,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) avs->version = cpu_to_be32(P80211CAPTURE_VERSION); avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header)); - avs->mactime = cpu_to_be64(le64_to_cpu(clock)); + avs->mactime = cpu_to_be64(clock); avs->hosttime = cpu_to_be64(jiffies); avs->phytype = cpu_to_be32(6); /*OFDM: 6 for (g), 8 for (a) */ avs->channel = cpu_to_be32(channel_of_freq(freq)); diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d0b1fb15c70..18c9931e326 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -116,6 +116,7 @@ MODULE_PARM_DESC(workaround_interval, #define OID_802_11_ENCRYPTION_STATUS ccpu2(0x0d01011b) #define OID_802_11_ADD_KEY ccpu2(0x0d01011d) #define OID_802_11_REMOVE_KEY ccpu2(0x0d01011e) +#define OID_802_11_ASSOCIATION_INFORMATION ccpu2(0x0d01011f) #define OID_802_11_PMKID ccpu2(0x0d010123) #define OID_802_11_NETWORK_TYPES_SUPPORTED ccpu2(0x0d010203) #define OID_802_11_NETWORK_TYPE_IN_USE ccpu2(0x0d010204) @@ -271,6 +272,26 @@ struct ndis_config_param { __le32 value_length; } __attribute__((packed)); +struct ndis_80211_assoc_info { + __le32 length; + __le16 req_ies; + struct req_ie { + __le16 capa; + __le16 listen_interval; + u8 cur_ap_address[6]; + } req_ie; + __le32 req_ie_length; + __le32 offset_req_ies; + __le16 resp_ies; + struct resp_ie { + __le16 capa; + __le16 status_code; + __le16 assoc_id; + } resp_ie; + __le32 resp_ie_length; + __le32 offset_resp_ies; +} __attribute__((packed)); + /* these have to match what is in wpa_supplicant */ enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP }; enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, @@ -674,6 +695,12 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) return ret; } +static int get_association_info(struct usbnet *usbdev, + struct ndis_80211_assoc_info *info, int len) +{ + return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION, + info, &len); +} static int is_associated(struct usbnet *usbdev) { @@ -2182,11 +2209,40 @@ static void rndis_wext_worker(struct work_struct *work) struct usbnet *usbdev = priv->usbdev; union iwreq_data evt; unsigned char bssid[ETH_ALEN]; - int ret; + struct ndis_80211_assoc_info *info; + int assoc_size = sizeof(*info) + IW_CUSTOM_MAX + 32; + int ret, offset; if (test_and_clear_bit(WORK_CONNECTION_EVENT, &priv->work_pending)) { - ret = get_bssid(usbdev, bssid); + info = kzalloc(assoc_size, GFP_KERNEL); + if (!info) + goto get_bssid; + + /* Get association info IEs from device and send them back to + * userspace. 
*/ + ret = get_association_info(usbdev, info, assoc_size); + if (!ret) { + evt.data.length = le32_to_cpu(info->req_ie_length); + if (evt.data.length > 0) { + offset = le32_to_cpu(info->offset_req_ies); + wireless_send_event(usbdev->net, + IWEVASSOCREQIE, &evt, + (char *)info + offset); + } + + evt.data.length = le32_to_cpu(info->resp_ie_length); + if (evt.data.length > 0) { + offset = le32_to_cpu(info->offset_resp_ies); + wireless_send_event(usbdev->net, + IWEVASSOCRESPIE, &evt, + (char *)info + offset); + } + } + + kfree(info); +get_bssid: + ret = get_bssid(usbdev, bssid); if (!ret) { evt.data.flags = 0; evt.data.length = 0; @@ -2414,6 +2470,11 @@ static int bcm4320_early_init(struct usbnet *dev) else if (priv->param_power_save > 2) priv->param_power_save = 2; + if (priv->param_power_output < 0) + priv->param_power_output = 0; + else if (priv->param_power_output > 3) + priv->param_power_output = 3; + if (priv->param_roamtrigger < -80) priv->param_roamtrigger = -80; else if (priv->param_roamtrigger > -60) diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index ab1029e7988..2d611876bbe 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -32,12 +32,13 @@ config RT2X00_LIB_FIRMWARE config RT2X00_LIB_RFKILL boolean depends on RT2X00_LIB + depends on INPUT select RFKILL select INPUT_POLLDEV config RT2X00_LIB_LEDS boolean - depends on RT2X00_LIB + depends on RT2X00_LIB && NEW_LEDS config RT2400PCI tristate "Ralink rt2400 pci/pcmcia support" @@ -51,7 +52,7 @@ config RT2400PCI config RT2400PCI_RFKILL bool "RT2400 rfkill support" - depends on RT2400PCI + depends on RT2400PCI && INPUT select RT2X00_LIB_RFKILL ---help--- This adds support for integrated rt2400 devices that feature a @@ -60,7 +61,7 @@ config RT2400PCI_RFKILL config RT2400PCI_LEDS bool "RT2400 leds support" - depends on RT2400PCI + depends on RT2400PCI && NEW_LEDS select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- @@ -78,7 +79,7 @@ config RT2500PCI config RT2500PCI_RFKILL bool "RT2500 rfkill support" - depends on RT2500PCI + depends on RT2500PCI && INPUT select RT2X00_LIB_RFKILL ---help--- This adds support for integrated rt2500 devices that feature a @@ -87,7 +88,7 @@ config RT2500PCI_RFKILL config RT2500PCI_LEDS bool "RT2500 leds support" - depends on RT2500PCI + depends on RT2500PCI && NEW_LEDS select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- @@ -107,7 +108,7 @@ config RT61PCI config RT61PCI_RFKILL bool "RT61 rfkill support" - depends on RT61PCI + depends on RT61PCI && INPUT select RT2X00_LIB_RFKILL ---help--- This adds support for integrated rt61 devices that feature a @@ -116,7 +117,7 @@ config RT61PCI_RFKILL config RT61PCI_LEDS bool "RT61 leds support" - depends on RT61PCI + depends on RT61PCI && NEW_LEDS select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- @@ -133,7 +134,7 @@ config RT2500USB config RT2500USB_LEDS bool "RT2500 leds support" - depends on RT2500USB + depends on RT2500USB && NEW_LEDS select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- @@ -152,7 +153,7 @@ config RT73USB config RT73USB_LEDS bool "RT73 leds support" - depends on RT73USB + depends on RT73USB && NEW_LEDS select LEDS_CLASS select RT2X00_LIB_LEDS ---help--- diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index b41187af130..b36ed1c6c74 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -363,7 +363,7 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev, 
rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); - rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00 | preamble_mask); + rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); @@ -731,6 +731,17 @@ static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev) (rt2x00dev->rx->data_size / 128)); rt2x00pci_register_write(rt2x00dev, CSR9, reg); + rt2x00pci_register_read(rt2x00dev, CSR14, &reg); + rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); + rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0); + rt2x00_set_field32(&reg, CSR14_TBCN, 0); + rt2x00_set_field32(&reg, CSR14_TCFP, 0); + rt2x00_set_field32(&reg, CSR14_TATIMW, 0); + rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0); + rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + rt2x00pci_register_write(rt2x00dev, CNT3, 0x3f080000); rt2x00pci_register_read(rt2x00dev, ARCSR0, &reg); @@ -1308,7 +1319,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev) if (value == LED_MODE_TXRX_ACTIVITY) { rt2x00dev->led_qual.rt2x00dev = rt2x00dev; - rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY; + rt2x00dev->led_qual.type = LED_TYPE_ACTIVITY; rt2x00dev->led_qual.led_dev.brightness_set = rt2400pci_brightness_set; rt2x00dev->led_qual.led_dev.blink_set = diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 5ade097ed45..f7731fb8255 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -370,7 +370,7 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev, rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); - rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00 | preamble_mask); + rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); @@ -824,6 +824,17 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(&reg, CSR11_CW_SELECT, 0); rt2x00pci_register_write(rt2x00dev, CSR11, reg); + rt2x00pci_register_read(rt2x00dev, CSR14, &reg); + rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); + rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0); + rt2x00_set_field32(&reg, CSR14_TBCN, 0); + rt2x00_set_field32(&reg, CSR14_TCFP, 0); + rt2x00_set_field32(&reg, CSR14_TATIMW, 0); + rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0); + rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + rt2x00pci_register_write(rt2x00dev, CNT3, 0); rt2x00pci_register_read(rt2x00dev, TXCSR8, &reg); @@ -1485,7 +1496,7 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) if (value == LED_MODE_TXRX_ACTIVITY) { rt2x00dev->led_qual.rt2x00dev = rt2x00dev; - rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY; + rt2x00dev->led_qual.type = LED_TYPE_ACTIVITY; rt2x00dev->led_qual.led_dev.brightness_set = rt2500pci_brightness_set; rt2x00dev->led_qual.led_dev.blink_set = diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 6bb07b33932..d90512f97b3 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -138,11 +138,8 @@ static void rt2500usb_bbp_write(struct rt2x00_dev
*rt2x00dev, * Wait until the BBP becomes ready. */ reg = rt2500usb_bbp_check(rt2x00dev); - if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { - ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n"); - mutex_unlock(&rt2x00dev->usb_cache_mutex); - return; - } + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) + goto exit_fail; /* * Write the data into the BBP. @@ -155,6 +152,13 @@ static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev, rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); mutex_unlock(&rt2x00dev->usb_cache_mutex); + + return; + +exit_fail: + mutex_unlock(&rt2x00dev->usb_cache_mutex); + + ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n"); } static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, @@ -168,10 +172,8 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, * Wait until the BBP becomes ready. */ reg = rt2500usb_bbp_check(rt2x00dev); - if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { - ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n"); - return; - } + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) + goto exit_fail; /* * Write the request into the BBP. @@ -186,17 +188,21 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, * Wait until the BBP becomes ready. */ reg = rt2500usb_bbp_check(rt2x00dev); - if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { - ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n"); - *value = 0xff; - mutex_unlock(&rt2x00dev->usb_cache_mutex); - return; - } + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) + goto exit_fail; rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg); *value = rt2x00_get_field16(reg, PHY_CSR7_DATA); mutex_unlock(&rt2x00dev->usb_cache_mutex); + + return; + +exit_fail: + mutex_unlock(&rt2x00dev->usb_cache_mutex); + + ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n"); + *value = 0xff; } static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev, @@ -795,6 +801,13 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg); + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); + rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0); + rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0); + rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0); + rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f); rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d); @@ -1394,7 +1407,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) if (value == LED_MODE_TXRX_ACTIVITY) { rt2x00dev->led_qual.rt2x00dev = rt2x00dev; - rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY; + rt2x00dev->led_qual.type = LED_TYPE_ACTIVITY; rt2x00dev->led_qual.led_dev.brightness_set = rt2500usb_brightness_set; rt2x00dev->led_qual.led_dev.blink_set = diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 57bdc153952..b4bf1e09cf9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -328,6 +328,11 @@ static inline int rt2x00_get_link_ant_rssi(struct link *link) return DEFAULT_RSSI; } +static inline void rt2x00_reset_link_ant_rssi(struct link *link) +{ + link->ant.rssi_ant = 0; +} + static inline int rt2x00_get_link_ant_rssi_history(struct link *link, enum antenna ant) { @@ -816,6 +821,7 @@ struct rt2x00_dev { /* * Scheduled work.
*/ + struct workqueue_struct *workqueue; struct work_struct intf_work; struct work_struct filter_work; diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c index a9930a03f45..48608e8cc8b 100644 --- a/drivers/net/wireless/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/rt2x00/rt2x00config.c @@ -129,6 +129,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, */ rt2x00dev->ops->lib->config(rt2x00dev, &libconf, CONFIG_UPDATE_ANTENNA); rt2x00lib_reset_link_tuner(rt2x00dev); + rt2x00_reset_link_ant_rssi(&rt2x00dev->link); rt2x00dev->link.ant.active.rx = libconf.ant.rx; rt2x00dev->link.ant.active.tx = libconf.ant.tx; diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index f8fe7a139a8..c997d4f28ab 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -75,7 +75,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev) rt2x00lib_reset_link_tuner(rt2x00dev); - queue_delayed_work(rt2x00dev->hw->workqueue, + queue_delayed_work(rt2x00dev->workqueue, &rt2x00dev->link.work, LINK_TUNE_INTERVAL); } @@ -114,6 +114,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) return status; rt2x00leds_led_radio(rt2x00dev, true); + rt2x00led_led_activity(rt2x00dev, true); __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags); @@ -136,14 +137,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) return; /* - * Stop all scheduled work. - */ - if (work_pending(&rt2x00dev->intf_work)) - cancel_work_sync(&rt2x00dev->intf_work); - if (work_pending(&rt2x00dev->filter_work)) - cancel_work_sync(&rt2x00dev->filter_work); - - /* * Stop the TX queues. */ ieee80211_stop_queues(rt2x00dev->hw); @@ -157,6 +150,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) * Disable radio. */ rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); + rt2x00led_led_activity(rt2x00dev, false); rt2x00leds_led_radio(rt2x00dev, false); } @@ -396,8 +390,8 @@ static void rt2x00lib_link_tuner(struct work_struct *work) * Increase tuner counter, and reschedule the next link tuner run. */ rt2x00dev->link.count++; - queue_delayed_work(rt2x00dev->hw->workqueue, &rt2x00dev->link.work, - LINK_TUNE_INTERVAL); + queue_delayed_work(rt2x00dev->workqueue, + &rt2x00dev->link.work, LINK_TUNE_INTERVAL); } static void rt2x00lib_packetfilter_scheduled(struct work_struct *work) @@ -431,6 +425,15 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, spin_unlock(&intf->lock); + /* + * It is possible the radio was disabled while the work had been + * scheduled. If that happens we should return here immediately, + * note that in the spinlock protected area above the delayed_flags + * have been cleared correctly. 
+ */ + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + if (delayed_flags & DELAYED_UPDATE_BEACON) { skb = ieee80211_beacon_get(rt2x00dev->hw, vif, &control); if (skb && rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, @@ -439,7 +442,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, } if (delayed_flags & DELAYED_CONFIG_ERP) - rt2x00lib_config_erp(rt2x00dev, intf, &intf->conf); + rt2x00lib_config_erp(rt2x00dev, intf, &conf); if (delayed_flags & DELAYED_LED_ASSOC) rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); @@ -481,11 +484,11 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) return; - ieee80211_iterate_active_interfaces(rt2x00dev->hw, - rt2x00lib_beacondone_iter, - rt2x00dev); + ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, + rt2x00lib_beacondone_iter, + rt2x00dev); - queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work); + queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work); } EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); @@ -505,7 +508,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, * Update TX statistics. */ rt2x00dev->link.qual.tx_success += success; - rt2x00dev->link.qual.tx_failed += txdesc->retry + fail; + rt2x00dev->link.qual.tx_failed += fail; /* * Initialize TX status @@ -1030,8 +1033,10 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) * Initialize the device. */ status = rt2x00dev->ops->lib->initialize(rt2x00dev); - if (status) - goto exit; + if (status) { + rt2x00queue_uninitialize(rt2x00dev); + return status; + } __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); @@ -1041,11 +1046,6 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) rt2x00rfkill_register(rt2x00dev); return 0; - -exit: - rt2x00lib_uninitialize(rt2x00dev); - - return status; } int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) @@ -1131,6 +1131,10 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) /* * Initialize configuration work. */ + rt2x00dev->workqueue = create_singlethread_workqueue("rt2x00lib"); + if (!rt2x00dev->workqueue) + goto exit; + INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled); INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner); @@ -1191,6 +1195,13 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) rt2x00leds_unregister(rt2x00dev); /* + * Stop all queued work. Note that most tasks will already be halted + * during rt2x00lib_disable_radio() and rt2x00lib_uninitialize(). + */ + flush_workqueue(rt2x00dev->workqueue); + destroy_workqueue(rt2x00dev->workqueue); + + /* * Free ieee80211_hw memory. */ rt2x00lib_remove_hw(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c index 40c1f5c1b80..b362a1cf3f8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00leds.c +++ b/drivers/net/wireless/rt2x00/rt2x00leds.c @@ -72,6 +72,21 @@ void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) } } +void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled) +{ + struct rt2x00_led *led = &rt2x00dev->led_qual; + unsigned int brightness; + + if ((led->type != LED_TYPE_ACTIVITY) || !(led->flags & LED_REGISTERED)) + return; + + brightness = enabled ? 
LED_FULL : LED_OFF; + if (brightness != led->led_dev.brightness) { + led->led_dev.brightness_set(&led->led_dev, brightness); + led->led_dev.brightness = brightness; + } +} + void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) { struct rt2x00_led *led = &rt2x00dev->led_assoc; diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index 5be32fffc74..41ee02cd282 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -185,6 +185,7 @@ static inline void rt2x00rfkill_resume(struct rt2x00_dev *rt2x00dev) */ #ifdef CONFIG_RT2X00_LIB_LEDS void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi); +void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled); void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled); void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled); void rt2x00leds_register(struct rt2x00_dev *rt2x00dev); @@ -197,6 +198,11 @@ static inline void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, { } +static inline void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, + bool enabled) +{ +} + static inline void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) { diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index c206b509207..9cb023edd2e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -93,6 +93,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, */ if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) { ieee80211_stop_queues(hw); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -427,7 +428,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw, if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags)) rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); else - queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work); + queue_work(rt2x00dev->workqueue, &rt2x00dev->filter_work); } EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); @@ -508,7 +509,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, memcpy(&intf->conf, bss_conf, sizeof(*bss_conf)); if (delayed) { intf->delayed_flags |= delayed; - queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work); + queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work); } spin_unlock(&intf->lock); } diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 7867ec64bd2..60893de3bf8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -314,13 +314,14 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) if (status) { ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", pci_dev->irq, status); - return status; + goto exit; } return 0; exit: - rt2x00pci_uninitialize(rt2x00dev); + queue_for_each(rt2x00dev, queue) + rt2x00pci_free_queue_dma(rt2x00dev, queue); return status; } @@ -411,8 +412,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) if (pci_set_mwi(pci_dev)) ERROR_PROBE("MWI not available.\n"); - if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) && - pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { + if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { ERROR_PROBE("PCI DMA not supported.\n"); retval = -EIO; goto exit_disable_device; diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 5a331674dcb..e5ceae805b5 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c 
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -362,6 +362,12 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) } } + /* + * Kill guardian urb (if required by driver). + */ + if (!test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) + return; + for (i = 0; i < rt2x00dev->bcn->limit; i++) { priv_bcn = rt2x00dev->bcn->entries[i].priv_data; usb_kill_urb(priv_bcn->urb); diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 468a31c8c11..c3afb5cbe80 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -1201,6 +1201,15 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42); rt2x00pci_register_write(rt2x00dev, TXRX_CSR8, reg); + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); + rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); rt2x00pci_register_write(rt2x00dev, MAC_CSR6, 0x00000fff); @@ -2087,7 +2096,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev) if (value == LED_MODE_SIGNAL_STRENGTH) { rt2x00dev->led_qual.rt2x00dev = rt2x00dev; - rt2x00dev->led_radio.type = LED_TYPE_QUALITY; + rt2x00dev->led_qual.type = LED_TYPE_QUALITY; rt2x00dev->led_qual.led_dev.brightness_set = rt61pci_brightness_set; rt2x00dev->led_qual.led_dev.blink_set = @@ -2366,6 +2375,7 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(control->vif); + struct queue_entry_priv_pci_tx *priv_tx; struct skb_frame_desc *skbdesc; unsigned int beacon_base; u32 reg; @@ -2373,21 +2383,8 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, if (unlikely(!intf->beacon)) return -ENOBUFS; - /* - * We need to append the descriptor in front of the - * beacon frame. - */ - if (skb_headroom(skb) < intf->beacon->queue->desc_size) { - if (pskb_expand_head(skb, intf->beacon->queue->desc_size, - 0, GFP_ATOMIC)) - return -ENOMEM; - } - - /* - * Add the descriptor in front of the skb.
- */ - skb_push(skb, intf->beacon->queue->desc_size); - memset(skb->data, 0, intf->beacon->queue->desc_size); + priv_tx = intf->beacon->priv_data; + memset(priv_tx->desc, 0, intf->beacon->queue->desc_size); /* * Fill in skb descriptor @@ -2395,9 +2392,9 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, skbdesc = get_skb_frame_desc(skb); memset(skbdesc, 0, sizeof(*skbdesc)); skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; - skbdesc->data = skb->data + intf->beacon->queue->desc_size; - skbdesc->data_len = skb->len - intf->beacon->queue->desc_size; - skbdesc->desc = skb->data; + skbdesc->data = skb->data; + skbdesc->data_len = skb->len; + skbdesc->desc = priv_tx->desc; skbdesc->desc_len = intf->beacon->queue->desc_size; skbdesc->entry = intf->beacon; @@ -2425,7 +2422,10 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, */ beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, - skb->data, skb->len); + skbdesc->desc, skbdesc->desc_len); + rt2x00pci_register_multiwrite(rt2x00dev, + beacon_base + skbdesc->desc_len, + skbdesc->data, skbdesc->data_len); rt61pci_kick_tx_queue(rt2x00dev, control->queue); return 0; @@ -2490,7 +2490,7 @@ static const struct data_queue_desc rt61pci_queue_tx = { static const struct data_queue_desc rt61pci_queue_bcn = { .entry_num = 4 * BEACON_ENTRIES, - .data_size = MGMT_FRAME_SIZE, + .data_size = 0, /* No DMA required for beacons */ .desc_size = TXINFO_SIZE, .priv_size = sizeof(struct queue_entry_priv_pci_tx), }; diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index a9efe25f1ea..46e9e081fbf 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -134,11 +134,8 @@ static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev, * Wait until the BBP becomes ready. */ reg = rt73usb_bbp_check(rt2x00dev); - if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { - ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); - mutex_unlock(&rt2x00dev->usb_cache_mutex); - return; - } + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) + goto exit_fail; /* * Write the data into the BBP. @@ -151,6 +148,13 @@ static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev, rt73usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); mutex_unlock(&rt2x00dev->usb_cache_mutex); + + return; + +exit_fail: + mutex_unlock(&rt2x00dev->usb_cache_mutex); + + ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); } static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, @@ -164,11 +168,8 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, * Wait until the BBP becomes ready. */ reg = rt73usb_bbp_check(rt2x00dev); - if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { - ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); - mutex_unlock(&rt2x00dev->usb_cache_mutex); - return; - } + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) + goto exit_fail; /* * Write the request into the BBP. @@ -184,14 +185,19 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, * Wait until the BBP becomes ready. */ reg = rt73usb_bbp_check(rt2x00dev); - if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { - ERROR(rt2x00dev, "PHY_CSR3 register busy. 
Read failed.\n"); - *value = 0xff; - return; - } + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) + goto exit_fail; *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); mutex_unlock(&rt2x00dev->usb_cache_mutex); + + return; + +exit_fail: + mutex_unlock(&rt2x00dev->usb_cache_mutex); + + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); + *value = 0xff; } static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev, @@ -1000,6 +1006,15 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42); rt73usb_register_write(rt2x00dev, TXRX_CSR8, reg); + rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); + rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); + rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); + rt73usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); rt73usb_register_read(rt2x00dev, MAC_CSR6, &reg); @@ -1647,7 +1662,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev) if (value == LED_MODE_SIGNAL_STRENGTH) { rt2x00dev->led_qual.rt2x00dev = rt2x00dev; - rt2x00dev->led_radio.type = LED_TYPE_QUALITY; + rt2x00dev->led_qual.type = LED_TYPE_QUALITY; rt2x00dev->led_qual.led_dev.brightness_set = rt73usb_brightness_set; rt2x00dev->led_qual.led_dev.blink_set = @@ -2131,6 +2146,7 @@ static struct usb_device_id rt73usb_device_table[] = { /* D-Link */ { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c06), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, diff --git a/drivers/net/wireless/rtl8180_grf5101.c b/drivers/net/wireless/rtl8180_grf5101.c index 5d47935dbac..947ee55f18b 100644 --- a/drivers/net/wireless/rtl8180_grf5101.c +++ b/drivers/net/wireless/rtl8180_grf5101.c @@ -88,7 +88,7 @@ static void grf5101_rf_set_channel(struct ieee80211_hw *dev, write_grf5101(dev, 0x0B, chan); write_grf5101(dev, 0x07, 0x1000); - grf5101_write_phy_antenna(dev, chan); + grf5101_write_phy_antenna(dev, channel); } static void grf5101_rf_stop(struct ieee80211_hw *dev) diff --git a/drivers/net/wireless/rtl8180_max2820.c b/drivers/net/wireless/rtl8180_max2820.c index a34dfd382b6..6c825fd7f3b 100644 --- a/drivers/net/wireless/rtl8180_max2820.c +++ b/drivers/net/wireless/rtl8180_max2820.c @@ -78,7 +78,8 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev, struct ieee80211_conf *conf) { struct rtl8180_priv *priv = dev->priv; - int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + int channel = conf ?
diff --git a/drivers/net/wireless/rtl8180_grf5101.c b/drivers/net/wireless/rtl8180_grf5101.c
index 5d47935dbac..947ee55f18b 100644
--- a/drivers/net/wireless/rtl8180_grf5101.c
+++ b/drivers/net/wireless/rtl8180_grf5101.c
@@ -88,7 +88,7 @@ static void grf5101_rf_set_channel(struct ieee80211_hw *dev,
 write_grf5101(dev, 0x0B, chan);
 write_grf5101(dev, 0x07, 0x1000);
- grf5101_write_phy_antenna(dev, chan);
+ grf5101_write_phy_antenna(dev, channel);
 }
 static void grf5101_rf_stop(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/rtl8180_max2820.c b/drivers/net/wireless/rtl8180_max2820.c
index a34dfd382b6..6c825fd7f3b 100644
--- a/drivers/net/wireless/rtl8180_max2820.c
+++ b/drivers/net/wireless/rtl8180_max2820.c
@@ -78,7 +78,8 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev,
 struct ieee80211_conf *conf)
 {
 struct rtl8180_priv *priv = dev->priv;
- int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+ int channel = conf ?
+ ieee80211_frequency_to_channel(conf->channel->center_freq) : 1;
 unsigned int chan_idx = channel - 1;
 u32 txpw = priv->channels[chan_idx].hw_value & 0xFF;
 u32 chan = max2820_chan[chan_idx];
@@ -87,7 +88,7 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev,
 * sa2400, for MAXIM we do this directly from BB */
 rtl8180_write_phy(dev, 3, txpw);
- max2820_write_phy_antenna(dev, chan);
+ max2820_write_phy_antenna(dev, channel);
 write_max2820(dev, 3, chan);
 }
diff --git a/drivers/net/wireless/rtl8180_sa2400.c b/drivers/net/wireless/rtl8180_sa2400.c
index 0311b4ea124..cea4e0ccb92 100644
--- a/drivers/net/wireless/rtl8180_sa2400.c
+++ b/drivers/net/wireless/rtl8180_sa2400.c
@@ -86,7 +86,7 @@ static void sa2400_rf_set_channel(struct ieee80211_hw *dev,
 write_sa2400(dev, 7, txpw);
- sa2400_write_phy_antenna(dev, chan);
+ sa2400_write_phy_antenna(dev, channel);
 write_sa2400(dev, 0, chan);
 write_sa2400(dev, 1, 0xbb50);
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index d5787b37e1f..9223ada5f00 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -92,6 +92,7 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
 u8 data[4];
 struct usb_ctrlrequest dr;
 } *buf;
+ int rc;
 buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
 if (!buf)
@@ -116,7 +117,11 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
 usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), (unsigned char *)dr, buf, len, rtl8187_iowrite_async_cb, buf);
- usb_submit_urb(urb, GFP_ATOMIC);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0) {
+ kfree(buf);
+ usb_free_urb(urb);
+ }
 }
 static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv,
@@ -169,6 +174,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
 struct urb *urb;
 __le16 rts_dur = 0;
 u32 flags;
+ int rc;
 urb = usb_alloc_urb(0, GFP_ATOMIC);
 if (!urb) {
@@ -208,7 +214,11 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
 info->dev = dev;
 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2), hdr, skb->len, rtl8187_tx_cb, skb);
- usb_submit_urb(urb, GFP_ATOMIC);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0) {
+ usb_free_urb(urb);
+ kfree_skb(skb);
+ }
 return 0;
 }
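Both rtl8187 hunks make the same correction: usb_submit_urb() can fail (for instance with -ENODEV when the device is unplugged), and a failed submission means the completion callback will never run, so the caller has to release the URB and the buffer it references itself. A hedged sketch of that pattern with a hypothetical helper, assuming the completion callback owns the skb once the URB has been accepted:

#include <linux/usb.h>
#include <linux/skbuff.h>

/* Hypothetical example, not rtl8187 code. */
static int example_send_frame(struct usb_device *udev, struct sk_buff *skb,
			      usb_complete_t complete_fn)
{
	struct urb *urb;
	int rc;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
			  skb->data, skb->len, complete_fn, skb);

	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc < 0) {
		/* the completion will never run; free everything here */
		usb_free_urb(urb);
		kfree_skb(skb);
	}
	return rc;
}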
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 5dd23c93497..883af891ebf 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -2611,7 +2611,7 @@ static int strip_open(struct tty_struct *tty)
 * We need a write method.
 */
- if (tty->ops->write == NULL)
+ if (tty->ops->write == NULL || tty->ops->set_termios == NULL)
 return -EOPNOTSUPP;
 /*
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 03384a43186..49ae9700395 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -908,9 +908,9 @@ static void wv_psa_show(psa_t * p)
 p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5], p->psa_call_code[6], p->psa_call_code[7]);
 #ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
 p->psa_reserved[0],
- p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]);
+ p->psa_reserved[1]);
 #endif /* DEBUG_SHOW_UNUSED */
 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index baf74015751..b584c0ecc62 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1074,11 +1074,9 @@ wv_psa_show(psa_t * p)
 p->psa_call_code[6], p->psa_call_code[7]);
 #ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
 p->psa_reserved[0],
- p->psa_reserved[1],
- p->psa_reserved[2],
- p->psa_reserved[3]);
+ p->psa_reserved[1]);
 #endif /* DEBUG_SHOW_UNUSED */
 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 69c45ca9905..694e95d35fd 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -719,7 +719,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
 fc = le16_to_cpu(*((__le16 *) buffer));
 is_qos = ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
- ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_QOS_DATA);
+ (fc & IEEE80211_STYPE_QOS_DATA);
 is_4addr = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
 need_padding = is_qos ^ is_4addr;
@@ -765,6 +765,7 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
 {
 struct zd_mac *mac = zd_hw_mac(hw);
 mac->type = IEEE80211_IF_TYPE_INVALID;
+ zd_set_beacon_interval(&mac->chip, 0);
 zd_write_mac_addr(&mac->chip, NULL);
 }
@@ -805,7 +806,7 @@ void zd_process_intr(struct work_struct *work)
 u16 int_status;
 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
- int_status = le16_to_cpu(*(u16 *)(mac->intr_buffer+4));
+ int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
 if (int_status & INT_CFG_NEXT_BCN) {
 if (net_ratelimit())
 dev_dbg_f(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
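The zd1211rw casts above are endianness annotations rather than behaviour changes: the interrupt buffer carries little-endian values, so the pointer is typed __le16 * and converted with le16_to_cpu(), which documents the byte order and keeps sparse's endian checking quiet. A small hypothetical illustration of reading such a field from a raw buffer:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper: fetch a little-endian 16-bit field at 'offset'. */
static u16 example_read_le16(const u8 *buffer, unsigned int offset)
{
	/* The buffer stores the value little-endian, so the cast is
	 * __le16 *, not u16 *; le16_to_cpu() then swaps bytes only on
	 * big-endian hosts.  (get_unaligned_le16() would be the safer
	 * choice if the offset were not known to be aligned.) */
	return le16_to_cpu(*(const __le16 *)(buffer + offset));
}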
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 5316074f39f..6cdad976460 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -64,6 +64,7 @@ static struct usb_device_id usb_ids[] = {
 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
 { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
 { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
@@ -342,7 +343,7 @@ static inline void handle_regs_int(struct urb *urb)
 ZD_ASSERT(in_interrupt());
 spin_lock(&intr->lock);
- int_num = le16_to_cpu(*(u16 *)(urb->transfer_buffer+2));
+ int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
 if (int_num == CR_INTERRUPT) {
 struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
 memcpy(&mac->intr_buffer, urb->transfer_buffer,
@@ -889,9 +890,13 @@ static void tx_urb_complete(struct urb *urb)
 }
 free_urb:
 skb = (struct sk_buff *)urb->context;
- zd_mac_tx_to_dev(skb, urb->status);
+ /*
+ * grab 'usb' pointer before handing off the skb (since
+ * it might be freed by zd_mac_tx_to_dev or mac80211)
+ */
 cb = (struct zd_tx_skb_control_block *)skb->cb;
 usb = &zd_hw_mac(cb->hw)->chip.usb;
+ zd_mac_tx_to_dev(skb, urb->status);
 free_tx_urb(usb, urb);
 tx_dec_submitted_urbs(usb);
 return;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e62018a3613..d26f69b0184 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -946,8 +946,7 @@ err:
 work_done++;
 }
- while ((skb = __skb_dequeue(&errq)))
- kfree_skb(skb);
+ __skb_queue_purge(&errq);
 work_done -= handle_incoming_queue(dev, &rxq);
@@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
 }
 }
- while ((skb = __skb_dequeue(&free_list)) != NULL)
- dev_kfree_skb(skb);
+ __skb_queue_purge(&free_list);
 spin_unlock_bh(&np->rx_lock);
 }
@@ -1803,7 +1801,7 @@ static void __exit netif_exit(void)
 if (is_initial_xendomain())
 return;
- return xenbus_unregister_driver(&netfront);
+ xenbus_unregister_driver(&netfront);
 }
 module_exit(netif_exit);
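The xen-netfront hunks are a pure simplification: __skb_queue_purge() (linux/skbuff.h) does what the removed loops did by hand, popping every skb off the list and freeing it, and it uses the lockless __skb_dequeue(), so the caller must own the queue or hold its lock, which netfront does at both call sites. Roughly:

#include <linux/skbuff.h>

/* Approximately what __skb_queue_purge() expands to. */
static inline void example_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}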