Diffstat (limited to 'drivers/net/igb')
-rw-r--r--  drivers/net/igb/e1000_defines.h |   7
-rw-r--r--  drivers/net/igb/e1000_mac.c     |  25
-rw-r--r--  drivers/net/igb/e1000_regs.h    |   4
-rw-r--r--  drivers/net/igb/igb.h           |  44
-rw-r--r--  drivers/net/igb/igb_ethtool.c   | 131
-rw-r--r--  drivers/net/igb/igb_main.c      | 403
6 files changed, 336 insertions(+), 278 deletions(-)
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index ce700689fb5..40d03426c12 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -168,18 +168,12 @@
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
-#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
/*
@@ -329,6 +323,7 @@
#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
/* Extended desc bits for Linksec and timesync */
/* Transmit Control */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index e18747c70be..97f0049a5d6 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -50,13 +50,6 @@ void igb_remove_device(struct e1000_hw *hw)
kfree(hw->dev_spec);
}
-static void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
- struct igb_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, value);
-}
-
static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
@@ -83,8 +76,8 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
struct e1000_bus_info *bus = &hw->bus;
s32 ret_val;
- u32 status;
- u16 pcie_link_status, pci_header_type;
+ u32 reg;
+ u16 pcie_link_status;
bus->type = e1000_bus_type_pci_express;
bus->speed = e1000_bus_speed_2500;
@@ -99,14 +92,8 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
PCIE_LINK_WIDTH_MASK) >>
PCIE_LINK_WIDTH_SHIFT);
- igb_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
- if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
- status = rd32(E1000_STATUS);
- bus->func = (status & E1000_STATUS_FUNC_MASK)
- >> E1000_STATUS_FUNC_SHIFT;
- } else {
- bus->func = 0;
- }
+ reg = rd32(E1000_STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
return 0;
}
@@ -229,8 +216,8 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
if (!hw->mac.disable_av)
rar_high |= E1000_RAH_AV;
- array_wr32(E1000_RA, (index << 1), rar_low);
- array_wr32(E1000_RA, ((index << 1) + 1), rar_high);
+ wr32(E1000_RAL(index), rar_low);
+ wr32(E1000_RAH(index), rar_high);
}
/**
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 95523af2605..bdf5d839c4b 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -221,6 +221,10 @@
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
#define E1000_RA 0x05400 /* Receive Address - RW Array */
#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
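
Note: the E1000_RAL/E1000_RAH macros added above fold the two halves of the receive-address array (E1000_RA at 0x05400 and E1000_RA2 at 0x054E0) into a single index: entries 0-15 live in the first block, entries 16 and up in the second, 8 bytes per entry (RAL at offset 0, RAH at offset 4). A minimal user-space sketch that just reproduces the macros and prints the resulting offsets (illustration only, not driver code):

#include <stdio.h>

#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
                                      (0x054E0 + (((_i) - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                                      (0x054E4 + (((_i) - 16) * 8)))

int main(void)
{
	int i;

	/* entries 0..15 come from the first block, 16..23 from the second */
	for (i = 0; i < 24; i++)
		printf("RA[%2d]: RAL = 0x%05x  RAH = 0x%05x\n",
		       i, E1000_RAL(i), E1000_RAH(i));
	return 0;
}

For example, RAL(15) resolves to 0x05478 while RAL(16) jumps to 0x054E0, matching the old E1000_RA/E1000_RA2 split.
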
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 4ff6f0567f3..5a27825cc48 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -43,8 +43,6 @@ struct igb_adapter;
#endif
/* Interrupt defines */
-#define IGB_MAX_TX_CLEAN 72
-
#define IGB_MIN_DYN_ITR 3000
#define IGB_MAX_DYN_ITR 96000
@@ -127,7 +125,8 @@ struct igb_buffer {
/* TX */
struct {
unsigned long time_stamp;
- u32 length;
+ u16 length;
+ u16 next_to_watch;
};
/* RX */
struct {
@@ -160,7 +159,8 @@ struct igb_ring {
u16 itr_register;
u16 cpu;
- int queue_index;
+ u16 queue_index;
+ u16 reg_idx;
unsigned int total_bytes;
unsigned int total_packets;
@@ -294,6 +294,8 @@ struct igb_adapter {
unsigned int lro_flushed;
unsigned int lro_no_desc;
#endif
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -325,7 +327,41 @@ extern void igb_reset(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u16);
extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
+extern void igb_free_tx_resources(struct igb_ring *);
+extern void igb_free_rx_resources(struct igb_ring *);
extern void igb_update_stats(struct igb_adapter *);
extern void igb_set_ethtool_ops(struct net_device *);
+static inline s32 igb_reset_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.reset_phy)
+ return hw->phy.ops.reset_phy(hw);
+
+ return 0;
+}
+
+static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_phy_reg)
+ return hw->phy.ops.read_phy_reg(hw, offset, data);
+
+ return 0;
+}
+
+static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_phy_reg)
+ return hw->phy.ops.write_phy_reg(hw, offset, data);
+
+ return 0;
+}
+
+static inline s32 igb_get_phy_info(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_phy_info)
+ return hw->phy.ops.get_phy_info(hw);
+
+ return 0;
+}
+
#endif /* _IGB_H_ */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 89964fa739a..3c831f1472a 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -101,8 +101,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
};
#define IGB_QUEUE_STATS_LEN \
- ((((struct igb_adapter *)netdev->priv)->num_rx_queues + \
- ((struct igb_adapter *)netdev->priv)->num_tx_queues) * \
+ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
+ ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
(sizeof(struct igb_queue_stats) / sizeof(u64)))
#define IGB_GLOBAL_STATS_LEN \
sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
@@ -494,8 +494,6 @@ static void igb_get_regs(struct net_device *netdev,
/* These should probably be added to e1000_regs.h instead */
#define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
- #define E1000_RAL(_i) (0x05400 + ((_i) * 8))
- #define E1000_RAH(_i) (0x05404 + ((_i) * 8))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -714,15 +712,13 @@ static void igb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_ring *tx_ring = adapter->tx_ring;
- struct igb_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IGB_MAX_RXD;
ring->tx_max_pending = IGB_MAX_TXD;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -731,12 +727,9 @@ static int igb_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_buffer *old_buf;
- struct igb_buffer *old_rx_buf;
- void *old_desc;
+ struct igb_ring *temp_ring;
int i, err;
- u32 new_rx_count, new_tx_count, old_size;
- dma_addr_t old_dma;
+ u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -749,12 +742,19 @@ static int igb_set_ringparam(struct net_device *netdev,
new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count)) {
/* nothing to do */
return 0;
}
+ if (adapter->num_tx_queues > adapter->num_rx_queues)
+ temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
+ else
+ temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
+ if (!temp_ring)
+ return -ENOMEM;
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
@@ -766,62 +766,55 @@ static int igb_set_ringparam(struct net_device *netdev,
* because the ISRs in MSI-X mode get passed pointers
* to the tx and rx ring structs.
*/
- if (new_tx_count != adapter->tx_ring->count) {
+ if (new_tx_count != adapter->tx_ring_count) {
+ memcpy(temp_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct igb_ring));
+
for (i = 0; i < adapter->num_tx_queues; i++) {
- /* Save existing descriptor ring */
- old_buf = adapter->tx_ring[i].buffer_info;
- old_desc = adapter->tx_ring[i].desc;
- old_size = adapter->tx_ring[i].size;
- old_dma = adapter->tx_ring[i].dma;
- /* Try to allocate a new one */
- adapter->tx_ring[i].buffer_info = NULL;
- adapter->tx_ring[i].desc = NULL;
- adapter->tx_ring[i].count = new_tx_count;
- err = igb_setup_tx_resources(adapter,
- &adapter->tx_ring[i]);
+ temp_ring[i].count = new_tx_count;
+ err = igb_setup_tx_resources(adapter, &temp_ring[i]);
if (err) {
- /* Restore the old one so at least
- the adapter still works, even if
- we failed the request */
- adapter->tx_ring[i].buffer_info = old_buf;
- adapter->tx_ring[i].desc = old_desc;
- adapter->tx_ring[i].size = old_size;
- adapter->tx_ring[i].dma = old_dma;
+ while (i) {
+ i--;
+ igb_free_tx_resources(&temp_ring[i]);
+ }
goto err_setup;
}
- /* Free the old buffer manually */
- vfree(old_buf);
- pci_free_consistent(adapter->pdev, old_size,
- old_desc, old_dma);
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_free_tx_resources(&adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring, temp_ring,
+ adapter->num_tx_queues * sizeof(struct igb_ring));
+
+ adapter->tx_ring_count = new_tx_count;
}
if (new_rx_count != adapter->rx_ring->count) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(temp_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct igb_ring));
- old_rx_buf = adapter->rx_ring[i].buffer_info;
- old_desc = adapter->rx_ring[i].desc;
- old_size = adapter->rx_ring[i].size;
- old_dma = adapter->rx_ring[i].dma;
-
- adapter->rx_ring[i].buffer_info = NULL;
- adapter->rx_ring[i].desc = NULL;
- adapter->rx_ring[i].dma = 0;
- adapter->rx_ring[i].count = new_rx_count;
- err = igb_setup_rx_resources(adapter,
- &adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ temp_ring[i].count = new_rx_count;
+ err = igb_setup_rx_resources(adapter, &temp_ring[i]);
if (err) {
- adapter->rx_ring[i].buffer_info = old_rx_buf;
- adapter->rx_ring[i].desc = old_desc;
- adapter->rx_ring[i].size = old_size;
- adapter->rx_ring[i].dma = old_dma;
+ while (i) {
+ i--;
+ igb_free_rx_resources(&temp_ring[i]);
+ }
goto err_setup;
}
- vfree(old_rx_buf);
- pci_free_consistent(adapter->pdev, old_size, old_desc,
- old_dma);
}
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_free_rx_resources(&adapter->rx_ring[i]);
+
+ memcpy(adapter->rx_ring, temp_ring,
+ adapter->num_rx_queues * sizeof(struct igb_ring));
+
+ adapter->rx_ring_count = new_rx_count;
}
err = 0;
@@ -830,6 +823,7 @@ err_setup:
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
+ vfree(temp_ring);
return err;
}
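
Note: the rewritten igb_set_ringparam() builds the resized rings in a scratch array sized for the larger of the two queue counts, and only after every allocation succeeds does it free the old descriptor memory and copy the new ring state over the existing structures; the ring addresses held by the MSI-X handlers therefore stay valid, and a failed allocation leaves the adapter untouched. A generic sketch of that "build aside, then swap" pattern, using hypothetical ring/setup/free helpers rather than the driver's own API:

#include <stdlib.h>
#include <string.h>

struct ring { unsigned int count; void *desc; };

static int setup_ring(struct ring *r)  { r->desc = malloc(r->count); return r->desc ? 0 : -1; }
static void free_ring(struct ring *r)  { free(r->desc); r->desc = NULL; }

static int resize_rings(struct ring *rings, int n, unsigned int new_count)
{
	struct ring *tmp = malloc(n * sizeof(*tmp));
	int i;

	if (!tmp)
		return -1;
	memcpy(tmp, rings, n * sizeof(*tmp));

	for (i = 0; i < n; i++) {
		tmp[i].count = new_count;
		if (setup_ring(&tmp[i])) {
			while (i--)
				free_ring(&tmp[i]);     /* unwind the rings built so far */
			free(tmp);
			return -1;                      /* old rings remain untouched */
		}
	}
	for (i = 0; i < n; i++)
		free_ring(&rings[i]);                   /* now drop the old resources */
	memcpy(rings, tmp, n * sizeof(*tmp));           /* ring addresses stay stable */
	free(tmp);
	return 0;
}

int main(void)
{
	struct ring rings[2] = { { 64, NULL }, { 64, NULL } };

	return resize_rings(rings, 2, 128) ? 1 : 0;
}
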
@@ -1343,8 +1337,9 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
wr32(E1000_RDLEN(0), rx_ring->size);
wr32(E1000_RDH(0), 0);
wr32(E1000_RDT(0), 0);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
wr32(E1000_RCTL, rctl);
wr32(E1000_SRRCTL(0), 0);
@@ -1380,10 +1375,10 @@ static void igb_phy_disable_receiver(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
/* Write out to PHY registers 29 and 30 to disable the Receiver. */
- hw->phy.ops.write_phy_reg(hw, 29, 0x001F);
- hw->phy.ops.write_phy_reg(hw, 30, 0x8FFC);
- hw->phy.ops.write_phy_reg(hw, 29, 0x001A);
- hw->phy.ops.write_phy_reg(hw, 30, 0x8FF0);
+ igb_write_phy_reg(hw, 29, 0x001F);
+ igb_write_phy_reg(hw, 30, 0x8FFC);
+ igb_write_phy_reg(hw, 29, 0x001A);
+ igb_write_phy_reg(hw, 30, 0x8FF0);
}
static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
@@ -1396,17 +1391,17 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
if (hw->phy.type == e1000_phy_m88) {
/* Auto-MDI/MDIX Off */
- hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
/* autoneg off */
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
}
ctrl_reg = rd32(E1000_CTRL);
/* force 1000, set loopback */
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = rd32(E1000_CTRL);
@@ -1500,10 +1495,10 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
wr32(E1000_RCTL, rctl);
hw->mac.autoneg = true;
- hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+ igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK;
- hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_reg);
+ igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
igb_phy_sw_reset(hw);
}
}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 20d27e622ec..022794e579c 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -76,8 +77,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
-static void igb_free_tx_resources(struct igb_ring *);
-static void igb_free_rx_resources(struct igb_ring *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
@@ -232,6 +231,40 @@ static void __exit igb_exit_module(void)
module_exit(igb_exit_module);
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
+/**
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+ int i;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* The queues are allocated for virtualization such that VF 0
+ * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+ * In order to avoid collision we start at the first free queue
+ * and continue consuming queues in the same sequence
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
+ break;
+ case e1000_82575:
+ default:
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].reg_idx = i;
+ break;
+ }
+}
+
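
Note: on 82576 the hardware queues are laid out for virtualization (VF 0 owns queues 0 and 8, VF 1 owns 1 and 9, and so on), so igb_cache_ring_register() interleaves the PF's rings across the two halves with Q_IDX_82576() and caches the result in reg_idx. A small stand-alone reproduction of the mapping (illustration only):

#include <stdio.h>

#define Q_IDX_82576(i) ((((i) & 0x1) << 3) + ((i) >> 1))

int main(void)
{
	int i;

	/* prints: 0->0, 1->8, 2->1, 3->9, 4->2, 5->10, 6->3, 7->11 */
	for (i = 0; i < 8; i++)
		printf("ring %d -> hw queue %d\n", i, Q_IDX_82576(i));
	return 0;
}
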
/**
* igb_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
@@ -259,11 +292,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = &(adapter->tx_ring[i]);
+ ring->count = adapter->tx_ring_count;
ring->adapter = adapter;
ring->queue_index = i;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = &(adapter->rx_ring[i]);
+ ring->count = adapter->rx_ring_count;
ring->adapter = adapter;
ring->queue_index = i;
ring->itr_register = E1000_ITR;
@@ -271,6 +306,8 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
/* set a default napi handler for each rx_ring */
netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
}
+
+ igb_cache_ring_register(adapter);
return 0;
}
@@ -311,36 +348,36 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
break;
case e1000_82576:
- /* The 82576 uses a table-based method for assigning vectors.
+ /* 82576 uses a table-based method for assigning vectors.
Each queue has a single entry in the table to which we write
a vector number along with a "valid" bit. Sadly, the layout
of the table is somewhat counterintuitive. */
if (rx_queue > IGB_N0_QUEUE) {
- index = (rx_queue & 0x7);
+ index = (rx_queue >> 1);
ivar = array_rd32(E1000_IVAR0, index);
- if (rx_queue < 8) {
- /* vector goes into low byte of register */
- ivar = ivar & 0xFFFFFF00;
- ivar |= msix_vector | E1000_IVAR_VALID;
- } else {
+ if (rx_queue & 0x1) {
/* vector goes into third byte of register */
ivar = ivar & 0xFF00FFFF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+ } else {
+ /* vector goes into low byte of register */
+ ivar = ivar & 0xFFFFFF00;
+ ivar |= msix_vector | E1000_IVAR_VALID;
}
adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
array_wr32(E1000_IVAR0, index, ivar);
}
if (tx_queue > IGB_N0_QUEUE) {
- index = (tx_queue & 0x7);
+ index = (tx_queue >> 1);
ivar = array_rd32(E1000_IVAR0, index);
- if (tx_queue < 8) {
- /* vector goes into second byte of register */
- ivar = ivar & 0xFFFF00FF;
- ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
- } else {
+ if (tx_queue & 0x1) {
/* vector goes into high byte of register */
ivar = ivar & 0x00FFFFFF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+ } else {
+ /* vector goes into second byte of register */
+ ivar = ivar & 0xFFFF00FF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
}
adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
array_wr32(E1000_IVAR0, index, ivar);
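
Note: the reworked assignment reflects the 82576 IVAR layout implied by this hunk: each 32-bit IVAR0 entry covers one even/odd queue pair, one byte per vector slot, so the register index is queue >> 1 and the byte within the register is selected by the queue's low bit (rx in byte 0 or 2, tx in byte 1 or 3). Hypothetical helpers that just restate that arithmetic, not part of the patch:

#include <stdio.h>

/* Hypothetical helpers restating the IVAR arithmetic from the hunk above. */
static unsigned int ivar_index(unsigned int queue)    { return queue >> 1; }
static unsigned int ivar_rx_shift(unsigned int queue) { return (queue & 0x1) ? 16 : 0; }
static unsigned int ivar_tx_shift(unsigned int queue) { return (queue & 0x1) ? 24 : 8; }

int main(void)
{
	unsigned int q;

	for (q = 0; q < 4; q++)
		printf("queue %u: IVAR0[%u], rx byte %u, tx byte %u\n",
		       q, ivar_index(q), ivar_rx_shift(q) / 8, ivar_tx_shift(q) / 8);
	return 0;
}
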
@@ -445,7 +482,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = &(adapter->tx_ring[i]);
- sprintf(ring->name, "%s-tx%d", netdev->name, i);
+ sprintf(ring->name, "%s-tx-%d", netdev->name, i);
err = request_irq(adapter->msix_entries[vector].vector,
&igb_msix_tx, 0, ring->name,
&(adapter->tx_ring[i]));
@@ -458,7 +495,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = &(adapter->rx_ring[i]);
if (strlen(netdev->name) < (IFNAMSIZ - 5))
- sprintf(ring->name, "%s-rx%d", netdev->name, i);
+ sprintf(ring->name, "%s-rx-%d", netdev->name, i);
else
memcpy(ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -931,8 +968,7 @@ void igb_reset(struct igb_adapter *adapter)
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
igb_reset_adaptive(&adapter->hw);
- if (adapter->hw.phy.ops.get_phy_info)
- adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+ igb_get_phy_info(&adapter->hw);
}
/**
@@ -950,6 +986,25 @@ static int igb_is_need_ioport(struct pci_dev *pdev)
}
}
+static const struct net_device_ops igb_netdev_ops = {
+ .ndo_open = igb_open,
+ .ndo_stop = igb_close,
+ .ndo_start_xmit = igb_xmit_frame_adv,
+ .ndo_get_stats = igb_get_stats,
+ .ndo_set_multicast_list = igb_set_multi,
+ .ndo_set_mac_address = igb_set_mac,
+ .ndo_change_mtu = igb_change_mtu,
+ .ndo_do_ioctl = igb_ioctl,
+ .ndo_tx_timeout = igb_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_register = igb_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = igb_netpoll,
+#endif
+};
+
/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -1031,6 +1086,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (err)
goto err_pci_reg;
+ err = pci_enable_pcie_error_reporting(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+ "0x%x\n", err);
+ /* non-fatal, continue */
+ }
+
pci_set_master(pdev);
pci_save_state(pdev);
@@ -1059,23 +1121,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (!adapter->hw.hw_addr)
goto err_ioremap;
- netdev->open = &igb_open;
- netdev->stop = &igb_close;
- netdev->get_stats = &igb_get_stats;
- netdev->set_multicast_list = &igb_set_multi;
- netdev->set_mac_address = &igb_set_mac;
- netdev->change_mtu = &igb_change_mtu;
- netdev->do_ioctl = &igb_ioctl;
+ netdev->netdev_ops = &igb_netdev_ops;
igb_set_ethtool_ops(netdev);
- netdev->tx_timeout = &igb_tx_timeout;
netdev->watchdog_timeo = 5 * HZ;
- netdev->vlan_rx_register = igb_vlan_rx_register;
- netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
- netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = igb_netpoll;
-#endif
- netdev->hard_start_xmit = &igb_xmit_frame_adv;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -1275,16 +1323,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
- dev_info(&pdev->dev,
- "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
netdev->name,
((hw->bus.speed == e1000_bus_speed_2500)
? "2.5Gb/s" : "unknown"),
((hw->bus.width == e1000_bus_width_pcie_x4)
? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
? "Width x1" : "unknown"),
- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+ netdev->dev_addr);
igb_read_part_num(hw, &part_num);
dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
@@ -1302,7 +1348,7 @@ err_register:
igb_release_hw_control(adapter);
err_eeprom:
if (!igb_check_reset_block(hw))
- hw->phy.ops.reset_phy(hw);
+ igb_reset_phy(hw);
if (hw->flash_address)
iounmap(hw->flash_address);
@@ -1338,6 +1384,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
#ifdef CONFIG_IGB_DCA
struct e1000_hw *hw = &adapter->hw;
#endif
+ int err;
/* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled */
@@ -1362,9 +1409,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
- if (adapter->hw.phy.ops.reset_phy &&
- !igb_check_reset_block(&adapter->hw))
- adapter->hw.phy.ops.reset_phy(&adapter->hw);
+ if (!igb_check_reset_block(&adapter->hw))
+ igb_reset_phy(&adapter->hw);
igb_remove_device(&adapter->hw);
igb_reset_interrupt_capability(adapter);
@@ -1378,6 +1424,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
free_netdev(netdev);
+ err = pci_disable_pcie_error_reporting(pdev);
+ if (err)
+ dev_err(&pdev->dev,
+ "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+
pci_disable_device(pdev);
}
@@ -1397,6 +1448,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+ adapter->tx_ring_count = IGB_DEFAULT_TXD;
+ adapter->rx_ring_count = IGB_DEFAULT_RXD;
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
adapter->rx_ps_hdr_size = 0; /* disable packet split */
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
@@ -1558,8 +1611,7 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
- + sizeof(u32);
+ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
@@ -1618,43 +1670,37 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
**/
static void igb_configure_tx(struct igb_adapter *adapter)
{
- u64 tdba, tdwba;
+ u64 tdba;
struct e1000_hw *hw = &adapter->hw;
u32 tctl;
u32 txdctl, txctrl;
- int i;
+ int i, j;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = &(adapter->tx_ring[i]);
-
- wr32(E1000_TDLEN(i),
+ j = ring->reg_idx;
+ wr32(E1000_TDLEN(j),
ring->count * sizeof(struct e1000_tx_desc));
tdba = ring->dma;
- wr32(E1000_TDBAL(i),
+ wr32(E1000_TDBAL(j),
tdba & 0x00000000ffffffffULL);
- wr32(E1000_TDBAH(i), tdba >> 32);
-
- tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
- tdwba |= 1; /* enable head wb */
- wr32(E1000_TDWBAL(i),
- tdwba & 0x00000000ffffffffULL);
- wr32(E1000_TDWBAH(i), tdwba >> 32);
+ wr32(E1000_TDBAH(j), tdba >> 32);
- ring->head = E1000_TDH(i);
- ring->tail = E1000_TDT(i);
+ ring->head = E1000_TDH(j);
+ ring->tail = E1000_TDT(j);
writel(0, hw->hw_addr + ring->tail);
writel(0, hw->hw_addr + ring->head);
- txdctl = rd32(E1000_TXDCTL(i));
+ txdctl = rd32(E1000_TXDCTL(j));
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
- wr32(E1000_TXDCTL(i), txdctl);
+ wr32(E1000_TXDCTL(j), txdctl);
/* Turn off Relaxed Ordering on head write-backs. The
* writebacks MUST be delivered in order or it will
* completely screw up our bookeeping.
*/
- txctrl = rd32(E1000_DCA_TXCTRL(i));
+ txctrl = rd32(E1000_DCA_TXCTRL(j));
txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
- wr32(E1000_DCA_TXCTRL(i), txctrl);
+ wr32(E1000_DCA_TXCTRL(j), txctrl);
}
@@ -1771,14 +1817,14 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
u32 srrctl = 0;
- int i;
+ int i, j;
rctl = rd32(E1000_RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/*
@@ -1788,38 +1834,26 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
*/
rctl |= E1000_RCTL_SECRC;
- rctl &= ~E1000_RCTL_SBP;
+ /*
+ * disable store bad packets, long packet enable, and clear size bits.
+ */
+ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256);
- if (adapter->netdev->mtu <= ETH_DATA_LEN)
- rctl &= ~E1000_RCTL_LPE;
- else
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
rctl |= E1000_RCTL_LPE;
- if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
- /* Setup buffer sizes */
- rctl &= ~E1000_RCTL_SZ_4096;
- rctl |= E1000_RCTL_BSEX;
- switch (adapter->rx_buffer_len) {
- case IGB_RXBUFFER_256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case IGB_RXBUFFER_512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case IGB_RXBUFFER_1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case IGB_RXBUFFER_2048:
- default:
- rctl |= E1000_RCTL_SZ_2048;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- }
- } else {
- rctl &= ~E1000_RCTL_BSEX;
- srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+
+ /* Setup buffer sizes */
+ switch (adapter->rx_buffer_len) {
+ case IGB_RXBUFFER_256:
+ rctl |= E1000_RCTL_SZ_256;
+ break;
+ case IGB_RXBUFFER_512:
+ rctl |= E1000_RCTL_SZ_512;
+ break;
+ default:
+ srrctl = ALIGN(adapter->rx_buffer_len, 1024)
+ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ break;
}
/* 82575 and greater support packet-split where the protocol
@@ -1841,8 +1875,10 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
- for (i = 0; i < adapter->num_rx_queues; i++)
- wr32(E1000_SRRCTL(i), srrctl);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ wr32(E1000_SRRCTL(j), srrctl);
+ }
wr32(E1000_RCTL, rctl);
}
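
Note: with the BSEX/size-extension path gone, only the 256- and 512-byte legacy sizes are still expressed through RCTL; every other receive buffer length is rounded up to 1 KB and programmed into SRRCTL's BSIZEPKT field. A quick user-space sketch of that encoding; it assumes E1000_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field counts in 1 KB units:

#include <stdio.h>

#define E1000_SRRCTL_BSIZEPKT_SHIFT 10          /* assumed: field counts 1 KB units */
#define ALIGN_1K(x) (((x) + 1023u) & ~1023u)

int main(void)
{
	unsigned int lens[] = { 1024, 2048, 4096, 16384 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("rx_buffer_len %5u -> SRRCTL.BSIZEPKT %u\n",
		       lens[i], ALIGN_1K(lens[i]) >> E1000_SRRCTL_BSIZEPKT_SHIFT);
	return 0;
}
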
@@ -1859,7 +1895,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rxcsum;
u32 rxdctl;
- int i;
+ int i, j;
/* disable receives while setting up the descriptors */
rctl = rd32(E1000_RCTL);
@@ -1874,25 +1910,26 @@ static void igb_configure_rx(struct igb_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = &(adapter->rx_ring[i]);
+ j = ring->reg_idx;
rdba = ring->dma;
- wr32(E1000_RDBAL(i),
+ wr32(E1000_RDBAL(j),
rdba & 0x00000000ffffffffULL);
- wr32(E1000_RDBAH(i), rdba >> 32);
- wr32(E1000_RDLEN(i),
+ wr32(E1000_RDBAH(j), rdba >> 32);
+ wr32(E1000_RDLEN(j),
ring->count * sizeof(union e1000_adv_rx_desc));
- ring->head = E1000_RDH(i);
- ring->tail = E1000_RDT(i);
+ ring->head = E1000_RDH(j);
+ ring->tail = E1000_RDT(j);
writel(0, hw->hw_addr + ring->tail);
writel(0, hw->hw_addr + ring->head);
- rxdctl = rd32(E1000_RXDCTL(i));
+ rxdctl = rd32(E1000_RXDCTL(j));
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
rxdctl &= 0xFFF00000;
rxdctl |= IGB_RX_PTHRESH;
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- wr32(E1000_RXDCTL(i), rxdctl);
+ wr32(E1000_RXDCTL(j), rxdctl);
#ifdef CONFIG_IGB_LRO
/* Intitial LRO Settings */
ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
@@ -1922,7 +1959,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
shift = 6;
for (j = 0; j < (32 * 4); j++) {
reta.bytes[j & 3] =
- (j % adapter->num_rx_queues) << shift;
+ adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
if ((j & 3) == 3)
writel(reta.dword,
hw->hw_addr + E1000_RETA(0) + (j & ~3));
@@ -1984,7 +2021,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
*
* Free all transmit software resources
**/
-static void igb_free_tx_resources(struct igb_ring *tx_ring)
+void igb_free_tx_resources(struct igb_ring *tx_ring)
{
struct pci_dev *pdev = tx_ring->adapter->pdev;
@@ -2082,7 +2119,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
*
* Free all receive software resources
**/
-static void igb_free_rx_resources(struct igb_ring *rx_ring)
+void igb_free_rx_resources(struct igb_ring *rx_ring)
{
struct pci_dev *pdev = rx_ring->adapter->pdev;
@@ -2274,8 +2311,7 @@ static void igb_set_multi(struct net_device *netdev)
static void igb_update_phy_info(unsigned long data)
{
struct igb_adapter *adapter = (struct igb_adapter *) data;
- if (adapter->hw.phy.ops.get_phy_info)
- adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+ igb_get_phy_info(&adapter->hw);
}
/**
@@ -2330,9 +2366,10 @@ static void igb_watchdog_task(struct work_struct *work)
&adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
- dev_info(&adapter->pdev->dev,
- "NIC Link is Up %d Mbps %s, "
+ /* Links status message must follow this format */
+ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
"Flow Control: %s\n",
+ netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
"Full Duplex" : "Half Duplex",
@@ -2367,7 +2404,9 @@ static void igb_watchdog_task(struct work_struct *work)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+ /* Links status message must follow this format */
+ printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev->name);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -2703,6 +2742,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2766,6 +2806,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
cpu_to_le32(tx_ring->queue_index << 4);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
@@ -2784,8 +2825,8 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
static inline int igb_tx_map_adv(struct igb_adapter *adapter,
- struct igb_ring *tx_ring,
- struct sk_buff *skb)
+ struct igb_ring *tx_ring, struct sk_buff *skb,
+ unsigned int first)
{
struct igb_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
@@ -2799,6 +2840,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
PCI_DMA_TODEVICE);
count++;
@@ -2816,6 +2858,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
buffer_info->dma = pci_map_page(adapter->pdev,
frag->page,
frag->page_offset,
@@ -2828,8 +2871,9 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
i = 0;
}
- i = (i == 0) ? tx_ring->count - 1 : i - 1;
+ i = ((i == 0) ? tx_ring->count - 1 : i - 1);
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
return count;
}
@@ -2936,6 +2980,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int first;
unsigned int tx_flags = 0;
unsigned int len;
u8 hdr_len = 0;
@@ -2972,6 +3017,8 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= IGB_TX_FLAGS_IPV4;
+ first = tx_ring->next_to_use;
+
tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
&hdr_len) : 0;
@@ -2987,7 +3034,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
tx_flags |= IGB_TX_FLAGS_CSUM;
igb_tx_queue_adv(adapter, tx_ring, tx_flags,
- igb_tx_map_adv(adapter, tx_ring, skb),
+ igb_tx_map_adv(adapter, tx_ring, skb, first),
skb->len, hdr_len);
netdev->trans_start = jiffies;
@@ -3249,7 +3296,7 @@ void igb_update_stats(struct igb_adapter *adapter)
/* Phy Stats */
if (hw->phy.media_type == e1000_media_type_copper) {
if ((adapter->link_speed == SPEED_1000) &&
- (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
+ (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
&phy_tmp))) {
phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
adapter->phy_stats.idle_errors += phy_tmp;
@@ -3332,7 +3379,6 @@ static void igb_write_itr(struct igb_ring *ring)
static irqreturn_t igb_msix_rx(int irq, void *data)
{
struct igb_ring *rx_ring = data;
- struct igb_adapter *adapter = rx_ring->adapter;
/* Write the ITR value calculated at the end of the
* previous interrupt.
@@ -3340,11 +3386,11 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
igb_write_itr(rx_ring);
- if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
- __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
+ if (netif_rx_schedule_prep(&rx_ring->napi))
+ __netif_rx_schedule(&rx_ring->napi);
#ifdef CONFIG_IGB_DCA
- if (adapter->flags & IGB_FLAG_DCA_ENABLED)
+ if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_rx_dca(rx_ring);
#endif
return IRQ_HANDLED;
@@ -3357,7 +3403,7 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
struct igb_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
- int q = rx_ring - adapter->rx_ring;
+ int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
@@ -3384,7 +3430,7 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
struct igb_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
- int q = tx_ring - adapter->tx_ring;
+ int q = tx_ring->reg_idx;
if (tx_ring->cpu != cpu) {
dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
@@ -3493,7 +3539,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+ netif_rx_schedule(&adapter->rx_ring[0].napi);
return IRQ_HANDLED;
}
@@ -3531,7 +3577,7 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+ netif_rx_schedule(&adapter->rx_ring[0].napi);
return IRQ_HANDLED;
}
@@ -3566,7 +3612,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
!netif_running(netdev)) {
if (adapter->itr_setting & 3)
igb_set_itr(adapter);
- netif_rx_complete(netdev, napi);
+ netif_rx_complete(napi);
if (!test_bit(__IGB_DOWN, &adapter->state))
igb_irq_enable(adapter);
return 0;
@@ -3592,7 +3638,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
/* If not enough Rx work done, exit the polling mode */
if ((work_done == 0) || !netif_running(netdev)) {
- netif_rx_complete(netdev, napi);
+ netif_rx_complete(napi);
if (adapter->itr_setting & 3) {
if (adapter->num_rx_queues == 1)
@@ -3610,12 +3656,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
return 1;
}
-static inline u32 get_head(struct igb_ring *tx_ring)
-{
- void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
- return le32_to_cpu(*(volatile __le32 *)end);
-}
-
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
@@ -3624,24 +3664,25 @@ static inline u32 get_head(struct igb_ring *tx_ring)
static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
{
struct igb_adapter *adapter = tx_ring->adapter;
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- struct e1000_tx_desc *tx_desc;
+ struct e1000_hw *hw = &adapter->hw;
struct igb_buffer *buffer_info;
struct sk_buff *skb;
- unsigned int i;
- u32 head, oldhead;
- unsigned int count = 0;
+ union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- bool retval = true;
+ unsigned int i, eop, count = 0;
+ bool cleaned = false;
- rmb();
- head = get_head(tx_ring);
i = tx_ring->next_to_clean;
- while (1) {
- while (i != head) {
- tx_desc = E1000_TX_DESC(*tx_ring, i);
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ for (cleaned = false; !cleaned; count++) {
+ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
skb = buffer_info->skb;
if (skb) {
@@ -3656,25 +3697,17 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
}
igb_unmap_and_free_tx_resource(adapter, buffer_info);
+ tx_desc->wb.status = 0;
i++;
if (i == tx_ring->count)
i = 0;
-
- count++;
- if (count == IGB_MAX_TX_CLEAN) {
- retval = false;
- goto done_cleaning;
- }
}
- oldhead = head;
- rmb();
- head = get_head(tx_ring);
- if (head == oldhead)
- goto done_cleaning;
- } /* while (1) */
-
-done_cleaning:
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
+ }
+
tx_ring->next_to_clean = i;
if (unlikely(count &&
@@ -3701,7 +3734,6 @@ done_cleaning:
&& !(rd32(E1000_STATUS) &
E1000_STATUS_TXOFF)) {
- tx_desc = E1000_TX_DESC(*tx_ring, i);
/* detected Tx unit hang */
dev_err(&adapter->pdev->dev,
"Detected Tx Unit Hang\n"
@@ -3710,9 +3742,9 @@ done_cleaning:
" TDT <%x>\n"
" next_to_use <%x>\n"
" next_to_clean <%x>\n"
- " head (WB) <%x>\n"
"buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
" jiffies <%lx>\n"
" desc.status <%x>\n",
tx_ring->queue_index,
@@ -3720,10 +3752,10 @@ done_cleaning:
readl(adapter->hw.hw_addr + tx_ring->tail),
tx_ring->next_to_use,
tx_ring->next_to_clean,
- head,
tx_ring->buffer_info[i].time_stamp,
+ eop,
jiffies,
- tx_desc->upper.fields.status);
+ eop_desc->wb.status);
netif_stop_subqueue(netdev, tx_ring->queue_index);
}
}
@@ -3733,7 +3765,7 @@ done_cleaning:
tx_ring->tx_stats.packets += total_packets;
adapter->net_stats.tx_bytes += total_bytes;
adapter->net_stats.tx_packets += total_packets;
- return retval;
+ return (count < tx_ring->count);
}
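
Note: igb_clean_tx_irq() no longer polls the head write-back word (get_head() is removed here, and the TDWBA programming earlier in the patch); instead each packet's last descriptor index is recorded in next_to_watch, and the cleaner reclaims a packet's buffers only once hardware has set the Descriptor Done bit (E1000_TXD_STAT_DD) in that descriptor's write-back status. A self-contained model of that bookkeeping, with toy types standing in for the real descriptor and buffer_info structures:

#include <stdio.h>
#include <stdbool.h>

#define RING 8
#define STAT_DD 0x1u

struct desc { unsigned int status; };
struct buf  { unsigned int next_to_watch; bool in_use; };

int main(void)
{
	struct desc ring[RING] = { { 0 } };
	struct buf  info[RING] = { { 0 } };
	unsigned int clean = 0;

	/* one 3-descriptor packet in slots 0..2; its last slot is "watched" */
	info[0].next_to_watch = 2;
	info[0].in_use = true;
	ring[2].status = STAT_DD;                      /* hardware reports completion */

	unsigned int eop = info[clean].next_to_watch;
	while (ring[eop].status & STAT_DD) {
		for (bool done = false; !done; clean = (clean + 1) % RING) {
			done = (clean == eop);
			info[clean].in_use = false;    /* reclaim the buffer */
			ring[clean].status = 0;        /* clear DD for reuse */
		}
		printf("cleaned up to descriptor %u\n", eop);
		eop = info[clean].next_to_watch;       /* next packet's EOP, if any */
	}
	return 0;
}
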
#ifdef CONFIG_IGB_LRO
@@ -3919,8 +3951,10 @@ send_up:
next_buffer = &rx_ring->buffer_info[i];
if (!(staterr & E1000_RXD_STAT_EOP)) {
- buffer_info->skb = xchg(&next_buffer->skb, skb);
- buffer_info->dma = xchg(&next_buffer->dma, 0);
+ buffer_info->skb = next_buffer->skb;
+ buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
goto next_desc;
}
@@ -3938,8 +3972,6 @@ send_up:
igb_receive_skb(rx_ring, staterr, rx_desc, skb);
- netdev->last_rx = jiffies;
-
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -4102,9 +4134,8 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
- data->reg_num
- & 0x1F, &data->val_out))
+ if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
@@ -4474,27 +4505,38 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
int err;
if (adapter->need_ioport)
err = pci_enable_device(pdev);
else
err = pci_enable_device_mem(pdev);
+
if (err) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
- pci_restore_state(pdev);
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
- igb_reset(adapter);
- wr32(E1000_WUS, ~0);
+ igb_reset(adapter);
+ wr32(E1000_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
+ "failed 0x%0x\n", err);
+ /* non-fatal, continue */
+ }
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
@@ -4522,7 +4564,6 @@ static void igb_io_resume(struct pci_dev *pdev)
/* let the f/w know that the h/w is now under the control of the
* driver. */
igb_get_hw_control(adapter);
-
}
/* igb_main.c */