Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ide/pci/aec62xx.c | 12
-rw-r--r--  drivers/ide/pci/serverworks.c | 10
-rw-r--r--  drivers/ide/pci/siimage.c | 6
-rw-r--r--  drivers/isdn/i4l/Kconfig | 1
-rw-r--r--  drivers/mmc/imxmmc.c | 69
-rw-r--r--  drivers/mmc/mmc.c | 55
-rw-r--r--  drivers/mmc/mmc_block.c | 60
-rw-r--r--  drivers/net/3c501.c | 1
-rw-r--r--  drivers/net/3c59x.c | 2
-rw-r--r--  drivers/net/8139cp.c | 2
-rw-r--r--  drivers/net/8139too.c | 2
-rw-r--r--  drivers/net/8390.c | 10
-rw-r--r--  drivers/net/Kconfig | 33
-rw-r--r--  drivers/net/Makefile | 11
-rw-r--r--  drivers/net/acenic.c | 2
-rw-r--r--  drivers/net/amd8111e.c | 2
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 2
-rw-r--r--  drivers/net/b44.c | 2
-rw-r--r--  drivers/net/bnx2.c | 2
-rw-r--r--  drivers/net/cassini.c | 2
-rw-r--r--  drivers/net/chelsio/cxgb2.c | 2
-rw-r--r--  drivers/net/defxx.c | 2
-rw-r--r--  drivers/net/dl2k.c | 2
-rw-r--r--  drivers/net/dm9000.c | 4
-rw-r--r--  drivers/net/e100.c | 30
-rw-r--r--  drivers/net/e1000/e1000.h | 6
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 257
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1077
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 26
-rw-r--r--  drivers/net/e1000/e1000_main.c | 154
-rw-r--r--  drivers/net/e1000/e1000_param.c | 161
-rw-r--r--  drivers/net/eepro100.c | 2
-rw-r--r--  drivers/net/epic100.c | 2
-rw-r--r--  drivers/net/fealnx.c | 2
-rw-r--r--  drivers/net/forcedeth.c | 560
-rw-r--r--  drivers/net/hp100.c | 1
-rw-r--r--  drivers/net/irda/mcs7780.c | 1
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 1
-rw-r--r--  drivers/net/ixgb/ixgb.h | 5
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 6
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c | 11
-rw-r--r--  drivers/net/ixgb/ixgb_ids.h | 1
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 152
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 228
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h | 47
-rw-r--r--  drivers/net/natsemi.c | 2
-rw-r--r--  drivers/net/ne2k-pci.c | 2
-rw-r--r--  drivers/net/netx-eth.c | 1
-rw-r--r--  drivers/net/ns83820.c | 2
-rw-r--r--  drivers/net/pci-skeleton.c | 2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 3
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 15
-rw-r--r--  drivers/net/pcnet32.c | 2
-rw-r--r--  drivers/net/phy/smsc.c | 1
-rw-r--r--  drivers/net/phy/vitesse.c | 1
-rw-r--r--  drivers/net/qla3xxx.c | 3537
-rw-r--r--  drivers/net/qla3xxx.h | 1194
-rw-r--r--  drivers/net/r8169.c | 2
-rw-r--r--  drivers/net/rrunner.c | 2
-rw-r--r--  drivers/net/s2io.c | 2
-rw-r--r--  drivers/net/saa9730.c | 2
-rw-r--r--  drivers/net/sis190.c | 2
-rw-r--r--  drivers/net/sis900.c | 3
-rw-r--r--  drivers/net/sk98lin/skge.c | 2
-rw-r--r--  drivers/net/skfp/skfddi.c | 2
-rw-r--r--  drivers/net/skge.c | 204
-rw-r--r--  drivers/net/skge.h | 1
-rw-r--r--  drivers/net/sky2.c | 188
-rw-r--r--  drivers/net/sky2.h | 3
-rw-r--r--  drivers/net/slhc.c | 28
-rw-r--r--  drivers/net/smc911x.c | 2
-rw-r--r--  drivers/net/starfire.c | 2
-rw-r--r--  drivers/net/sundance.c | 15
-rw-r--r--  drivers/net/sungem.c | 2
-rw-r--r--  drivers/net/tc35815.c | 2
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/net/tokenring/3c359.c | 2
-rw-r--r--  drivers/net/tokenring/lanstreamer.c | 2
-rw-r--r--  drivers/net/tulip/21142.c | 6
-rw-r--r--  drivers/net/tulip/de2104x.c | 18
-rw-r--r--  drivers/net/tulip/de4x5.c | 2
-rw-r--r--  drivers/net/tulip/dmfe.c | 2
-rw-r--r--  drivers/net/tulip/eeprom.c | 2
-rw-r--r--  drivers/net/tulip/interrupt.c | 2
-rw-r--r--  drivers/net/tulip/media.c | 2
-rw-r--r--  drivers/net/tulip/pnic.c | 2
-rw-r--r--  drivers/net/tulip/pnic2.c | 2
-rw-r--r--  drivers/net/tulip/timer.c | 16
-rw-r--r--  drivers/net/tulip/tulip.h | 36
-rw-r--r--  drivers/net/tulip/tulip_core.c | 94
-rw-r--r--  drivers/net/tulip/uli526x.c | 12
-rw-r--r--  drivers/net/tulip/winbond-840.c | 82
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c | 2
-rw-r--r--  drivers/net/typhoon.c | 2
-rw-r--r--  drivers/net/via-rhine.c | 2
-rw-r--r--  drivers/net/via-velocity.c | 2
-rw-r--r--  drivers/net/via-velocity.h | 19
-rw-r--r--  drivers/net/wan/cycx_main.c | 1
-rw-r--r--  drivers/net/wan/dlci.c | 1
-rw-r--r--  drivers/net/wan/dscc4.c | 2
-rw-r--r--  drivers/net/wan/farsync.c | 2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2
-rw-r--r--  drivers/net/wan/pc300_drv.c | 2
-rw-r--r--  drivers/net/wan/pci200syn.c | 2
-rw-r--r--  drivers/net/wan/sdla.c | 1
-rw-r--r--  drivers/net/wan/wanxl.c | 2
-rw-r--r--  drivers/net/wireless/atmel_pci.c | 2
-rw-r--r--  drivers/net/wireless/ipw2100.c | 2
-rw-r--r--  drivers/net/wireless/ipw2200.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_nortel.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_pci.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_plx.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_tmd.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_hotplug.c | 2
-rw-r--r--  drivers/net/yellowfin.c | 2
-rw-r--r--  drivers/scsi/ata_piix.c | 36
-rw-r--r--  drivers/scsi/sata_mv.c | 3
-rw-r--r--  drivers/scsi/sata_via.c | 1
-rw-r--r--  drivers/usb/input/hid-core.c | 4
-rw-r--r--  drivers/usb/input/usbtouchscreen.c | 2
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 5
123 files changed, 6984 insertions(+), 1666 deletions(-)
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index a7c725f8bf6..f286079d233 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -425,12 +425,12 @@ static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_devi
return d->init_setup(dev, d);
}
-static const struct pci_device_id aec62xx_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R), 2 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865), 3 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R), 4 },
+static struct pci_device_id aec62xx_pci_tbl[] = {
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, aec62xx_pci_tbl);
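The three IDE hunks in this patch (aec62xx, serverworks, siimage) replace the PCI_DEVICE() shorthand with explicit seven-field initializers. A likely reason, sketched here from the usual upstream definitions rather than stated in the patch itself: PCI_DEVICE() fills only the first four fields with designated initializers, so a trailing positional value lands in .class instead of .driver_data, and these drivers use driver_data as an index into their chipset tables.

        /* Sketch, assuming the standard definitions from
         * mod_devicetable.h / pci.h of this era: */
        struct pci_device_id {
                __u32 vendor, device;           /* vendor/device ID or PCI_ANY_ID */
                __u32 subvendor, subdevice;     /* subsystem IDs or PCI_ANY_ID */
                __u32 class, class_mask;        /* class triplet to match */
                kernel_ulong_t driver_data;     /* private to the driver */
        };

        #define PCI_DEVICE(vend, dev) \
                .vendor = (vend), .device = (dev), \
                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

        /* { PCI_DEVICE(V, D), 3 } resumes positional initialization after
         * .subdevice, so the 3 initializes .class and driver_data stays 0;
         * the explicit seven-field entries put the table index back where
         * the probe routine reads it. */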
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index 03677bff0d7..f063d954236 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -649,11 +649,11 @@ static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device
}
static struct pci_device_id svwks_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 1},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 3},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 4},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 25ceb4a39ed..20b392948f3 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -1082,10 +1082,10 @@ static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_devi
}
static struct pci_device_id siimage_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), 0},
+ { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_3112), 1},
- { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_1210SA), 2},
+ { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_1210SA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
#endif
{ 0, },
};
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index a4f7288a1fc..3ef567b99c7 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -5,6 +5,7 @@
config ISDN_PPP
bool "Support synchronous PPP"
depends on INET
+ select SLHC
help
Over digital connections such as ISDN, there is no need to
synchronize sender and recipient's clocks with start and stop bits
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 7ca9e95bdf8..fb6565b98f3 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -91,6 +91,8 @@ struct imxmci_host {
int dma_allocated;
unsigned char actual_bus_width;
+
+ int prev_cmd_code;
};
#define IMXMCI_PEND_IRQ_b 0
@@ -248,16 +250,14 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
* partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
* This is required for SCR read at least.
*/
- if (datasz < 64) {
+ if (datasz < 512) {
host->dma_size = datasz;
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
/* Hack to enable read SCR */
- if(datasz < 16) {
- MMC_NOB = 1;
- MMC_BLK_LEN = 16;
- }
+ MMC_NOB = 1;
+ MMC_BLK_LEN = 512;
} else {
host->dma_dir = DMA_TO_DEVICE;
}
@@ -409,6 +409,9 @@ static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *
spin_unlock_irqrestore(&host->lock, flags);
+ if(req && req->cmd)
+ host->prev_cmd_code = req->cmd->opcode;
+
host->req = NULL;
host->cmd = NULL;
host->data = NULL;
@@ -553,7 +556,6 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
int i;
int burst_len;
- int flush_len;
int trans_done = 0;
unsigned int stat = *pstat;
@@ -566,44 +568,43 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
stat);
+ udelay(20); /* required for clocks < 8MHz*/
+
if(host->dma_dir == DMA_FROM_DEVICE) {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE,
- 20, "imxmci_cpu_driven_data read");
+ 50, "imxmci_cpu_driven_data read");
while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
- (host->data_cnt < host->dma_size)) {
- if(burst_len >= host->dma_size - host->data_cnt) {
- flush_len = burst_len;
- burst_len = host->dma_size - host->data_cnt;
- flush_len -= burst_len;
- host->data_cnt = host->dma_size;
- trans_done = 1;
- } else {
- flush_len = 0;
- host->data_cnt += burst_len;
- }
+ (host->data_cnt < 512)) {
+
+ udelay(20); /* required for clocks < 8MHz*/
for(i = burst_len; i>=2 ; i-=2) {
- *(host->data_ptr++) = MMC_BUFFER_ACCESS;
- udelay(20); /* required for clocks < 8MHz*/
+ u16 data;
+ data = MMC_BUFFER_ACCESS;
+ udelay(10); /* required for clocks < 8MHz*/
+ if(host->data_cnt+2 <= host->dma_size) {
+ *(host->data_ptr++) = data;
+ } else {
+ if(host->data_cnt < host->dma_size)
+ *(u8*)(host->data_ptr) = data;
+ }
+ host->data_cnt += 2;
}
- if(i == 1)
- *(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS;
-
stat = MMC_STATUS;
- /* Flush extra bytes from FIFO */
- while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
- i = MMC_BUFFER_ACCESS;
- stat = MMC_STATUS;
- stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
- }
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n",
- burst_len, stat);
+ dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
+ host->data_cnt, burst_len, stat);
}
+
+ if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
+ trans_done = 1;
+
+ if(host->dma_size & 0x1ff)
+ stat &= ~STATUS_CRC_READ_ERR;
+
} else {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FE,
@@ -692,8 +693,8 @@ static void imxmci_tasklet_fnc(unsigned long data)
what, stat, MMC_INT_MASK);
dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
- dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n",
- host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size);
+ dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
+ host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
}
if(!host->present || timeout)
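The reworked imxmmc read loop always programs a 512-byte block (MMC_NOB = 1, MMC_BLK_LEN = 512) and drains the FIFO in 16-bit reads, storing only the bytes that fall inside the requested transfer. A minimal sketch of that tail handling, reusing the driver's field names for illustration only:

        u16 data = MMC_BUFFER_ACCESS;           /* pop one FIFO halfword */

        if (host->data_cnt + 2 <= host->dma_size)
                *(host->data_ptr++) = data;     /* whole halfword fits */
        else if (host->data_cnt < host->dma_size)
                *(u8 *)(host->data_ptr) = data; /* odd tail: keep one byte */
        /* halfwords beyond dma_size are still read, but discarded */
        host->data_cnt += 2;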
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 33525bdf2ab..74eaaee66de 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -247,6 +247,55 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
EXPORT_SYMBOL(mmc_wait_for_app_cmd);
+/**
+ * mmc_set_data_timeout - set the timeout for a data command
+ * @data: data phase for command
+ * @card: the MMC card associated with the data transfer
+ * @write: flag to differentiate reads from writes
+ */
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
+ int write)
+{
+ unsigned int mult;
+
+ /*
+ * SD cards use a 100 multiplier rather than 10
+ */
+ mult = mmc_card_sd(card) ? 100 : 10;
+
+ /*
+ * Scale up the multiplier (and therefore the timeout) by
+ * the r2w factor for writes.
+ */
+ if (write)
+ mult <<= card->csd.r2w_factor;
+
+ data->timeout_ns = card->csd.tacc_ns * mult;
+ data->timeout_clks = card->csd.tacc_clks * mult;
+
+ /*
+ * SD cards also have an upper limit on the timeout.
+ */
+ if (mmc_card_sd(card)) {
+ unsigned int timeout_us, limit_us;
+
+ timeout_us = data->timeout_ns / 1000;
+ timeout_us += data->timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (write)
+ limit_us = 250000;
+ else
+ limit_us = 100000;
+
+ if (timeout_us > limit_us) {
+ data->timeout_ns = limit_us * 1000;
+ data->timeout_clks = 0;
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_set_data_timeout);
+
static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
/**
@@ -908,11 +957,9 @@ static void mmc_read_scrs(struct mmc_host *host)
{
int err;
struct mmc_card *card;
-
struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_data data;
-
struct scatterlist sg;
list_for_each_entry(card, &host->cards, node) {
@@ -947,8 +994,8 @@ static void mmc_read_scrs(struct mmc_host *host)
memset(&data, 0, sizeof(struct mmc_data));
- data.timeout_ns = card->csd.tacc_ns * 10;
- data.timeout_clks = card->csd.tacc_clks * 10;
+ mmc_set_data_timeout(&data, card, 0);
+
data.blksz_bits = 3;
data.blksz = 1 << 3;
data.blocks = 1;
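mmc_set_data_timeout() centralizes the CSD-based timeout computation that was previously open-coded at each call site. A quick worked example under assumed CSD values (not taken from the patch): for an SD card with tacc_ns = 1000000 and tacc_clks = 0, a read uses mult = 100, giving timeout_ns = 100 ms; converted to 100000 us, that sits exactly at the SD read cap of 100000 us and is left unchanged, while a write with r2w_factor = 2 would first scale mult to 400 (400 ms) and then be clamped to the 250 ms write limit.

        memset(&data, 0, sizeof(struct mmc_data));
        mmc_set_data_timeout(&data, card, 0);   /* 0 = read, nonzero = write */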
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 115cc21094b..a0e0dad1b41 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -30,6 +30,7 @@
#include <linux/mutex.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
#include <asm/system.h>
@@ -171,8 +172,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.arg = req->sector << 9;
brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.timeout_ns = card->csd.tacc_ns * 10;
- brq.data.timeout_clks = card->csd.tacc_clks * 10;
brq.data.blksz_bits = md->block_bits;
brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
@@ -180,6 +179,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
+
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
brq.data.flags |= MMC_DATA_READ;
@@ -187,12 +188,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.opcode = MMC_WRITE_BLOCK;
brq.data.flags |= MMC_DATA_WRITE;
brq.data.blocks = 1;
-
- /*
- * Scale up the timeout by the r2w factor
- */
- brq.data.timeout_ns <<= card->csd.r2w_factor;
- brq.data.timeout_clks <<= card->csd.r2w_factor;
}
if (brq.data.blocks > 1) {
@@ -324,52 +319,11 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->read_only = mmc_blk_readonly(card);
/*
- * Figure out a workable block size. MMC cards have:
- * - two block sizes, one for read and one for write.
- * - may support partial reads and/or writes
- * (allows block sizes smaller than specified)
- */
- md->block_bits = card->csd.read_blkbits;
- if (card->csd.write_blkbits != card->csd.read_blkbits) {
- if (card->csd.write_blkbits < card->csd.read_blkbits &&
- card->csd.read_partial) {
- /*
- * write block size is smaller than read block
- * size, but we support partial reads, so choose
- * the smaller write block size.
- */
- md->block_bits = card->csd.write_blkbits;
- } else if (card->csd.write_blkbits > card->csd.read_blkbits &&
- card->csd.write_partial) {
- /*
- * read block size is smaller than write block
- * size, but we support partial writes. Use read
- * block size.
- */
- } else {
- /*
- * We don't support this configuration for writes.
- */
- printk(KERN_ERR "%s: unable to select block size for "
- "writing (rb%u wb%u rp%u wp%u)\n",
- mmc_card_id(card),
- 1 << card->csd.read_blkbits,
- 1 << card->csd.write_blkbits,
- card->csd.read_partial,
- card->csd.write_partial);
- md->read_only = 1;
- }
- }
-
- /*
- * Refuse to allow block sizes smaller than 512 bytes.
+ * Both SD and MMC specifications state (although a bit
+ * unclearly in the MMC case) that a block size of 512
+ * bytes must always be supported by the card.
*/
- if (md->block_bits < 9) {
- printk(KERN_ERR "%s: unable to support block size %u\n",
- mmc_card_id(card), 1 << md->block_bits);
- ret = -EINVAL;
- goto err_kfree;
- }
+ md->block_bits = 9;
md->disk = alloc_disk(1 << MMC_SHIFT);
if (md->disk == NULL) {
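With md->block_bits pinned to 9, the request setup in mmc_blk_issue_rq() degenerates to one block per 512-byte sector; the shift in the blocks calculation becomes zero:

        brq.data.blksz_bits = md->block_bits;                      /* always 9 */
        brq.data.blksz = 1 << md->block_bits;                      /* 512 bytes */
        brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); /* == nr_sectors */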
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 07136ec423b..d7b115a3596 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -120,7 +120,6 @@ static const char version[] =
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/config.h> /* for CONFIG_IP_MULTICAST */
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80e8ca013e4..7c23813bb09 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -3169,7 +3169,7 @@ static int __init vortex_init(void)
{
int pci_rc, eisa_rc;
- pci_rc = pci_module_init(&vortex_driver);
+ pci_rc = pci_register_driver(&vortex_driver);
eisa_rc = vortex_eisa_init();
if (pci_rc == 0)
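This is the first of many pci_module_init() -> pci_register_driver() conversions in this patch; pci_module_init() was a thin compatibility wrapper, and the drivers now call the canonical registration API directly. A minimal sketch of the resulting module boilerplate, with all names hypothetical:

        static struct pci_driver example_driver = {
                .name           = "example",
                .id_table       = example_pci_tbl,
                .probe          = example_probe,
                .remove         = example_remove,
        };

        static int __init example_init(void)
        {
                return pci_register_driver(&example_driver);
        }

        static void __exit example_exit(void)
        {
                pci_unregister_driver(&example_driver);
        }

        module_init(example_init);
        module_exit(example_exit);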
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 1428bb7715a..4c2e76326c4 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -2098,7 +2098,7 @@ static int __init cp_init (void)
#ifdef MODULE
printk("%s", version);
#endif
- return pci_module_init (&cp_driver);
+ return pci_register_driver(&cp_driver);
}
static void __exit cp_exit (void)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index e4f4eaff767..2a707747ed8 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2629,7 +2629,7 @@ static int __init rtl8139_init_module (void)
printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
#endif
- return pci_module_init (&rtl8139_pci_driver);
+ return pci_register_driver(&rtl8139_pci_driver);
}
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index d2935ae3981..3eb7048684a 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -299,7 +299,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Slow phase with lock held.
*/
- disable_irq_nosync(dev->irq);
+ disable_irq_nosync_lockdep(dev->irq);
spin_lock(&ei_local->page_lock);
@@ -338,7 +338,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
spin_unlock(&ei_local->page_lock);
- enable_irq(dev->irq);
+ enable_irq_lockdep(dev->irq);
ei_local->stat.tx_errors++;
return 1;
}
@@ -379,7 +379,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
spin_unlock(&ei_local->page_lock);
- enable_irq(dev->irq);
+ enable_irq_lockdep(dev->irq);
dev_kfree_skb (skb);
ei_local->stat.tx_bytes += send_length;
@@ -505,9 +505,9 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
#ifdef CONFIG_NET_POLL_CONTROLLER
void ei_poll(struct net_device *dev)
{
- disable_irq(dev->irq);
+ disable_irq_lockdep(dev->irq);
ei_interrupt(dev->irq, dev, NULL);
- enable_irq(dev->irq);
+ enable_irq_lockdep(dev->irq);
}
#endif
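The 8390 xmit and poll paths switch to the lockdep-aware IRQ helpers. With CONFIG_LOCKDEP enabled these variants also disable local interrupts, so taking ei_local->page_lock with the device IRQ masked no longer looks irq-unsafe to the lock validator. Roughly, paraphrasing linux/interrupt.h of this era (not part of the patch):

        static inline void disable_irq_nosync_lockdep(unsigned int irq)
        {
                disable_irq_nosync(irq);
        #ifdef CONFIG_LOCKDEP
                local_irq_disable();    /* hide the irq-unsafe window from lockdep */
        #endif
        }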
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a2bd8119270..de4f9e1f2ca 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1411,6 +1411,22 @@ config FORCEDETH
<file:Documentation/networking/net-modules.txt>. The module will be
called forcedeth.
+config FORCEDETH_NAPI
+ bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)"
+ depends on FORCEDETH && EXPERIMENTAL
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
+ If in doubt, say N.
config CS89x0
tristate "CS89x0 support"
@@ -2290,6 +2306,15 @@ config MV643XX_ETH_2
This enables support for Port 2 of the Marvell MV643XX Gigabit
Ethernet.
+config QLA3XXX
+ tristate "QLogic QLA3XXX Network Driver Support"
+ depends on PCI
+ help
+ This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qla3xxx.
+
endmenu
#
@@ -2550,6 +2575,7 @@ config PLIP
config PPP
tristate "PPP (point-to-point protocol) support"
+ select SLHC
---help---
PPP (Point to Point Protocol) is a newer and better SLIP. It serves
the same purpose: sending Internet traffic over telephone (and other
@@ -2730,6 +2756,7 @@ config SLIP
config SLIP_COMPRESSED
bool "CSLIP compressed headers"
depends on SLIP
+ select SLHC
---help---
This protocol is faster than SLIP because it uses compression on the
TCP/IP headers (not on the data itself), but it has to be supported
@@ -2742,6 +2769,12 @@ config SLIP_COMPRESSED
<http://www.tldp.org/docs.html#howto>, explains how to configure
CSLIP. This won't enlarge your kernel.
+config SLHC
+ tristate
+ help
+ This option enables Van Jacobson serial line header compression
+ routines.
+
config SLIP_SMART
bool "Keepalive and linefill"
depends on SLIP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8427bf9dec9..6ff17649c0f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,10 +2,6 @@
# Makefile for the Linux network (ethercard) device drivers.
#
-ifeq ($(CONFIG_ISDN_PPP),y)
- obj-$(CONFIG_ISDN) += slhc.o
-endif
-
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
@@ -113,8 +109,9 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
-obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
+obj-$(CONFIG_PPP) += ppp_generic.o
obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
@@ -123,9 +120,7 @@ obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
obj-$(CONFIG_SLIP) += slip.o
-ifeq ($(CONFIG_SLIP_COMPRESSED),y)
- obj-$(CONFIG_SLIP) += slhc.o
-endif
+obj-$(CONFIG_SLHC) += slhc.o
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 1c01e9b3d07..c0f3574b470 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -725,7 +725,7 @@ static struct pci_driver acenic_pci_driver = {
static int __init acenic_init(void)
{
- return pci_module_init(&acenic_pci_driver);
+ return pci_register_driver(&acenic_pci_driver);
}
static void __exit acenic_exit(void)
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index ed322a76980..2ef8e554263 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -2158,7 +2158,7 @@ static struct pci_driver amd8111e_driver = {
static int __init amd8111e_init(void)
{
- return pci_module_init(&amd8111e_driver);
+ return pci_register_driver(&amd8111e_driver);
}
static void __exit amd8111e_cleanup(void)
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 979a33df0a8..fc256c197cd 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -177,7 +177,7 @@ static struct pci_driver com20020pci_driver = {
static int __init com20020pci_init(void)
{
BUGLVL(D_NORMAL) printk(VERSION);
- return pci_module_init(&com20020pci_driver);
+ return pci_register_driver(&com20020pci_driver);
}
static void __exit com20020pci_cleanup(void)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index bea0fc0ede2..17eb2912971 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2354,7 +2354,7 @@ static int __init b44_init(void)
dma_desc_align_mask = ~(dma_desc_align_size - 1);
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
- return pci_module_init(&b44_driver);
+ return pci_register_driver(&b44_driver);
}
static void __exit b44_cleanup(void)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 652eb05a6c2..654b903985c 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -6016,7 +6016,7 @@ static struct pci_driver bnx2_pci_driver = {
static int __init bnx2_init(void)
{
- return pci_module_init(&bnx2_pci_driver);
+ return pci_register_driver(&bnx2_pci_driver);
}
static void __exit bnx2_cleanup(void)
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index a31544ccb3c..26040abfef6 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -5245,7 +5245,7 @@ static int __init cas_init(void)
else
link_transition_timeout = 0;
- return pci_module_init(&cas_driver);
+ return pci_register_driver(&cas_driver);
}
static void __exit cas_cleanup(void)
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index e67872433e9..b6de184e469 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -1243,7 +1243,7 @@ static struct pci_driver driver = {
static int __init t1_init_module(void)
{
- return pci_module_init(&driver);
+ return pci_register_driver(&driver);
}
static void __exit t1_cleanup_module(void)
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 91cc8cbdd44..7d06dedbfb2 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -3444,7 +3444,7 @@ static int __init dfx_init(void)
{
int rc_pci, rc_eisa;
- rc_pci = pci_module_init(&dfx_driver);
+ rc_pci = pci_register_driver(&dfx_driver);
if (rc_pci >= 0) dfx_have_pci = 1;
rc_eisa = dfx_eisa_init();
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 402961e68c8..a572c297056 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1815,7 +1815,7 @@ static struct pci_driver rio_driver = {
static int __init
rio_init (void)
{
- return pci_module_init (&rio_driver);
+ return pci_register_driver(&rio_driver);
}
static void __exit
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 3d76fa144c4..a860ebbbf81 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -377,8 +377,8 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
kfree(db->data_req);
}
- if (db->addr_res != NULL) {
- release_resource(db->addr_res);
+ if (db->addr_req != NULL) {
+ release_resource(db->addr_req);
kfree(db->addr_req);
}
}
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index ce850f1078b..47d970896a5 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -158,10 +158,10 @@
#define DRV_NAME "e100"
-#define DRV_EXT "-NAPI"
-#define DRV_VERSION "3.5.10-k2"DRV_EXT
+#define DRV_EXT "-NAPI"
+#define DRV_VERSION "3.5.16-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
-#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
@@ -1395,15 +1395,11 @@ static int e100_phy_init(struct nic *nic)
}
if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
- (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
- /* enable/disable MDI/MDI-X auto-switching.
- MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
- if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
- (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
- !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
- mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
- else
- mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
+ (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+ !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ /* enable/disable MDI/MDI-X auto-switching. */
+ mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
+ nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
}
return 0;
@@ -1767,11 +1763,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
- if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
+ if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
return -ENOMEM;
/* Align, init, and map the RFD. */
- rx->skb->dev = nic->netdev;
skb_reserve(rx->skb, NET_IP_ALIGN);
memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
@@ -2147,7 +2142,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
e100_start_receiver(nic, NULL);
- if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
+ if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
err = -ENOMEM;
goto err_loopback_none;
}
@@ -2799,6 +2794,7 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel
/* Detach; put netif into state similar to hotplug unplug. */
netif_poll_enable(netdev);
netif_device_detach(netdev);
+ pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -2877,7 +2873,7 @@ static int __init e100_init_module(void)
printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
}
- return pci_module_init(&e100_driver);
+ return pci_register_driver(&e100_driver);
}
static void __exit e100_cleanup_module(void)
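Besides the MDI-X simplification, the e100 changes switch skb allocation to netdev_alloc_skb(), which records the owning device itself; that is why the explicit rx->skb->dev assignment disappears. Typical RX refill shape, sketched:

        skb = netdev_alloc_skb(netdev, RFD_BUF_LEN + NET_IP_ALIGN);
        if (!skb)
                return -ENOMEM;
        skb_reserve(skb, NET_IP_ALIGN); /* offset data so the IP header is aligned */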
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index d304297c496..98afa9c2057 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -242,12 +242,10 @@ struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct vlan_group *vlgrp;
- uint16_t mng_vlan_id;
+ uint16_t mng_vlan_id;
uint32_t bd_number;
uint32_t rx_buffer_len;
- uint32_t part_num;
uint32_t wol;
- uint32_t ksp3_port_a;
uint32_t smartspeed;
uint32_t en_mng_pt;
uint16_t link_speed;
@@ -342,7 +340,9 @@ struct e1000_adapter {
boolean_t tso_force;
#endif
boolean_t smart_power_down; /* phy smart power down */
+ boolean_t quad_port_a;
unsigned long flags;
+ uint32_t eeprom_wol;
};
enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 88a82ba88f5..3fccffdb27b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -183,6 +183,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return -EINVAL;
}
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->autoneg = 1;
if (hw->media_type == e1000_media_type_fiber)
@@ -199,16 +202,20 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ADVERTISED_TP;
ecmd->advertising = hw->autoneg_advertised;
} else
- if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+ if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ clear_bit(__E1000_RESETTING, &adapter->flags);
return -EINVAL;
+ }
/* reset the link */
- if (netif_running(adapter->netdev))
- e1000_reinit_locked(adapter);
- else
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
e1000_reset(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
}
@@ -238,9 +245,13 @@ e1000_set_pauseparam(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
adapter->fc_autoneg = pause->autoneg;
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
if (pause->rx_pause && pause->tx_pause)
hw->fc = e1000_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
@@ -253,15 +264,17 @@ e1000_set_pauseparam(struct net_device *netdev,
hw->original_fc = hw->fc;
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
- if (netif_running(adapter->netdev))
- e1000_reinit_locked(adapter);
- else
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
e1000_reset(adapter);
} else
- return ((hw->media_type == e1000_media_type_fiber) ?
- e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+ retval = ((hw->media_type == e1000_media_type_fiber) ?
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw));
- return 0;
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return retval;
}
static uint32_t
@@ -415,12 +428,12 @@ e1000_get_regs(struct net_device *netdev,
regs_buff[23] = regs_buff[18]; /* mdix mode */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
} else {
- e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
regs_buff[13] = (uint32_t)phy_data; /* cable length */
regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
- e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
regs_buff[18] = regs_buff[13]; /* cable polarity */
regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
@@ -696,7 +709,6 @@ e1000_set_ringparam(struct net_device *netdev,
}
clear_bit(__E1000_RESETTING, &adapter->flags);
-
return 0;
err_setup_tx:
e1000_free_all_rx_resources(adapter);
@@ -881,16 +893,17 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
*data = 0;
+ /* NOTE: we don't test MSI interrupts here, yet */
/* Hook up test interrupt handler just for this test */
if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev)) {
- shared_int = FALSE;
- } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)){
+ netdev->name, netdev))
+ shared_int = FALSE;
+ else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
- DPRINTK(PROBE,INFO, "testing %s interrupt\n",
+ DPRINTK(HW, INFO, "testing %s interrupt\n",
(shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
@@ -1256,11 +1269,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
/* autoneg off */
e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
- } else if (adapter->hw.phy_type == e1000_phy_gg82563) {
+ } else if (adapter->hw.phy_type == e1000_phy_gg82563)
e1000_write_phy_reg(&adapter->hw,
GG82563_PHY_KMRN_MODE_CTRL,
0x1CC);
- }
ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
@@ -1288,9 +1300,9 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
}
if (adapter->hw.media_type == e1000_media_type_copper &&
- adapter->hw.phy_type == e1000_phy_m88) {
+ adapter->hw.phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
- } else {
+ else {
/* Set the ILOS bit on the fiber Nic if half
* duplex link is detected. */
stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
@@ -1426,11 +1438,10 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_82546_rev_3:
default:
hw->autoneg = TRUE;
- if (hw->phy_type == e1000_phy_gg82563) {
+ if (hw->phy_type == e1000_phy_gg82563)
e1000_write_phy_reg(hw,
GG82563_PHY_KMRN_MODE_CTRL,
0x180);
- }
e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK;
@@ -1590,6 +1601,8 @@ e1000_diag_test_count(struct net_device *netdev)
return E1000_TEST_LEN;
}
+extern void e1000_power_up_phy(struct e1000_adapter *);
+
static void
e1000_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, uint64_t *data)
@@ -1606,6 +1619,8 @@ e1000_diag_test(struct net_device *netdev,
uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
uint8_t autoneg = adapter->hw.autoneg;
+ DPRINTK(HW, INFO, "offline testing starting\n");
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
if (e1000_link_test(adapter, &data[4]))
@@ -1629,6 +1644,8 @@ e1000_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
e1000_reset(adapter);
+ /* make sure the phy is powered up */
+ e1000_power_up_phy(adapter);
if (e1000_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1642,6 +1659,7 @@ e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
+ DPRINTK(HW, INFO, "online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1657,14 +1675,12 @@ e1000_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000);
}
-static void
-e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int retval = 1; /* fail by default */
- switch (adapter->hw.device_id) {
- case E1000_DEV_ID_82542:
+ switch (hw->device_id) {
case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER:
case E1000_DEV_ID_82544EI_FIBER:
@@ -1672,52 +1688,87 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
case E1000_DEV_ID_82545EM_FIBER:
case E1000_DEV_ID_82545EM_COPPER:
case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_PCIE:
+ /* these don't support WoL at all */
wol->supported = 0;
- wol->wolopts = 0;
- return;
-
- case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- /* device id 10B5 port-A supports wol */
- if (!adapter->ksp3_port_a) {
- wol->supported = 0;
- return;
- }
- /* KSP3 does not suppport UCAST wake-ups for any interface */
- wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
-
- if (adapter->wol & E1000_WUFC_EX)
- DPRINTK(DRV, ERR, "Interface does not support "
- "directed (unicast) frame wake-up packets\n");
- wol->wolopts = 0;
- goto do_defaults;
-
+ break;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82571EB_FIBER:
- /* Wake events only supported on port A for dual fiber */
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_COPPER:
+ /* Wake events not supported on port B */
if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
wol->supported = 0;
- wol->wolopts = 0;
- return;
+ break;
}
- /* Fall Through */
-
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ /* quad port adapters only support WoL on port A */
+ if (!adapter->quad_port_a) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
default:
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
- wol->wolopts = 0;
+ /* dual port cards only support WoL on port A from now on
+ * unless it was enabled in the eeprom for port B
+ * so exclude FUNC_1 ports from having WoL enabled */
+ if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 &&
+ !adapter->eeprom_wol) {
+ wol->supported = 0;
+ break;
+ }
-do_defaults:
- if (adapter->wol & E1000_WUFC_EX)
- wol->wolopts |= WAKE_UCAST;
- if (adapter->wol & E1000_WUFC_MC)
- wol->wolopts |= WAKE_MCAST;
- if (adapter->wol & E1000_WUFC_BC)
- wol->wolopts |= WAKE_BCAST;
- if (adapter->wol & E1000_WUFC_MAG)
- wol->wolopts |= WAKE_MAGIC;
+ retval = 0;
+ }
+
+ return retval;
+}
+
+static void
+e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ /* this function will set ->supported = 0 and return 1 if wol is not
+ * supported by this hardware */
+ if (e1000_wol_exclusion(adapter, wol))
return;
+
+ /* apply any specific unsupported masks here */
+ switch (adapter->hw.device_id) {
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ /* KSP3 does not support UCAST wake-ups */
+ wol->supported &= ~WAKE_UCAST;
+
+ if (adapter->wol & E1000_WUFC_EX)
+ DPRINTK(DRV, ERR, "Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
+ break;
+ default:
+ break;
}
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+
+ return;
}
static int
@@ -1726,51 +1777,35 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- switch (adapter->hw.device_id) {
- case E1000_DEV_ID_82542:
- case E1000_DEV_ID_82543GC_FIBER:
- case E1000_DEV_ID_82543GC_COPPER:
- case E1000_DEV_ID_82544EI_FIBER:
- case E1000_DEV_ID_82546EB_QUAD_COPPER:
- case E1000_DEV_ID_82546GB_QUAD_COPPER:
- case E1000_DEV_ID_82545EM_FIBER:
- case E1000_DEV_ID_82545EM_COPPER:
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (e1000_wol_exclusion(adapter, wol))
return wol->wolopts ? -EOPNOTSUPP : 0;
+ switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- /* device id 10B5 port-A supports wol */
- if (!adapter->ksp3_port_a)
- return wol->wolopts ? -EOPNOTSUPP : 0;
-
if (wol->wolopts & WAKE_UCAST) {
DPRINTK(DRV, ERR, "Interface does not support "
"directed (unicast) frame wake-up packets\n");
return -EOPNOTSUPP;
}
-
- case E1000_DEV_ID_82546EB_FIBER:
- case E1000_DEV_ID_82546GB_FIBER:
- case E1000_DEV_ID_82571EB_FIBER:
- /* Wake events only supported on port A for dual fiber */
- if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
- return wol->wolopts ? -EOPNOTSUPP : 0;
- /* Fall Through */
-
+ break;
default:
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
- return -EOPNOTSUPP;
+ break;
+ }
- adapter->wol = 0;
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
- if (wol->wolopts & WAKE_UCAST)
- adapter->wol |= E1000_WUFC_EX;
- if (wol->wolopts & WAKE_MCAST)
- adapter->wol |= E1000_WUFC_MC;
- if (wol->wolopts & WAKE_BCAST)
- adapter->wol |= E1000_WUFC_BC;
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol |= E1000_WUFC_MAG;
- }
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
return 0;
}
@@ -1895,8 +1930,8 @@ static struct ethtool_ops e1000_ethtool_ops = {
.get_regs = e1000_get_regs,
.get_wol = e1000_get_wol,
.set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = e1000_get_eeprom_len,
@@ -1904,17 +1939,17 @@ static struct ethtool_ops e1000_ethtool_ops = {
.set_eeprom = e1000_set_eeprom,
.get_ringparam = e1000_get_ringparam,
.set_ringparam = e1000_set_ringparam,
- .get_pauseparam = e1000_get_pauseparam,
- .set_pauseparam = e1000_set_pauseparam,
- .get_rx_csum = e1000_get_rx_csum,
- .set_rx_csum = e1000_set_rx_csum,
- .get_tx_csum = e1000_get_tx_csum,
- .set_tx_csum = e1000_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .get_rx_csum = e1000_get_rx_csum,
+ .set_rx_csum = e1000_set_rx_csum,
+ .get_tx_csum = e1000_get_tx_csum,
+ .set_tx_csum = e1000_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
- .set_tso = e1000_set_tso,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = e1000_set_tso,
#endif
.self_test_count = e1000_diag_test_count,
.self_test = e1000_diag_test,
@@ -1922,7 +1957,7 @@ static struct ethtool_ops e1000_ethtool_ops = {
.phys_id = e1000_phys_id,
.get_stats_count = e1000_get_stats_count,
.get_ethtool_stats = e1000_get_ethtool_stats,
- .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
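A recurring pattern in the ethtool hunks above: the __E1000_RESETTING flag bit is used as a sleeping lock so that ethtool-triggered down/up cycles cannot race each other or the reset path. In sketch form:

        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);              /* wait, sleeping 1 ms per retry */

        e1000_down(adapter);
        /* ... apply the new settings ... */
        e1000_up(adapter);

        clear_bit(__E1000_RESETTING, &adapter->flags);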
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index b3b919116e0..a6f8f4fce70 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -31,6 +31,7 @@
* Shared functions for accessing and configuring the MAC
*/
+
#include "e1000_hw.h"
static int32_t e1000_set_phy_type(struct e1000_hw *hw);
@@ -166,10 +167,10 @@ e1000_set_phy_type(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_set_phy_type");
- if(hw->mac_type == e1000_undefined)
+ if (hw->mac_type == e1000_undefined)
return -E1000_ERR_PHY_TYPE;
- switch(hw->phy_id) {
+ switch (hw->phy_id) {
case M88E1000_E_PHY_ID:
case M88E1000_I_PHY_ID:
case M88E1011_I_PHY_ID:
@@ -177,10 +178,10 @@ e1000_set_phy_type(struct e1000_hw *hw)
hw->phy_type = e1000_phy_m88;
break;
case IGP01E1000_I_PHY_ID:
- if(hw->mac_type == e1000_82541 ||
- hw->mac_type == e1000_82541_rev_2 ||
- hw->mac_type == e1000_82547 ||
- hw->mac_type == e1000_82547_rev_2) {
+ if (hw->mac_type == e1000_82541 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82547_rev_2) {
hw->phy_type = e1000_phy_igp;
break;
}
@@ -207,6 +208,7 @@ e1000_set_phy_type(struct e1000_hw *hw)
return E1000_SUCCESS;
}
+
/******************************************************************************
* IGP phy init script - initializes the GbE PHY
*
@@ -220,7 +222,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
DEBUGFUNC("e1000_phy_init_script");
- if(hw->phy_init_script) {
+ if (hw->phy_init_script) {
msec_delay(20);
/* Save off the current value of register 0x2F5B to be restored at
@@ -236,7 +238,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
msec_delay(5);
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82541:
case e1000_82547:
e1000_write_phy_reg(hw, 0x1F95, 0x0001);
@@ -273,22 +275,22 @@ e1000_phy_init_script(struct e1000_hw *hw)
/* Now enable the transmitter */
e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
- if(hw->mac_type == e1000_82547) {
+ if (hw->mac_type == e1000_82547) {
uint16_t fused, fine, coarse;
/* Move to analog registers page */
e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
- if(!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
- if(coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
- } else if(coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
@@ -387,6 +389,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82571EB_COPPER:
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
hw->mac_type = e1000_82571;
break;
case E1000_DEV_ID_82572EI_COPPER:
@@ -418,7 +421,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
return -E1000_ERR_MAC_TYPE;
}
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_ich8lan:
hw->swfwhw_semaphore_present = TRUE;
hw->asf_firmware_present = TRUE;
@@ -456,7 +459,7 @@ e1000_set_media_type(struct e1000_hw *hw)
DEBUGFUNC("e1000_set_media_type");
- if(hw->mac_type != e1000_82543) {
+ if (hw->mac_type != e1000_82543) {
/* tbi_compatibility is only valid on 82543 */
hw->tbi_compatibility_en = FALSE;
}
@@ -516,16 +519,16 @@ e1000_reset_hw(struct e1000_hw *hw)
DEBUGFUNC("e1000_reset_hw");
/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
- if(hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->mac_type == e1000_82542_rev2_0) {
DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
}
- if(hw->bus_type == e1000_bus_type_pci_express) {
+ if (hw->bus_type == e1000_bus_type_pci_express) {
/* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
- if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+ if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
DEBUGOUT("PCI-E Master disable polling has failed.\n");
}
}
@@ -553,14 +556,14 @@ e1000_reset_hw(struct e1000_hw *hw)
ctrl = E1000_READ_REG(hw, CTRL);
/* Must reset the PHY before resetting the MAC */
- if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
msec_delay(5);
}
/* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset. */
- if(hw->mac_type == e1000_82573) {
+ if (hw->mac_type == e1000_82573) {
timeout = 10;
extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
@@ -570,14 +573,14 @@ e1000_reset_hw(struct e1000_hw *hw)
E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
- if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
break;
else
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
msec_delay(2);
timeout--;
- } while(timeout);
+ } while (timeout);
}
/* Workaround for ICH8 bit corruption issue in FIFO memory */
@@ -595,7 +598,7 @@ e1000_reset_hw(struct e1000_hw *hw)
*/
DEBUGOUT("Issuing a global reset to MAC\n");
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82544:
case e1000_82540:
case e1000_82545:
@@ -634,7 +637,7 @@ e1000_reset_hw(struct e1000_hw *hw)
* device. Later controllers reload the EEPROM automatically, so just wait
* for reload to complete.
*/
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
@@ -669,7 +672,7 @@ e1000_reset_hw(struct e1000_hw *hw)
case e1000_ich8lan:
case e1000_80003es2lan:
ret_val = e1000_get_auto_rd_done(hw);
- if(ret_val)
+ if (ret_val)
/* We don't want to continue accessing MAC registers. */
return ret_val;
break;
@@ -680,13 +683,13 @@ e1000_reset_hw(struct e1000_hw *hw)
}
/* Disable HW ARPs on ASF enabled adapters */
- if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
+ if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
manc = E1000_READ_REG(hw, MANC);
manc &= ~(E1000_MANC_ARP_EN);
E1000_WRITE_REG(hw, MANC, manc);
}
- if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
e1000_phy_init_script(hw);
/* Configure activity LED after PHY reset */
@@ -704,8 +707,8 @@ e1000_reset_hw(struct e1000_hw *hw)
icr = E1000_READ_REG(hw, ICR);
/* If MWI was previously enabled, reenable it. */
- if(hw->mac_type == e1000_82542_rev2_0) {
- if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
e1000_pci_set_mwi(hw);
}
@@ -745,9 +748,20 @@ e1000_init_hw(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_hw");
+ /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
+ if (hw->mac_type == e1000_ich8lan) {
+ reg_data = E1000_READ_REG(hw, TARC0);
+ reg_data |= 0x30000000;
+ E1000_WRITE_REG(hw, TARC0, reg_data);
+
+ reg_data = E1000_READ_REG(hw, STATUS);
+ reg_data &= ~0x80000000;
+ E1000_WRITE_REG(hw, STATUS, reg_data);
+ }
+
/* Initialize Identification LED */
ret_val = e1000_id_led_init(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error Initializing Identification LED\n");
return ret_val;
}
@@ -765,7 +779,7 @@ e1000_init_hw(struct e1000_hw *hw)
}
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
- if(hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->mac_type == e1000_82542_rev2_0) {
DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST);
@@ -779,11 +793,11 @@ e1000_init_hw(struct e1000_hw *hw)
e1000_init_rx_addrs(hw);
/* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
- if(hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->mac_type == e1000_82542_rev2_0) {
E1000_WRITE_REG(hw, RCTL, 0);
E1000_WRITE_FLUSH(hw);
msec_delay(1);
- if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
e1000_pci_set_mwi(hw);
}
@@ -792,7 +806,7 @@ e1000_init_hw(struct e1000_hw *hw)
mta_size = E1000_MC_TBL_SIZE;
if (hw->mac_type == e1000_ich8lan)
mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
- for(i = 0; i < mta_size; i++) {
+ for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from
* occurring when accessing our register space */
@@ -804,18 +818,18 @@ e1000_init_hw(struct e1000_hw *hw)
* gives equal priority to transmits and receives. Valid only on
* 82542 and 82543 silicon.
*/
- if(hw->dma_fairness && hw->mac_type <= e1000_82543) {
+ if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
ctrl = E1000_READ_REG(hw, CTRL);
E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
}
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82545_rev_3:
case e1000_82546_rev_3:
break;
default:
/* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
- if(hw->bus_type == e1000_bus_type_pcix) {
+ if (hw->bus_type == e1000_bus_type_pcix) {
e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word);
e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI,
&pcix_stat_hi_word);
@@ -823,9 +837,9 @@ e1000_init_hw(struct e1000_hw *hw)
PCIX_COMMAND_MMRBC_SHIFT;
stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
PCIX_STATUS_HI_MMRBC_SHIFT;
- if(stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
- if(cmd_mmrbc > stat_mmrbc) {
+ if (cmd_mmrbc > stat_mmrbc) {
pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK;
pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER,
@@ -843,7 +857,7 @@ e1000_init_hw(struct e1000_hw *hw)
ret_val = e1000_setup_link(hw);
/* Set the transmit descriptor write-back policy */
- if(hw->mac_type > e1000_82544) {
+ if (hw->mac_type > e1000_82544) {
ctrl = E1000_READ_REG(hw, TXDCTL);
ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
switch (hw->mac_type) {
@@ -894,14 +908,13 @@ e1000_init_hw(struct e1000_hw *hw)
case e1000_ich8lan:
ctrl = E1000_READ_REG(hw, TXDCTL1);
ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
- if(hw->mac_type >= e1000_82571)
+ if (hw->mac_type >= e1000_82571)
ctrl |= E1000_TXDCTL_COUNT_DESC;
E1000_WRITE_REG(hw, TXDCTL1, ctrl);
break;
}
-
if (hw->mac_type == e1000_82573) {
uint32_t gcr = E1000_READ_REG(hw, GCR);
gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
@@ -945,10 +958,10 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
DEBUGFUNC("e1000_adjust_serdes_amplitude");
- if(hw->media_type != e1000_media_type_internal_serdes)
+ if (hw->media_type != e1000_media_type_internal_serdes)
return E1000_SUCCESS;
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82545_rev_3:
case e1000_82546_rev_3:
break;
@@ -961,11 +974,11 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
return ret_val;
}
- if(eeprom_data != EEPROM_RESERVED_WORD) {
+ if (eeprom_data != EEPROM_RESERVED_WORD) {
/* Adjust SERDES output amplitude only. */
eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
@@ -1033,10 +1046,10 @@ e1000_setup_link(struct e1000_hw *hw)
* in case we get disconnected and then reconnected into a different
* hub or switch with different Flow Control capabilities.
*/
- if(hw->mac_type == e1000_82542_rev2_0)
+ if (hw->mac_type == e1000_82542_rev2_0)
hw->fc &= (~e1000_fc_tx_pause);
- if((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+ if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
hw->fc &= (~e1000_fc_rx_pause);
hw->original_fc = hw->fc;
@@ -1051,12 +1064,12 @@ e1000_setup_link(struct e1000_hw *hw)
* or e1000_phy_setup() is called.
*/
if (hw->mac_type == e1000_82543) {
- ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
- 1, &eeprom_data);
- if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
SWDPIO__EXT_SHIFT);
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
@@ -1089,14 +1102,14 @@ e1000_setup_link(struct e1000_hw *hw)
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
- if(!(hw->fc & e1000_fc_tx_pause)) {
+ if (!(hw->fc & e1000_fc_tx_pause)) {
E1000_WRITE_REG(hw, FCRTL, 0);
E1000_WRITE_REG(hw, FCRTH, 0);
} else {
/* We need to set up the Receive Threshold high and low water marks
* as well as (optionally) enabling the transmission of XON frames.
*/
- if(hw->fc_send_xon) {
+ if (hw->fc_send_xon) {
E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
} else {
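
The threshold programming around this hunk reduces to a small decision: zero both registers when PAUSE transmission is disallowed, otherwise program low and high water marks and optionally OR in the XON-enable bit. A hedged sketch (the struct is illustrative; the XONE bit value mirrors E1000_FCRTL_XONE):

#include <stdint.h>
#include <stdbool.h>

#define FCRTL_XONE 0x80000000u  /* XON-enable, as E1000_FCRTL_XONE */

struct fc_regs { uint32_t fcrtl, fcrth; };

static void program_fc_thresholds(struct fc_regs *r, bool tx_pause_allowed,
                                  bool send_xon, uint32_t low, uint32_t high)
{
    if (!tx_pause_allowed) {      /* we may never transmit PAUSE */
        r->fcrtl = 0;
        r->fcrth = 0;
    } else if (send_xon) {        /* resume partner traffic with XON */
        r->fcrtl = low | FCRTL_XONE;
        r->fcrth = high;
    } else {
        r->fcrtl = low;
        r->fcrth = high;
    }
}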
@@ -1143,11 +1156,11 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* the EEPROM.
*/
ctrl = E1000_READ_REG(hw, CTRL);
- if(hw->media_type == e1000_media_type_fiber)
+ if (hw->media_type == e1000_media_type_fiber)
signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
ret_val = e1000_adjust_serdes_amplitude(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Take the link out of reset */
@@ -1155,7 +1168,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
/* Adjust VCO speed to improve BER performance */
ret_val = e1000_set_vco_speed(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
e1000_config_collision_dist(hw);
@@ -1226,15 +1239,15 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* less than 500 milliseconds even if the other end is doing it in SW).
* For internal serdes, we just assume a signal is present, then poll.
*/
- if(hw->media_type == e1000_media_type_internal_serdes ||
+ if (hw->media_type == e1000_media_type_internal_serdes ||
(E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) {
DEBUGOUT("Looking for Link\n");
- for(i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+ for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
msec_delay(10);
status = E1000_READ_REG(hw, STATUS);
- if(status & E1000_STATUS_LU) break;
+ if (status & E1000_STATUS_LU) break;
}
- if(i == (LINK_UP_TIMEOUT / 10)) {
+ if (i == (LINK_UP_TIMEOUT / 10)) {
DEBUGOUT("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
@@ -1243,7 +1256,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* non-autonegotiating link partners.
*/
ret_val = e1000_check_for_link(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error while checking for link\n");
return ret_val;
}
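
The polling strategy described above, as a self-contained sketch; the two callbacks stand in for the STATUS register read and msec_delay (assumed names, not driver API), and 500 ms in 10 ms steps matches LINK_UP_TIMEOUT / 10:

#include <stdbool.h>

/* Poll for link, then let the caller run its fallback (e.g. a forced
 * link check for non-autonegotiating partners) on failure. */
static bool wait_for_link(bool (*link_up)(void), void (*sleep_ms)(int),
                          int timeout_ms)
{
    int i;

    for (i = 0; i < timeout_ms / 10; i++) {
        sleep_ms(10);
        if (link_up())
            return true;
    }
    return false;
}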
@@ -1277,7 +1290,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
* the PHY speed and duplex configuration is. In addition, we need to
* perform a hardware reset on the PHY to take it out of reset.
*/
- if(hw->mac_type > e1000_82543) {
+ if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
E1000_WRITE_REG(hw, CTRL, ctrl);
@@ -1285,13 +1298,13 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
E1000_WRITE_REG(hw, CTRL, ctrl);
ret_val = e1000_phy_hw_reset(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
/* Make sure we have a valid PHY */
ret_val = e1000_detect_gig_phy(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error, did not detect valid phy.\n");
return ret_val;
}
@@ -1299,19 +1312,19 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
/* Set PHY to class A mode (if necessary) */
ret_val = e1000_set_phy_mode(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if((hw->mac_type == e1000_82545_rev_3) ||
+ if ((hw->mac_type == e1000_82545_rev_3) ||
(hw->mac_type == e1000_82546_rev_3)) {
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
phy_data |= 0x00000008;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
}
- if(hw->mac_type <= e1000_82543 ||
- hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
- hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+ if (hw->mac_type <= e1000_82543 ||
+ hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
hw->phy_reset_disable = FALSE;
return E1000_SUCCESS;
@@ -1341,7 +1354,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
return ret_val;
}
- /* Wait 10ms for MAC to configure PHY from eeprom settings */
+ /* Wait 15ms for MAC to configure PHY from eeprom settings */
msec_delay(15);
if (hw->mac_type != e1000_ich8lan) {
/* Configure activity LED after PHY reset */
@@ -1351,11 +1364,14 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
}
- /* disable lplu d3 during driver init */
- ret_val = e1000_set_d3_lplu_state(hw, FALSE);
- if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D3\n");
- return ret_val;
+ /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+ if (hw->phy_type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
}
/* disable lplu d0 during driver init */
@@ -1393,45 +1409,45 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
}
}
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* set auto-master slave resolution settings */
- if(hw->autoneg) {
+ if (hw->autoneg) {
e1000_ms_type phy_ms_setting = hw->master_slave;
- if(hw->ffe_config_state == e1000_ffe_config_active)
+ if (hw->ffe_config_state == e1000_ffe_config_active)
hw->ffe_config_state = e1000_ffe_config_enabled;
- if(hw->dsp_config_state == e1000_dsp_config_activated)
+ if (hw->dsp_config_state == e1000_dsp_config_activated)
hw->dsp_config_state = e1000_dsp_config_enabled;
/* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
* resolution as hardware default. */
- if(hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
- if(ret_val)
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
return ret_val;
phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- phy_data);
- if(ret_val)
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
return ret_val;
/* Set auto Master/Slave resolution process */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~CR_1000T_MS_ENABLE;
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* load defaults for future use */
@@ -1455,7 +1471,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
break;
}
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
@@ -1476,12 +1492,12 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
DEBUGFUNC("e1000_copper_link_ggp_setup");
- if(!hw->phy_reset_disable) {
+ if (!hw->phy_reset_disable) {
/* Enable CRS on TX for half-duplex operation. */
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -1490,7 +1506,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Options:
@@ -1501,7 +1517,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
* 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
*/
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
@@ -1526,11 +1542,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
* 1 - Enabled
*/
phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
- if(hw->disable_polarity_correction == 1)
+ if (hw->disable_polarity_correction == 1)
phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* SW Reset the PHY so all changes take effect */
@@ -1586,9 +1602,9 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
return ret_val;
phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
-
ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
phy_data);
+
if (ret_val)
return ret_val;
}
@@ -1623,12 +1639,12 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
DEBUGFUNC("e1000_copper_link_mgp_setup");
- if(hw->phy_reset_disable)
+ if (hw->phy_reset_disable)
return E1000_SUCCESS;
/* Enable CRS on TX. This must be set for half-duplex operation. */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
@@ -1665,7 +1681,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
* 1 - Enabled
*/
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if(hw->disable_polarity_correction == 1)
+ if (hw->disable_polarity_correction == 1)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
@@ -1705,7 +1721,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
/* SW Reset the PHY so all changes take effect */
ret_val = e1000_phy_reset(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error Resetting the PHY\n");
return ret_val;
}
@@ -1735,7 +1751,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
/* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
- if(hw->autoneg_advertised == 0)
+ if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
/* IFE phy only supports 10/100 */
@@ -1744,7 +1760,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error Setting up Auto-Negotiation\n");
return ret_val;
}
@@ -1754,20 +1770,20 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
- if(hw->wait_autoneg_complete) {
+ if (hw->wait_autoneg_complete) {
ret_val = e1000_wait_autoneg(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error while waiting for autoneg to complete\n");
return ret_val;
}
@@ -1778,7 +1794,6 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
return E1000_SUCCESS;
}
-
/******************************************************************************
* Config the MAC and the PHY after link is up.
* 1) Set up the MAC to the current PHY speed/duplex
@@ -1797,25 +1812,25 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
int32_t ret_val;
DEBUGFUNC("e1000_copper_link_postconfig");
- if(hw->mac_type >= e1000_82544) {
+ if (hw->mac_type >= e1000_82544) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error configuring MAC to PHY settings\n");
return ret_val;
}
}
ret_val = e1000_config_fc_after_link_up(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error Configuring Flow Control\n");
return ret_val;
}
/* Config DSP to improve Giga link quality */
- if(hw->phy_type == e1000_phy_igp) {
+ if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error Configuring DSP after link up\n");
return ret_val;
}
@@ -1861,7 +1876,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
switch (hw->mac_type) {
@@ -1882,30 +1897,30 @@ e1000_setup_copper_link(struct e1000_hw *hw)
hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_copper_link_igp_setup(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
} else if (hw->phy_type == e1000_phy_m88) {
ret_val = e1000_copper_link_mgp_setup(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
} else if (hw->phy_type == e1000_phy_gg82563) {
ret_val = e1000_copper_link_ggp_setup(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
- if(hw->autoneg) {
+ if (hw->autoneg) {
/* Setup autoneg and flow control advertisement
* and perform autonegotiation */
ret_val = e1000_copper_link_autoneg(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H, or 100F
* depending on value from forced_speed_duplex. */
DEBUGOUT("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error Forcing Speed and Duplex\n");
return ret_val;
}
@@ -1914,18 +1929,18 @@ e1000_setup_copper_link(struct e1000_hw *hw)
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
- for(i = 0; i < 10; i++) {
+ for (i = 0; i < 10; i++) {
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(phy_data & MII_SR_LINK_STATUS) {
+ if (phy_data & MII_SR_LINK_STATUS) {
/* Config the MAC and PHY after link is up */
ret_val = e1000_copper_link_postconfig(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
DEBUGOUT("Valid link established!!!\n");
@@ -2027,7 +2042,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
if (hw->phy_type != e1000_phy_ife) {
@@ -2055,36 +2070,36 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
- if(hw->autoneg_advertised & ADVERTISE_10_HALF) {
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
DEBUGOUT("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
- if(hw->autoneg_advertised & ADVERTISE_10_FULL) {
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
DEBUGOUT("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
- if(hw->autoneg_advertised & ADVERTISE_100_HALF) {
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
DEBUGOUT("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
- if(hw->autoneg_advertised & ADVERTISE_100_FULL) {
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
DEBUGOUT("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
- if(hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+ if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
}
/* Do we want to advertise 1000 Mb Full Duplex? */
- if(hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
DEBUGOUT("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
if (hw->phy_type == e1000_phy_ife) {
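
The advertisement plumbing in this stretch is a straight mapping from requested capabilities to MII register bits, with 1000 Mb half duplex deliberately never set. A minimal sketch using the same constant values as e1000_hw.h (the real routine also preserves the register's selector field; this sketch builds just the capability bits):

#include <stdint.h>

#define ADVERTISE_10_HALF   0x0001
#define ADVERTISE_10_FULL   0x0002
#define ADVERTISE_100_HALF  0x0004
#define ADVERTISE_100_FULL  0x0008
#define ADVERTISE_1000_FULL 0x0020

#define NWAY_AR_10T_HD_CAPS   0x0020
#define NWAY_AR_10T_FD_CAPS   0x0040
#define NWAY_AR_100TX_HD_CAPS 0x0080
#define NWAY_AR_100TX_FD_CAPS 0x0100
#define CR_1000T_FD_CAPS      0x0200

static void build_autoneg_adv(uint16_t wanted, uint16_t *mii_adv,
                              uint16_t *mii_1000t)
{
    *mii_adv = 0;
    *mii_1000t = 0;

    if (wanted & ADVERTISE_10_HALF)
        *mii_adv |= NWAY_AR_10T_HD_CAPS;
    if (wanted & ADVERTISE_10_FULL)
        *mii_adv |= NWAY_AR_10T_FD_CAPS;
    if (wanted & ADVERTISE_100_HALF)
        *mii_adv |= NWAY_AR_100TX_HD_CAPS;
    if (wanted & ADVERTISE_100_FULL)
        *mii_adv |= NWAY_AR_100TX_FD_CAPS;
    /* 1000 Mb half duplex is never advertised, as above */
    if (wanted & ADVERTISE_1000_FULL)
        *mii_1000t |= CR_1000T_FD_CAPS;
}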
@@ -2146,7 +2161,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
}
ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
@@ -2194,7 +2209,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Read the MII Control Register. */
ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* We need to disable autoneg in order to force link and duplex. */
@@ -2202,8 +2217,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
/* Are we forcing Full or Half Duplex? */
- if(hw->forced_speed_duplex == e1000_100_full ||
- hw->forced_speed_duplex == e1000_10_full) {
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_10_full) {
/* We want to force full duplex so we SET the full duplex bits in the
* Device and MII Control Registers.
*/
@@ -2220,7 +2235,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
}
/* Are we forcing 100Mbps??? */
- if(hw->forced_speed_duplex == e1000_100_full ||
+ if (hw->forced_speed_duplex == e1000_100_full ||
hw->forced_speed_duplex == e1000_100_half) {
/* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
ctrl |= E1000_CTRL_SPD_100;
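
Forcing speed and duplex boils down to clearing the autoneg-enable bit and setting the duplex and speed bits in the MII control word; a sketch under those assumptions (bit values mirror the MII spec, 10 and 100 Mb/s only):

#include <stdint.h>
#include <stdbool.h>

#define MII_CR_AUTO_NEG_EN 0x1000
#define MII_CR_FULL_DUPLEX 0x0100
#define MII_CR_SPEED_100   0x2000

static uint16_t mii_force(uint16_t mii_ctrl, bool full_duplex, bool speed_100)
{
    mii_ctrl &= ~MII_CR_AUTO_NEG_EN;   /* forcing requires autoneg off */
    if (full_duplex)
        mii_ctrl |= MII_CR_FULL_DUPLEX;
    else
        mii_ctrl &= ~MII_CR_FULL_DUPLEX;
    if (speed_100)
        mii_ctrl |= MII_CR_SPEED_100;
    else
        mii_ctrl &= ~MII_CR_SPEED_100; /* 10 Mb/s encodes as zero */
    return mii_ctrl;
}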
@@ -2243,7 +2258,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if ((hw->phy_type == e1000_phy_m88) ||
(hw->phy_type == e1000_phy_gg82563)) {
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
@@ -2251,7 +2266,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
@@ -2275,20 +2290,20 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
* forced whenever speed or duplex are forced.
*/
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
/* Write back the modified PHY MII control register. */
ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
udelay(1);
@@ -2300,50 +2315,50 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
* only if the user has set wait_autoneg_complete to 1, which is
* the default.
*/
- if(hw->wait_autoneg_complete) {
+ if (hw->wait_autoneg_complete) {
/* We will wait for autoneg to complete. */
DEBUGOUT("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
- for(i = PHY_FORCE_TIME; i > 0; i--) {
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
/* Read the MII Status Register and wait for Auto-Neg Complete bit
* to be set.
*/
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(mii_status_reg & MII_SR_LINK_STATUS) break;
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
msec_delay(100);
}
- if((i == 0) &&
+ if ((i == 0) &&
((hw->phy_type == e1000_phy_m88) ||
(hw->phy_type == e1000_phy_gg82563))) {
/* We didn't get link. Reset the DSP and wait again for link. */
ret_val = e1000_phy_reset_dsp(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error Resetting PHY DSP\n");
return ret_val;
}
}
/* This loop will early-out if the link condition has been met. */
- for(i = PHY_FORCE_TIME; i > 0; i--) {
- if(mii_status_reg & MII_SR_LINK_STATUS) break;
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
msec_delay(100);
/* Read the MII Status Register and wait for Auto-Neg Complete bit
* to be set.
*/
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
}
@@ -2354,32 +2369,31 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
* defaults back to a 2.5MHz clock when the PHY is reset.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= M88E1000_EPSCR_TX_CLK_25;
ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* In addition, because of the s/w reset above, we need to enable CRS on
* TX. This must be set for both full and half duplex operation.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
- (!hw->autoneg) &&
- (hw->forced_speed_duplex == e1000_10_full ||
- hw->forced_speed_duplex == e1000_10_half)) {
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
ret_val = e1000_polarity_reversal_workaround(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
} else if (hw->phy_type == e1000_phy_gg82563) {
@@ -2470,10 +2484,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
* registers depending on negotiated values.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(phy_data & M88E1000_PSSR_DPLX)
+ if (phy_data & M88E1000_PSSR_DPLX)
ctrl |= E1000_CTRL_FD;
else
ctrl &= ~E1000_CTRL_FD;
@@ -2483,9 +2497,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
/* Set up speed in the Device Control register depending on
* negotiated values.
*/
- if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
ctrl |= E1000_CTRL_SPD_1000;
- else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+ else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
ctrl |= E1000_CTRL_SPD_100;
/* Write the configured values back to the Device Control Reg. */
@@ -2553,7 +2567,7 @@ e1000_force_mac_fc(struct e1000_hw *hw)
}
/* Disable TX Flow Control for 82542 (rev 2.0) */
- if(hw->mac_type == e1000_82542_rev2_0)
+ if (hw->mac_type == e1000_82542_rev2_0)
ctrl &= (~E1000_CTRL_TFCE);
E1000_WRITE_REG(hw, CTRL, ctrl);
@@ -2587,11 +2601,12 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
- if(((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
- ((hw->media_type == e1000_media_type_internal_serdes) && (hw->autoneg_failed)) ||
- ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+ if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
ret_val = e1000_force_mac_fc(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error forcing flow control settings\n");
return ret_val;
}
@@ -2602,19 +2617,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
- if((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
/* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+ if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement Register
* (Address 4) and the Auto-Negotiation Base Page Ability
@@ -2623,11 +2638,11 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
&mii_nway_lp_ability_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Two bits in the Auto Negotiation Advertisement Register
@@ -2664,15 +2679,15 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
* 1 | DC | 1 | DC | e1000_fc_full
*
*/
- if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
/* Now we need to check if the user selected RX-only
* pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise RX
* ONLY. Hence, we must now check to see if we need to
* turn OFF the TRANSMISSION of PAUSE frames.
*/
- if(hw->original_fc == e1000_fc_full) {
+ if (hw->original_fc == e1000_fc_full) {
hw->fc = e1000_fc_full;
DEBUGOUT("Flow Control = FULL.\n");
} else {
@@ -2688,10 +2703,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
*
*/
- else if(!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc = e1000_fc_tx_pause;
DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
}
@@ -2703,10 +2718,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*
*/
- else if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc = e1000_fc_rx_pause;
DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
}
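
The truth table spread across the comments above resolves to a pure function; 'want_full' models hw->original_fc == e1000_fc_full, since when both sides advertise PAUSE the driver honours a user's RX-only request by dropping TX pause. A sketch, not the driver's code:

#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool adv_pause, bool adv_dir,
                               bool lp_pause, bool lp_dir, bool want_full)
{
    if (adv_pause && lp_pause)
        return want_full ? FC_FULL : FC_RX_PAUSE;
    if (!adv_pause && adv_dir && lp_pause && lp_dir)
        return FC_TX_PAUSE;  /* we may pause the partner, not vice versa */
    if (adv_pause && adv_dir && !lp_pause && lp_dir)
        return FC_RX_PAUSE;  /* the partner may pause us only */
    return FC_NONE;
}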
@@ -2730,9 +2745,9 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
* be asked to delay transmission of packets than asking
* our link partner to pause transmission of frames.
*/
- else if((hw->original_fc == e1000_fc_none ||
- hw->original_fc == e1000_fc_tx_pause) ||
- hw->fc_strict_ieee) {
+ else if ((hw->original_fc == e1000_fc_none ||
+ hw->original_fc == e1000_fc_tx_pause) ||
+ hw->fc_strict_ieee) {
hw->fc = e1000_fc_none;
DEBUGOUT("Flow Control = NONE.\n");
} else {
@@ -2745,19 +2760,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
* enabled per IEEE 802.3 spec.
*/
ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error getting link speed and duplex\n");
return ret_val;
}
- if(duplex == HALF_DUPLEX)
+ if (duplex == HALF_DUPLEX)
hw->fc = e1000_fc_none;
/* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = e1000_force_mac_fc(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error forcing flow control settings\n");
return ret_val;
}
@@ -2796,13 +2811,13 @@ e1000_check_for_link(struct e1000_hw *hw)
* set when the optics detect a signal. On older adapters, it will be
* cleared when there is a signal. This applies to fiber media only.
*/
- if((hw->media_type == e1000_media_type_fiber) ||
- (hw->media_type == e1000_media_type_internal_serdes)) {
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
rxcw = E1000_READ_REG(hw, RXCW);
- if(hw->media_type == e1000_media_type_fiber) {
+ if (hw->media_type == e1000_media_type_fiber) {
signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
- if(status & E1000_STATUS_LU)
+ if (status & E1000_STATUS_LU)
hw->get_link_status = FALSE;
}
}
@@ -2813,20 +2828,20 @@ e1000_check_for_link(struct e1000_hw *hw)
* receive a Link Status Change interrupt or we have Rx Sequence
* Errors.
*/
- if((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+ if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
* Read the register twice since the link bit is sticky.
*/
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(phy_data & MII_SR_LINK_STATUS) {
+ if (phy_data & MII_SR_LINK_STATUS) {
hw->get_link_status = FALSE;
/* Check if there was DownShift; it must be checked immediately after
* link-up */
@@ -2840,10 +2855,10 @@ e1000_check_for_link(struct e1000_hw *hw)
* happen due to the execution of this workaround.
*/
- if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
- (!hw->autoneg) &&
- (hw->forced_speed_duplex == e1000_10_full ||
- hw->forced_speed_duplex == e1000_10_half)) {
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) &&
+ (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
E1000_WRITE_REG(hw, IMC, 0xffffffff);
ret_val = e1000_polarity_reversal_workaround(hw);
icr = E1000_READ_REG(hw, ICR);
@@ -2860,7 +2875,7 @@ e1000_check_for_link(struct e1000_hw *hw)
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
- if(!hw->autoneg) return -E1000_ERR_CONFIG;
+ if (!hw->autoneg) return -E1000_ERR_CONFIG;
/* optimize the dsp settings for the igp phy */
e1000_config_dsp_after_link_change(hw, TRUE);
@@ -2873,11 +2888,11 @@ e1000_check_for_link(struct e1000_hw *hw)
* speed/duplex on the MAC to the current PHY speed/duplex
* settings.
*/
- if(hw->mac_type >= e1000_82544)
+ if (hw->mac_type >= e1000_82544)
e1000_config_collision_dist(hw);
else {
ret_val = e1000_config_mac_to_phy(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error configuring MAC to PHY settings\n");
return ret_val;
}
@@ -2888,7 +2903,7 @@ e1000_check_for_link(struct e1000_hw *hw)
* have had to re-autoneg with a different link partner.
*/
ret_val = e1000_config_fc_after_link_up(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error configuring flow control\n");
return ret_val;
}
@@ -2900,7 +2915,7 @@ e1000_check_for_link(struct e1000_hw *hw)
* at gigabit speed, then TBI compatibility is not needed. If we are
* at gigabit speed, we turn on TBI compatibility.
*/
- if(hw->tbi_compatibility_en) {
+ if (hw->tbi_compatibility_en) {
uint16_t speed, duplex;
ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
@@ -2911,7 +2926,7 @@ e1000_check_for_link(struct e1000_hw *hw)
/* If link speed is not set to gigabit speed, we do not need
* to enable TBI compatibility.
*/
- if(hw->tbi_compatibility_on) {
+ if (hw->tbi_compatibility_on) {
/* If we were previously in TBI compatibility mode, turn it off. */
rctl = E1000_READ_REG(hw, RCTL);
rctl &= ~E1000_RCTL_SBP;
@@ -2924,7 +2939,7 @@ e1000_check_for_link(struct e1000_hw *hw)
* packets. Some frames have an additional byte on the end and
* will look like CRC errors to the hardware.
*/
- if(!hw->tbi_compatibility_on) {
+ if (!hw->tbi_compatibility_on) {
hw->tbi_compatibility_on = TRUE;
rctl = E1000_READ_REG(hw, RCTL);
rctl |= E1000_RCTL_SBP;
@@ -2940,12 +2955,12 @@ e1000_check_for_link(struct e1000_hw *hw)
* auto-negotiation time to complete, in case the cable was just plugged
* in. The autoneg_failed flag does this.
*/
- else if((((hw->media_type == e1000_media_type_fiber) &&
+ else if ((((hw->media_type == e1000_media_type_fiber) &&
((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
- (hw->media_type == e1000_media_type_internal_serdes)) &&
- (!(status & E1000_STATUS_LU)) &&
- (!(rxcw & E1000_RXCW_C))) {
- if(hw->autoneg_failed == 0) {
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if (hw->autoneg_failed == 0) {
hw->autoneg_failed = 1;
return 0;
}
@@ -2961,7 +2976,7 @@ e1000_check_for_link(struct e1000_hw *hw)
/* Configure Flow Control after forcing link up. */
ret_val = e1000_config_fc_after_link_up(hw);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error configuring flow control\n");
return ret_val;
}
@@ -2971,9 +2986,9 @@ e1000_check_for_link(struct e1000_hw *hw)
* Device Control register in an attempt to auto-negotiate with our link
* partner.
*/
- else if(((hw->media_type == e1000_media_type_fiber) ||
- (hw->media_type == e1000_media_type_internal_serdes)) &&
- (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ else if (((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
E1000_WRITE_REG(hw, TXCW, hw->txcw);
E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2983,12 +2998,12 @@ e1000_check_for_link(struct e1000_hw *hw)
/* If we force link for non-auto-negotiation switch, check link status
* based on MAC synchronization for internal serdes media type.
*/
- else if((hw->media_type == e1000_media_type_internal_serdes) &&
- !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+ else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
/* SYNCH bit and IV bit are sticky. */
udelay(10);
- if(E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
- if(!(rxcw & E1000_RXCW_IV)) {
+ if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
+ if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_link_down = FALSE;
DEBUGOUT("SERDES: Link is up.\n");
}
@@ -2997,8 +3012,8 @@ e1000_check_for_link(struct e1000_hw *hw)
DEBUGOUT("SERDES: Link is down.\n");
}
}
- if((hw->media_type == e1000_media_type_internal_serdes) &&
- (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+ if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS));
}
return E1000_SUCCESS;
@@ -3022,12 +3037,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
DEBUGFUNC("e1000_get_speed_and_duplex");
- if(hw->mac_type >= e1000_82543) {
+ if (hw->mac_type >= e1000_82543) {
status = E1000_READ_REG(hw, STATUS);
- if(status & E1000_STATUS_SPEED_1000) {
+ if (status & E1000_STATUS_SPEED_1000) {
*speed = SPEED_1000;
DEBUGOUT("1000 Mbs, ");
- } else if(status & E1000_STATUS_SPEED_100) {
+ } else if (status & E1000_STATUS_SPEED_100) {
*speed = SPEED_100;
DEBUGOUT("100 Mbs, ");
} else {
@@ -3035,7 +3050,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
DEBUGOUT("10 Mbs, ");
}
- if(status & E1000_STATUS_FD) {
+ if (status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
DEBUGOUT("Full Duplex\n");
} else {
@@ -3052,18 +3067,18 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
* if it is operating at half duplex. Here we set the duplex settings to
* match the duplex in the link partner's capabilities.
*/
- if(hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
*duplex = HALF_DUPLEX;
else {
ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+ if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
(*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
*duplex = HALF_DUPLEX;
}
@@ -3104,17 +3119,17 @@ e1000_wait_autoneg(struct e1000_hw *hw)
DEBUGOUT("Waiting for Auto-Neg to complete.\n");
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
- for(i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
/* Read the MII Status Register and wait for Auto-Neg
* Complete bit to be set.
*/
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(phy_data & MII_SR_AUTONEG_COMPLETE) {
+ if (phy_data & MII_SR_AUTONEG_COMPLETE) {
return E1000_SUCCESS;
}
msec_delay(100);
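
The double read of PHY_STATUS is the key detail here: the register latches link events, so the first read flushes stale state and the second reflects the current link. A sketch of the poll (callbacks stand in for the PHY read and msec_delay; 45 iterations of 100 ms give the 4.5 s budget noted above):

#include <stdint.h>
#include <stdbool.h>

#define SR_AUTONEG_COMPLETE 0x0020  /* MII status bit 5 */

static bool wait_autoneg_done(uint16_t (*read_status)(void),
                              void (*sleep_ms)(int))
{
    int i;

    for (i = 45; i > 0; i--) {
        (void)read_status();        /* discard latched value */
        if (read_status() & SR_AUTONEG_COMPLETE)
            return true;
        sleep_ms(100);
    }
    return false;
}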
@@ -3187,14 +3202,16 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
/* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
- while(mask) {
+ while (mask) {
/* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
* then raising and lowering the Management Data Clock. A "0" is
* shifted out to the PHY by setting the MDIO bit to "0" and then
* raising and lowering the clock.
*/
- if(data & mask) ctrl |= E1000_CTRL_MDIO;
- else ctrl &= ~E1000_CTRL_MDIO;
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
E1000_WRITE_REG(hw, CTRL, ctrl);
E1000_WRITE_FLUSH(hw);
@@ -3245,12 +3262,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
e1000_raise_mdi_clk(hw, &ctrl);
e1000_lower_mdi_clk(hw, &ctrl);
- for(data = 0, i = 0; i < 16; i++) {
+ for (data = 0, i = 0; i < 16; i++) {
data = data << 1;
e1000_raise_mdi_clk(hw, &ctrl);
ctrl = E1000_READ_REG(hw, CTRL);
/* Check to see if we shifted in a "1". */
- if(ctrl & E1000_CTRL_MDIO) data |= 1;
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
e1000_lower_mdi_clk(hw, &ctrl);
}
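
Both shift routines above are classic bit-banged MDIO: bits go out MSB first by driving the MDIO pin and pulsing MDC, and come back one bit per clock. A condensed sketch (the pin callbacks are stand-ins for the CTRL register wiggling, and the sampling point is simplified relative to the raise/sample/lower sequence above):

#include <stdint.h>

struct mdio_pins {
    void (*set_mdio)(int high);
    int  (*get_mdio)(void);
    void (*pulse_mdc)(void);   /* raise then lower the clock */
};

static void mdio_shift_out(const struct mdio_pins *p, uint32_t data,
                           unsigned int count)
{
    uint32_t mask = 1u << (count - 1);

    while (mask) {
        p->set_mdio((data & mask) != 0);
        p->pulse_mdc();
        mask >>= 1;
    }
}

static uint16_t mdio_shift_in(const struct mdio_pins *p)
{
    uint16_t data = 0;
    int i;

    for (i = 0; i < 16; i++) {
        data = (uint16_t)(data << 1);
        p->pulse_mdc();
        if (p->get_mdio())
            data |= 1;
    }
    return data;
}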
@@ -3276,7 +3294,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
if (!hw->swfw_sync_present)
return e1000_get_hw_eeprom_semaphore(hw);
- while(timeout) {
+ while (timeout) {
if (e1000_get_hw_eeprom_semaphore(hw))
return -E1000_ERR_SWFW_SYNC;
@@ -3365,7 +3383,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(uint16_t)reg_addr);
- if(ret_val) {
+ if (ret_val) {
e1000_swfw_sync_release(hw, swfw);
return ret_val;
}
@@ -3410,12 +3428,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw,
DEBUGFUNC("e1000_read_phy_reg_ex");
- if(reg_addr > MAX_PHY_REG_ADDRESS) {
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
- if(hw->mac_type > e1000_82543) {
+ if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, and register address in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
@@ -3427,16 +3445,16 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw,
E1000_WRITE_REG(hw, MDIC, mdic);
/* Poll the ready bit to see if the MDI read completed */
- for(i = 0; i < 64; i++) {
+ for (i = 0; i < 64; i++) {
udelay(50);
mdic = E1000_READ_REG(hw, MDIC);
- if(mdic & E1000_MDIC_READY) break;
+ if (mdic & E1000_MDIC_READY) break;
}
- if(!(mdic & E1000_MDIC_READY)) {
+ if (!(mdic & E1000_MDIC_READY)) {
DEBUGOUT("MDI Read did not complete\n");
return -E1000_ERR_PHY;
}
- if(mdic & E1000_MDIC_ERROR) {
+ if (mdic & E1000_MDIC_ERROR) {
DEBUGOUT("MDI Error\n");
return -E1000_ERR_PHY;
}
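
On MACs newer than the 82543 the same transaction is hardware-assisted through MDIC, as in the hunk above: pack the op-code and addresses into one register, poll READY for up to 64 * 50 us, and check ERROR before taking the low 16 data bits. A sketch with register-access callbacks as stand-ins:

#include <stdint.h>

#define MDIC_OP_READ 0x08000000u  /* bit layout as in this file */
#define MDIC_READY   0x10000000u
#define MDIC_ERROR   0x40000000u

static int mdic_phy_read(uint32_t (*rd)(void), void (*wr)(uint32_t),
                         void (*delay_us)(int),
                         uint32_t phy_addr, uint32_t reg_addr, uint16_t *out)
{
    uint32_t mdic = (reg_addr << 16) | (phy_addr << 21) | MDIC_OP_READ;
    int i;

    wr(mdic);
    for (i = 0; i < 64; i++) {
        delay_us(50);
        mdic = rd();
        if (mdic & MDIC_READY)
            break;
    }
    if (!(mdic & MDIC_READY) || (mdic & MDIC_ERROR))
        return -1;
    *out = (uint16_t)mdic;   /* data sits in the low 16 bits */
    return 0;
}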
@@ -3505,7 +3523,7 @@ e1000_write_phy_reg(struct e1000_hw *hw,
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(uint16_t)reg_addr);
- if(ret_val) {
+ if (ret_val) {
e1000_swfw_sync_release(hw, swfw);
return ret_val;
}
@@ -3550,12 +3568,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
DEBUGFUNC("e1000_write_phy_reg_ex");
- if(reg_addr > MAX_PHY_REG_ADDRESS) {
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
- if(hw->mac_type > e1000_82543) {
+ if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, register address, and data intended
* for the PHY register in the MDI Control register. The MAC will take
* care of interfacing with the PHY to send the desired data.
@@ -3568,12 +3586,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
E1000_WRITE_REG(hw, MDIC, mdic);
/* Poll the ready bit to see if the MDI write completed */
- for(i = 0; i < 640; i++) {
+ for (i = 0; i < 641; i++) {
udelay(5);
mdic = E1000_READ_REG(hw, MDIC);
- if(mdic & E1000_MDIC_READY) break;
+ if (mdic & E1000_MDIC_READY) break;
}
- if(!(mdic & E1000_MDIC_READY)) {
+ if (!(mdic & E1000_MDIC_READY)) {
DEBUGOUT("MDI Write did not complete\n");
return -E1000_ERR_PHY;
}
@@ -3685,7 +3703,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
DEBUGOUT("Resetting Phy...\n");
- if(hw->mac_type > e1000_82543) {
+ if (hw->mac_type > e1000_82543) {
if ((hw->mac_type == e1000_80003es2lan) &&
(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
swfw = E1000_SWFW_PHY1_SM;
@@ -3733,7 +3751,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
}
udelay(150);
- if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
/* Configure activity LED after PHY reset */
led_ctrl = E1000_READ_REG(hw, LEDCTL);
led_ctrl &= IGP_ACTIVITY_LED_MASK;
@@ -3743,14 +3761,13 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
/* Wait for FW to finish PHY configuration. */
ret_val = e1000_get_phy_cfg_done(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
e1000_release_software_semaphore(hw);
- if ((hw->mac_type == e1000_ich8lan) &&
- (hw->phy_type == e1000_phy_igp_3)) {
- ret_val = e1000_init_lcd_from_nvm(hw);
- if (ret_val)
- return ret_val;
- }
+ if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
+ ret_val = e1000_init_lcd_from_nvm(hw);
+
return ret_val;
}
@@ -3781,25 +3798,25 @@ e1000_phy_reset(struct e1000_hw *hw)
case e1000_82572:
case e1000_ich8lan:
ret_val = e1000_phy_hw_reset(hw);
- if(ret_val)
+ if (ret_val)
return ret_val;
break;
default:
ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= MII_CR_RESET;
ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
udelay(1);
break;
}
- if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+ if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
e1000_phy_init_script(hw);
return E1000_SUCCESS;
@@ -3877,8 +3894,8 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
if (hw->kmrn_lock_loss_workaround_disabled)
return E1000_SUCCESS;
- /* Make sure link is up before proceeding. If not just return.
- * Attempting this while link is negotiating fouls up link
+ /* Make sure link is up before proceeding. If not just return.
+ * Attempting this while link is negotiating fouled up link
* stability */
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
@@ -3955,34 +3972,34 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
hw->phy_id = (uint32_t) (phy_id_high << 16);
udelay(20);
ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK);
hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82543:
- if(hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
+ if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
break;
case e1000_82544:
- if(hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
+ if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
break;
case e1000_82540:
case e1000_82545:
case e1000_82545_rev_3:
case e1000_82546:
case e1000_82546_rev_3:
- if(hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
+ if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
break;
case e1000_82541:
case e1000_82541_rev_2:
case e1000_82547:
case e1000_82547_rev_2:
- if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
+ if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
break;
case e1000_82573:
- if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
+ if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
break;
case e1000_80003es2lan:
if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
@@ -4021,14 +4038,14 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
do {
if (hw->phy_type != e1000_phy_gg82563) {
ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
- if(ret_val) break;
+ if (ret_val) break;
}
ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
- if(ret_val) break;
+ if (ret_val) break;
ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
- if(ret_val) break;
+ if (ret_val) break;
ret_val = E1000_SUCCESS;
- } while(0);
+ } while (0);
return ret_val;
}
@@ -4060,23 +4077,23 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
/* Check polarity status */
ret_val = e1000_check_polarity(hw, &polarity);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_info->cable_polarity = polarity;
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >>
IGP01E1000_PSSR_MDIX_SHIFT;
- if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
/* Local/Remote Receiver Information are only valid at 1000 Mbps */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
@@ -4086,19 +4103,19 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
/* Get cable length */
ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Translate to old method */
average = (max_length + min_length) / 2;
- if(average <= e1000_igp_cable_length_50)
+ if (average <= e1000_igp_cable_length_50)
phy_info->cable_length = e1000_cable_length_50;
- else if(average <= e1000_igp_cable_length_80)
+ else if (average <= e1000_igp_cable_length_80)
phy_info->cable_length = e1000_cable_length_50_80;
- else if(average <= e1000_igp_cable_length_110)
+ else if (average <= e1000_igp_cable_length_110)
phy_info->cable_length = e1000_cable_length_80_110;
- else if(average <= e1000_igp_cable_length_140)
+ else if (average <= e1000_igp_cable_length_140)
phy_info->cable_length = e1000_cable_length_110_140;
else
phy_info->cable_length = e1000_cable_length_140;
@@ -4174,7 +4191,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_info->extended_10bt_distance =
@@ -4186,12 +4203,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
/* Check polarity status */
ret_val = e1000_check_polarity(hw, &polarity);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_info->cable_polarity = polarity;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >>
@@ -4214,7 +4231,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
}
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
@@ -4251,20 +4268,20 @@ e1000_phy_get_info(struct e1000_hw *hw,
phy_info->local_rx = e1000_1000t_rx_status_undefined;
phy_info->remote_rx = e1000_1000t_rx_status_undefined;
- if(hw->media_type != e1000_media_type_copper) {
+ if (hw->media_type != e1000_media_type_copper) {
DEBUGOUT("PHY info is only valid for copper media\n");
return -E1000_ERR_CONFIG;
}
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+ if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
DEBUGOUT("PHY info is only valid if link is up\n");
return -E1000_ERR_CONFIG;
}
@@ -4284,7 +4301,7 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_validate_mdi_settings");
- if(!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+ if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
DEBUGOUT("Invalid MDI setting detected\n");
hw->mdix = 1;
return -E1000_ERR_CONFIG;
@@ -4331,7 +4348,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom->type = e1000_eeprom_microwire;
eeprom->opcode_bits = 3;
eeprom->delay_usec = 50;
- if(eecd & E1000_EECD_SIZE) {
+ if (eecd & E1000_EECD_SIZE) {
eeprom->word_size = 256;
eeprom->address_bits = 8;
} else {
@@ -4399,7 +4416,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
}
eeprom->use_eerd = TRUE;
eeprom->use_eewr = TRUE;
- if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+ if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
eeprom->type = e1000_eeprom_flash;
eeprom->word_size = 2048;
@@ -4460,17 +4477,17 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
/* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
* 32KB (incremented by powers of 2).
*/
- if(hw->mac_type <= e1000_82547_rev_2) {
+ if (hw->mac_type <= e1000_82547_rev_2) {
/* Set to default value for initial eeprom read. */
eeprom->word_size = 64;
ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
- if(ret_val)
+ if (ret_val)
return ret_val;
eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
/* 256B eeprom size was not supported in earlier hardware, so we
* bump eeprom_size up one to ensure that "1" (which maps to 256B)
* is never the result used in the shifting logic below. */
- if(eeprom_size)
+ if (eeprom_size)
eeprom_size++;
} else {
eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
@@ -4555,7 +4572,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
*/
eecd &= ~E1000_EECD_DI;
- if(data & mask)
+ if (data & mask)
eecd |= E1000_EECD_DI;
E1000_WRITE_REG(hw, EECD, eecd);
@@ -4568,7 +4585,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
mask = mask >> 1;
- } while(mask);
+ } while (mask);
/* We leave the "DI" bit set to "0" when we leave this routine. */
eecd &= ~E1000_EECD_DI;
@@ -4600,14 +4617,14 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
data = 0;
- for(i = 0; i < count; i++) {
+ for (i = 0; i < count; i++) {
data = data << 1;
e1000_raise_ee_clk(hw, &eecd);
eecd = E1000_READ_REG(hw, EECD);
eecd &= ~(E1000_EECD_DI);
- if(eecd & E1000_EECD_DO)
+ if (eecd & E1000_EECD_DO)
data |= 1;
e1000_lower_ee_clk(hw, &eecd);
@@ -4638,17 +4655,17 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
if (hw->mac_type != e1000_82573) {
/* Request EEPROM Access */
- if(hw->mac_type > e1000_82544) {
+ if (hw->mac_type > e1000_82544) {
eecd |= E1000_EECD_REQ;
E1000_WRITE_REG(hw, EECD, eecd);
eecd = E1000_READ_REG(hw, EECD);
- while((!(eecd & E1000_EECD_GNT)) &&
+ while ((!(eecd & E1000_EECD_GNT)) &&
(i < E1000_EEPROM_GRANT_ATTEMPTS)) {
i++;
udelay(5);
eecd = E1000_READ_REG(hw, EECD);
}
- if(!(eecd & E1000_EECD_GNT)) {
+ if (!(eecd & E1000_EECD_GNT)) {
eecd &= ~E1000_EECD_REQ;
E1000_WRITE_REG(hw, EECD, eecd);
DEBUGOUT("Could not acquire EEPROM grant\n");
@@ -4691,7 +4708,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
eecd = E1000_READ_REG(hw, EECD);
- if(eeprom->type == e1000_eeprom_microwire) {
+ if (eeprom->type == e1000_eeprom_microwire) {
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
E1000_WRITE_REG(hw, EECD, eecd);
E1000_WRITE_FLUSH(hw);
@@ -4714,7 +4731,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
E1000_WRITE_REG(hw, EECD, eecd);
E1000_WRITE_FLUSH(hw);
udelay(eeprom->delay_usec);
- } else if(eeprom->type == e1000_eeprom_spi) {
+ } else if (eeprom->type == e1000_eeprom_spi) {
/* Toggle CS to flush commands */
eecd |= E1000_EECD_CS;
E1000_WRITE_REG(hw, EECD, eecd);
@@ -4748,7 +4765,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
E1000_WRITE_REG(hw, EECD, eecd);
udelay(hw->eeprom.delay_usec);
- } else if(hw->eeprom.type == e1000_eeprom_microwire) {
+ } else if (hw->eeprom.type == e1000_eeprom_microwire) {
/* cleanup eeprom */
/* CS on Microwire is active-high */
@@ -4770,7 +4787,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
}
/* Stop requesting EEPROM access */
- if(hw->mac_type > e1000_82544) {
+ if (hw->mac_type > e1000_82544) {
eecd &= ~E1000_EECD_REQ;
E1000_WRITE_REG(hw, EECD, eecd);
}
@@ -4808,12 +4825,12 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
retry_count += 5;
e1000_standby_eeprom(hw);
- } while(retry_count < EEPROM_MAX_RETRY_SPI);
+ } while (retry_count < EEPROM_MAX_RETRY_SPI);
/* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
* only 0-5mSec on 5V devices)
*/
- if(retry_count >= EEPROM_MAX_RETRY_SPI) {
+ if (retry_count >= EEPROM_MAX_RETRY_SPI) {
DEBUGOUT("SPI EEPROM Status error\n");
return -E1000_ERR_EEPROM;
}
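
The retry loop above in isolation: poll the SPI part's status register in 5 us steps until its internal write cycle ends, capped at EEPROM_MAX_RETRY_SPI (5000 in this file, roughly 5 ms of delay). A sketch with the status read and delay as stand-in callbacks:

#include <stdint.h>
#include <stdbool.h>

#define RDSR_BUSY 0x01  /* write-in-progress bit of the SPI status reg */

static bool spi_eeprom_ready(uint8_t (*read_status)(void),
                             void (*delay_us)(int))
{
    int waited;

    for (waited = 0; waited < 5000; waited += 5) {
        if (!(read_status() & RDSR_BUSY))
            return true;
        delay_us(5);
    }
    return false;
}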
@@ -4844,7 +4861,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
/* A check for invalid values: offset too large, too many words, and not
* enough words.
*/
- if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
(words == 0)) {
DEBUGOUT("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
@@ -4852,7 +4869,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
/* FLASH reads without acquiring the semaphore are safe */
if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
- hw->eeprom.use_eerd == FALSE) {
+ hw->eeprom.use_eerd == FALSE) {
switch (hw->mac_type) {
case e1000_80003es2lan:
break;
@@ -4879,7 +4896,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
uint16_t word_in;
uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
- if(e1000_spi_eeprom_ready(hw)) {
+ if (e1000_spi_eeprom_ready(hw)) {
e1000_release_eeprom(hw);
return -E1000_ERR_EEPROM;
}
@@ -4887,7 +4904,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
e1000_standby_eeprom(hw);
/* Some SPI eeproms use the 8th address bit embedded in the opcode */
- if((eeprom->address_bits == 8) && (offset >= 128))
+ if ((eeprom->address_bits == 8) && (offset >= 128))
read_opcode |= EEPROM_A8_OPCODE_SPI;
/* Send the READ command (opcode + addr) */
@@ -4903,7 +4920,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
word_in = e1000_shift_in_ee_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
}
- } else if(eeprom->type == e1000_eeprom_microwire) {
+ } else if (eeprom->type == e1000_eeprom_microwire) {
for (i = 0; i < words; i++) {
/* Send the READ command (opcode + addr) */
e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
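
The SPI branch above reduces, per word, to: fold address bit 8 into the opcode on small parts, clock out the opcode and byte address, clock in sixteen bits, and byte-swap the result into host order. A sketch with the shift helpers as stand-in callbacks (opcode values mirror this file's EEPROM_*_OPCODE_SPI constants):

#include <stdint.h>

#define READ_OPCODE_SPI 0x03
#define A8_OPCODE_SPI   0x08  /* address bit 8 folded into the opcode */

static uint16_t spi_read_word(void (*shift_out)(uint16_t bits, int count),
                              uint16_t (*shift_in16)(void),
                              uint16_t word_offset, int address_bits)
{
    uint8_t op = READ_OPCODE_SPI;
    uint16_t w;

    if (address_bits == 8 && word_offset >= 128)
        op |= A8_OPCODE_SPI;

    shift_out(op, 8);
    shift_out((uint16_t)(word_offset * 2), address_bits);
    w = shift_in16();
    return (uint16_t)((w >> 8) | (w << 8));  /* swap to host order */
}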
@@ -4948,7 +4965,7 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
E1000_WRITE_REG(hw, EERD, eerd);
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
- if(error) {
+ if (error) {
break;
}
data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
@@ -4985,7 +5002,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
E1000_EEPROM_RW_REG_START;
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
- if(error) {
+ if (error) {
break;
}
@@ -4993,7 +5010,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
- if(error) {
+ if (error) {
break;
}
}
@@ -5014,13 +5031,13 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
uint32_t i, reg = 0;
int32_t done = E1000_ERR_EEPROM;
- for(i = 0; i < attempts; i++) {
- if(eerd == E1000_EEPROM_POLL_READ)
+ for (i = 0; i < attempts; i++) {
+ if (eerd == E1000_EEPROM_POLL_READ)
reg = E1000_READ_REG(hw, EERD);
else
reg = E1000_READ_REG(hw, EEWR);
- if(reg & E1000_EEPROM_RW_REG_DONE) {
+ if (reg & E1000_EEPROM_RW_REG_DONE) {
done = E1000_SUCCESS;
break;
}
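
The EERD/EEWR poll in the hunk above is the driver's standard bounded-poll idiom: read the status register up to a fixed number of attempts, exit as soon as the DONE bit appears, and report a timeout otherwise. A minimal user-space sketch of the same idiom follows; the register read and the constants are stand-ins, not the driver's API.

#include <stdint.h>
#include <stdio.h>

#define RW_REG_DONE   0x00000010   /* stand-in for E1000_EEPROM_RW_REG_DONE */
#define POLL_ATTEMPTS 100000

/* Fake register that reports DONE on the fifth read; a real driver
 * would do an MMIO read here. */
static uint32_t read_reg(void)
{
    static int n;
    return (++n >= 5) ? RW_REG_DONE : 0;
}

static int poll_done(void)
{
    uint32_t i;
    for (i = 0; i < POLL_ATTEMPTS; i++) {
        if (read_reg() & RW_REG_DONE)
            return 0;               /* success */
        /* the driver delays briefly between reads */
    }
    return -1;                      /* timed out */
}

int main(void)
{
    printf("poll: %d\n", poll_done());
    return 0;
}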
@@ -5052,7 +5069,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
eecd = ((eecd >> 15) & 0x03);
/* If both bits are set, device is Flash type */
- if(eecd == 0x03) {
+ if (eecd == 0x03) {
return FALSE;
}
}
@@ -5117,7 +5134,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
checksum += eeprom_data;
}
- if(checksum == (uint16_t) EEPROM_SUM)
+ if (checksum == (uint16_t) EEPROM_SUM)
return E1000_SUCCESS;
else {
DEBUGOUT("EEPROM Checksum Invalid\n");
@@ -5142,15 +5159,15 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
DEBUGFUNC("e1000_update_eeprom_checksum");
- for(i = 0; i < EEPROM_CHECKSUM_REG; i++) {
- if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
}
checksum = (uint16_t) EEPROM_SUM - checksum;
- if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
DEBUGOUT("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
} else if (hw->eeprom.type == e1000_eeprom_flash) {
@@ -5192,14 +5209,14 @@ e1000_write_eeprom(struct e1000_hw *hw,
/* A check for invalid values: offset too large, too many words, and not
* enough words.
*/
- if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
(words == 0)) {
DEBUGOUT("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
}
/* 82573 writes only through eewr */
- if(eeprom->use_eewr == TRUE)
+ if (eeprom->use_eewr == TRUE)
return e1000_write_eeprom_eewr(hw, offset, words, data);
if (eeprom->type == e1000_eeprom_ich8)
@@ -5209,7 +5226,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
return -E1000_ERR_EEPROM;
- if(eeprom->type == e1000_eeprom_microwire) {
+ if (eeprom->type == e1000_eeprom_microwire) {
status = e1000_write_eeprom_microwire(hw, offset, words, data);
} else {
status = e1000_write_eeprom_spi(hw, offset, words, data);
@@ -5245,7 +5262,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
while (widx < words) {
uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI;
- if(e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+ if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
e1000_standby_eeprom(hw);
@@ -5256,7 +5273,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
e1000_standby_eeprom(hw);
/* Some SPI eeproms use the 8th address bit embedded in the opcode */
- if((eeprom->address_bits == 8) && (offset >= 128))
+ if ((eeprom->address_bits == 8) && (offset >= 128))
write_opcode |= EEPROM_A8_OPCODE_SPI;
/* Send the Write command (8-bit opcode + addr) */
@@ -5278,7 +5295,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
* operation, while the smaller eeproms are capable of an 8-byte
* PAGE WRITE operation. Break the inner loop to pass new address
*/
- if((((offset + widx)*2) % eeprom->page_size) == 0) {
+ if ((((offset + widx)*2) % eeprom->page_size) == 0) {
e1000_standby_eeprom(hw);
break;
}
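
The boundary test above ends a burst whenever the byte address (offset + widx)*2 crosses a multiple of the device's page size, since SPI EEPROMs only accept one page of data per WRITE command. A sketch of the same chunking, assuming a hypothetical 8-byte page:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_BYTES 8   /* small SPI parts use 8, larger ones 32 */

/* Print the word ranges a multi-word write would be split into. */
static void plan_write(uint16_t offset, uint16_t words)
{
    uint16_t widx = 0;
    while (widx < words) {
        uint16_t start = widx;
        while (widx < words) {
            widx++;
            /* Stop the burst at a page boundary (byte address). */
            if ((((offset + widx) * 2) % PAGE_SIZE_BYTES) == 0)
                break;
        }
        printf("burst: words %u..%u\n", offset + start, offset + widx - 1);
    }
}

int main(void)
{
    plan_write(3, 10);
    return 0;
}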
@@ -5344,12 +5361,12 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
* signal that the command has been completed by raising the DO signal.
* If DO does not go high in 10 milliseconds, then error out.
*/
- for(i = 0; i < 200; i++) {
+ for (i = 0; i < 200; i++) {
eecd = E1000_READ_REG(hw, EECD);
- if(eecd & E1000_EECD_DO) break;
+ if (eecd & E1000_EECD_DO) break;
udelay(50);
}
- if(i == 200) {
+ if (i == 200) {
DEBUGOUT("EEPROM Write did not complete\n");
return -E1000_ERR_EEPROM;
}
@@ -5540,40 +5557,6 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
}
/******************************************************************************
- * Reads the adapter's part number from the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- * part_num - Adapter's part number
- *****************************************************************************/
-int32_t
-e1000_read_part_num(struct e1000_hw *hw,
- uint32_t *part_num)
-{
- uint16_t offset = EEPROM_PBA_BYTE_1;
- uint16_t eeprom_data;
-
- DEBUGFUNC("e1000_read_part_num");
-
- /* Get word 0 from EEPROM */
- if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
- /* Save word 0 in upper half of part_num */
- *part_num = (uint32_t) (eeprom_data << 16);
-
- /* Get word 1 from EEPROM */
- if(e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
- /* Save word 1 in lower half of part_num */
- *part_num |= eeprom_data;
-
- return E1000_SUCCESS;
-}
-
-/******************************************************************************
* Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
* second function of dual function devices
*
@@ -5587,9 +5570,9 @@ e1000_read_mac_addr(struct e1000_hw * hw)
DEBUGFUNC("e1000_read_mac_addr");
- for(i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+ for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
offset = i >> 1;
- if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -5604,12 +5587,12 @@ e1000_read_mac_addr(struct e1000_hw * hw)
case e1000_82546_rev_3:
case e1000_82571:
case e1000_80003es2lan:
- if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+ if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
hw->perm_mac_addr[5] ^= 0x01;
break;
}
- for(i = 0; i < NODE_ADDRESS_SIZE; i++)
+ for (i = 0; i < NODE_ADDRESS_SIZE; i++)
hw->mac_addr[i] = hw->perm_mac_addr[i];
return E1000_SUCCESS;
}
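
The loop above reads NODE_ADDRESS_SIZE/2 EEPROM words and splits each into two bytes, low byte first, then flips bit 0 of the last byte when the STATUS register marks the device as the second LAN function. A sketch over an in-memory word array (the values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NODE_ADDRESS_SIZE 6

int main(void)
{
    /* Three EEPROM words; each holds two MAC bytes, low byte first. */
    uint16_t ee[3] = { 0x1100, 0x3322, 0x5544 };
    uint8_t mac[NODE_ADDRESS_SIZE];
    int is_second_port = 1;   /* would come from STATUS.FUNC_1 */
    int i;

    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
        mac[i]     = ee[i >> 1] & 0x00FF;
        mac[i + 1] = ee[i >> 1] >> 8;
    }
    if (is_second_port)
        mac[5] ^= 0x01;       /* second function gets the LSB flipped */

    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
        printf("%02X%c", mac[i], i == 5 ? '\n' : ':');
    return 0;
}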
@@ -5648,7 +5631,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
/* Zero out the other 15 receive addresses. */
DEBUGOUT("Clearing RAR[1-15]\n");
- for(i = 1; i < rar_num; i++) {
+ for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -5699,7 +5682,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
num_rar_entry -= 1;
- for(i = rar_used_count; i < num_rar_entry; i++) {
+ for (i = rar_used_count; i < num_rar_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -5711,13 +5694,13 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
num_mta_entry = E1000_NUM_MTA_REGISTERS;
if (hw->mac_type == e1000_ich8lan)
num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
- for(i = 0; i < num_mta_entry; i++) {
+ for (i = 0; i < num_mta_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
E1000_WRITE_FLUSH(hw);
}
/* Add the new addresses */
- for(i = 0; i < mc_addr_count; i++) {
+ for (i = 0; i < mc_addr_count; i++) {
DEBUGOUT(" Adding the multicast addresses:\n");
DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)],
@@ -5849,7 +5832,7 @@ e1000_mta_set(struct e1000_hw *hw,
* in the MTA, save off the previous entry before writing and
* restore the old value after writing.
*/
- if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+ if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
E1000_WRITE_FLUSH(hw);
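
For context on hash_reg and hash_bit above: the multicast hash value is split into a register index (the bits above the low five, selecting one of the 32-bit MTA registers) and a bit index (the low five bits); the 82544 save/restore exists because a write to one MTA register there can clobber its odd/even partner. A sketch of the index split, with a representative hash value (the masks follow this driver family, hedged):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t hash_value = 0x0ABC;                   /* illustrative hash */
    uint32_t hash_reg = (hash_value >> 5) & 0x7F;   /* which MTA register */
    uint32_t hash_bit = hash_value & 0x1F;          /* which bit in it */
    uint32_t mta = 0;                               /* register stand-in */

    mta |= 1u << hash_bit;
    printf("MTA[%u] |= 1 << %u -> %08X\n", hash_reg, hash_bit, mta);
    return 0;
}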
@@ -5999,7 +5982,7 @@ e1000_id_led_init(struct e1000_hw * hw)
DEBUGFUNC("e1000_id_led_init");
- if(hw->mac_type < e1000_82540) {
+ if (hw->mac_type < e1000_82540) {
/* Nothing to do */
return E1000_SUCCESS;
}
@@ -6009,7 +5992,7 @@ e1000_id_led_init(struct e1000_hw * hw)
hw->ledctl_mode1 = hw->ledctl_default;
hw->ledctl_mode2 = hw->ledctl_default;
- if(e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+ if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -6026,7 +6009,7 @@ e1000_id_led_init(struct e1000_hw * hw)
}
for (i = 0; i < 4; i++) {
temp = (eeprom_data >> (i << 2)) & led_mask;
- switch(temp) {
+ switch (temp) {
case ID_LED_ON1_DEF2:
case ID_LED_ON1_ON2:
case ID_LED_ON1_OFF2:
@@ -6043,7 +6026,7 @@ e1000_id_led_init(struct e1000_hw * hw)
/* Do nothing */
break;
}
- switch(temp) {
+ switch (temp) {
case ID_LED_DEF1_ON2:
case ID_LED_ON1_ON2:
case ID_LED_OFF1_ON2:
@@ -6077,7 +6060,7 @@ e1000_setup_led(struct e1000_hw *hw)
DEBUGFUNC("e1000_setup_led");
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
@@ -6091,16 +6074,16 @@ e1000_setup_led(struct e1000_hw *hw)
/* Turn off PHY Smart Power Down (if enabled) */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
&hw->phy_spd_default);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
(uint16_t)(hw->phy_spd_default &
~IGP01E1000_GMII_SPD));
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Fall Through */
default:
- if(hw->media_type == e1000_media_type_fiber) {
+ if (hw->media_type == e1000_media_type_fiber) {
ledctl = E1000_READ_REG(hw, LEDCTL);
/* Save current LEDCTL settings */
hw->ledctl_default = ledctl;
@@ -6111,7 +6094,7 @@ e1000_setup_led(struct e1000_hw *hw)
ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
E1000_LEDCTL_LED0_MODE_SHIFT);
E1000_WRITE_REG(hw, LEDCTL, ledctl);
- } else if(hw->media_type == e1000_media_type_copper)
+ } else if (hw->media_type == e1000_media_type_copper)
E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
break;
}
@@ -6119,6 +6102,7 @@ e1000_setup_led(struct e1000_hw *hw)
return E1000_SUCCESS;
}
+
/******************************************************************************
* Used on 82571 and later Si that has LED blink bits.
* Callers must use their own timer and should have already called
@@ -6169,7 +6153,7 @@ e1000_cleanup_led(struct e1000_hw *hw)
DEBUGFUNC("e1000_cleanup_led");
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
@@ -6183,7 +6167,7 @@ e1000_cleanup_led(struct e1000_hw *hw)
/* Turn on PHY Smart Power Down (if previously enabled) */
ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
hw->phy_spd_default);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Fall Through */
default:
@@ -6211,7 +6195,7 @@ e1000_led_on(struct e1000_hw *hw)
DEBUGFUNC("e1000_led_on");
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
@@ -6220,7 +6204,7 @@ e1000_led_on(struct e1000_hw *hw)
ctrl |= E1000_CTRL_SWDPIO0;
break;
case e1000_82544:
- if(hw->media_type == e1000_media_type_fiber) {
+ if (hw->media_type == e1000_media_type_fiber) {
/* Set SW Definable Pin 0 to turn on the LED */

ctrl |= E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
@@ -6231,7 +6215,7 @@ e1000_led_on(struct e1000_hw *hw)
}
break;
default:
- if(hw->media_type == e1000_media_type_fiber) {
+ if (hw->media_type == e1000_media_type_fiber) {
/* Clear SW Definable Pin 0 to turn on the LED */
ctrl &= ~E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
@@ -6262,7 +6246,7 @@ e1000_led_off(struct e1000_hw *hw)
DEBUGFUNC("e1000_led_off");
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
@@ -6271,7 +6255,7 @@ e1000_led_off(struct e1000_hw *hw)
ctrl |= E1000_CTRL_SWDPIO0;
break;
case e1000_82544:
- if(hw->media_type == e1000_media_type_fiber) {
+ if (hw->media_type == e1000_media_type_fiber) {
/* Clear SW Definable Pin 0 to turn off the LED */
ctrl &= ~E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
@@ -6282,7 +6266,7 @@ e1000_led_off(struct e1000_hw *hw)
}
break;
default:
- if(hw->media_type == e1000_media_type_fiber) {
+ if (hw->media_type == e1000_media_type_fiber) {
/* Set SW Defineable Pin 0 to turn off the LED */
ctrl |= E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
@@ -6306,7 +6290,7 @@ e1000_led_off(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
-static void
+void
e1000_clear_hw_cntrs(struct e1000_hw *hw)
{
volatile uint32_t temp;
@@ -6369,7 +6353,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, MPTC);
temp = E1000_READ_REG(hw, BPTC);
- if(hw->mac_type < e1000_82543) return;
+ if (hw->mac_type < e1000_82543) return;
temp = E1000_READ_REG(hw, ALGNERRC);
temp = E1000_READ_REG(hw, RXERRC);
@@ -6378,13 +6362,13 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, TSCTC);
temp = E1000_READ_REG(hw, TSCTFC);
- if(hw->mac_type <= e1000_82544) return;
+ if (hw->mac_type <= e1000_82544) return;
temp = E1000_READ_REG(hw, MGTPRC);
temp = E1000_READ_REG(hw, MGTPDC);
temp = E1000_READ_REG(hw, MGTPTC);
- if(hw->mac_type <= e1000_82547_rev_2) return;
+ if (hw->mac_type <= e1000_82547_rev_2) return;
temp = E1000_READ_REG(hw, IAC);
temp = E1000_READ_REG(hw, ICRXOC);
@@ -6415,8 +6399,8 @@ e1000_reset_adaptive(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_reset_adaptive");
- if(hw->adaptive_ifs) {
- if(!hw->ifs_params_forced) {
+ if (hw->adaptive_ifs) {
+ if (!hw->ifs_params_forced) {
hw->current_ifs_val = 0;
hw->ifs_min_val = IFS_MIN;
hw->ifs_max_val = IFS_MAX;
@@ -6443,12 +6427,12 @@ e1000_update_adaptive(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_update_adaptive");
- if(hw->adaptive_ifs) {
- if((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
- if(hw->tx_packet_delta > MIN_NUM_XMITS) {
+ if (hw->adaptive_ifs) {
+ if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+ if (hw->tx_packet_delta > MIN_NUM_XMITS) {
hw->in_ifs_mode = TRUE;
- if(hw->current_ifs_val < hw->ifs_max_val) {
- if(hw->current_ifs_val == 0)
+ if (hw->current_ifs_val < hw->ifs_max_val) {
+ if (hw->current_ifs_val == 0)
hw->current_ifs_val = hw->ifs_min_val;
else
hw->current_ifs_val += hw->ifs_step_size;
@@ -6456,7 +6440,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
}
}
} else {
- if(hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
hw->current_ifs_val = 0;
hw->in_ifs_mode = FALSE;
E1000_WRITE_REG(hw, AIT, 0);
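
The adaptive-IFS logic above throttles transmit spacing when collisions outpace completed transmits: once transmit activity passes MIN_NUM_XMITS and the collision/transmit ratio exceeds ifs_ratio, the IFS value steps up toward its maximum; when traffic calms down it resets to zero. A condensed sketch of the state machine (field names mirror the driver; the threshold values are illustrative stand-ins):

#include <stdio.h>

struct ifs_state {
    int current, min, max, step, ratio;
    int in_ifs_mode;
};

/* One watchdog tick: adjust IFS from the last interval's deltas. */
static void update_adaptive(struct ifs_state *s, int collisions, int tx_pkts)
{
    int min_xmits = 50;   /* stand-in for MIN_NUM_XMITS */

    if (collisions * s->ratio > tx_pkts && tx_pkts > min_xmits) {
        s->in_ifs_mode = 1;
        if (s->current < s->max)
            s->current = s->current ? s->current + s->step : s->min;
        /* driver: E1000_WRITE_REG(hw, AIT, s->current) */
    } else if (s->in_ifs_mode && tx_pkts <= min_xmits) {
        s->current = 0;
        s->in_ifs_mode = 0;
        /* driver: E1000_WRITE_REG(hw, AIT, 0) */
    }
}

int main(void)
{
    struct ifs_state s = { 0, 40, 80, 10, 4, 0 };
    update_adaptive(&s, 100, 60);   /* congested: IFS ramps up */
    printf("ifs=%d mode=%d\n", s.current, s.in_ifs_mode);
    update_adaptive(&s, 0, 10);     /* idle: IFS resets */
    printf("ifs=%d mode=%d\n", s.current, s.in_ifs_mode);
    return 0;
}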
@@ -6503,46 +6487,46 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
* This could be simplified if all environments supported
* 64-bit integers.
*/
- if(carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
stats->gorch++;
/* Is this a broadcast or multicast? Check broadcast first,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
- if((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
+ if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
/* Broadcast packet */
stats->bprc++;
- else if(*mac_addr & 0x01)
+ else if (*mac_addr & 0x01)
/* Multicast packet */
stats->mprc++;
- if(frame_len == hw->max_frame_size) {
+ if (frame_len == hw->max_frame_size) {
/* In this case, the hardware has overcounted the number of
* oversize frames.
*/
- if(stats->roc > 0)
+ if (stats->roc > 0)
stats->roc--;
}
/* Adjust the bin counters when the extra byte put the frame in the
* wrong bin. Remember that the frame_len was adjusted above.
*/
- if(frame_len == 64) {
+ if (frame_len == 64) {
stats->prc64++;
stats->prc127--;
- } else if(frame_len == 127) {
+ } else if (frame_len == 127) {
stats->prc127++;
stats->prc255--;
- } else if(frame_len == 255) {
+ } else if (frame_len == 255) {
stats->prc255++;
stats->prc511--;
- } else if(frame_len == 511) {
+ } else if (frame_len == 511) {
stats->prc511++;
stats->prc1023--;
- } else if(frame_len == 1023) {
+ } else if (frame_len == 1023) {
stats->prc1023++;
stats->prc1522--;
- } else if(frame_len == 1522) {
+ } else if (frame_len == 1522) {
stats->prc1522++;
}
}
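
When the TBI workaround accepts a frame that hardware counted as one byte longer, every length-histogram bin at the old boundary must be decremented and its neighbor incremented, which is all the if/else ladder above does. A table-driven sketch of the same correction:

#include <stdio.h>

/* RMON-style bins: 64, 65-127, 128-255, 256-511, 512-1023, 1024-1522 */
static unsigned prc[6] = { 10, 10, 10, 10, 10, 10 };
static const unsigned boundary[] = { 64, 127, 255, 511, 1023 };

/* Frame was counted in bin i+1 by hardware but belongs in bin i. */
static void rebin(unsigned frame_len)
{
    int i;
    for (i = 0; i < 5; i++) {
        if (frame_len == boundary[i]) {
            prc[i]++;
            prc[i + 1]--;
            return;
        }
    }
    if (frame_len == 1522)
        prc[5]++;   /* top bin has no neighbor to borrow from */
}

int main(void)
{
    rebin(255);
    printf("prc255=%u prc511=%u\n", prc[2], prc[3]);
    return 0;
}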
@@ -6582,10 +6566,10 @@ e1000_get_bus_info(struct e1000_hw *hw)
hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
e1000_bus_type_pcix : e1000_bus_type_pci;
- if(hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
e1000_bus_speed_66 : e1000_bus_speed_120;
- } else if(hw->bus_type == e1000_bus_type_pci) {
+ } else if (hw->bus_type == e1000_bus_type_pci) {
hw->bus_speed = (status & E1000_STATUS_PCI66) ?
e1000_bus_speed_66 : e1000_bus_speed_33;
} else {
@@ -6680,11 +6664,11 @@ e1000_get_cable_length(struct e1000_hw *hw,
*min_length = *max_length = 0;
/* Use old method for Phy older than IGP */
- if(hw->phy_type == e1000_phy_m88) {
+ if (hw->phy_type == e1000_phy_m88) {
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
@@ -6743,7 +6727,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
return -E1000_ERR_PHY;
break;
}
- } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
uint16_t cur_agc_value;
uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
@@ -6752,10 +6736,10 @@ e1000_get_cable_length(struct e1000_hw *hw,
IGP01E1000_PHY_AGC_C,
IGP01E1000_PHY_AGC_D};
/* Read the AGC registers for all channels */
- for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
@@ -6805,7 +6789,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- /* Getting bits 15:9, which represent the combination of coarse and
+ /* Getting bits 15:9, which represent the combination of coarse and
* fine gain values. The result is a number that can be put into
* the lookup table to obtain the approximate cable length. */
cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
@@ -6870,7 +6854,7 @@ e1000_check_polarity(struct e1000_hw *hw,
/* return the Polarity bit in the Status register. */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
*polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
M88E1000_PSSR_REV_POLARITY_SHIFT;
@@ -6880,18 +6864,18 @@ e1000_check_polarity(struct e1000_hw *hw,
/* Read the Status register to check the speed */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
* find the polarity status */
- if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
/* Read the GIG initialization PCS register (0x00B4) */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Check the polarity bits */
@@ -6940,7 +6924,7 @@ e1000_check_downshift(struct e1000_hw *hw)
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
@@ -6948,7 +6932,7 @@ e1000_check_downshift(struct e1000_hw *hw)
(hw->phy_type == e1000_phy_gg82563)) {
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
@@ -6988,42 +6972,42 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
DEBUGFUNC("e1000_config_dsp_after_link_change");
- if(hw->phy_type != e1000_phy_igp)
+ if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
- if(link_up) {
+ if (link_up) {
ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
- if(ret_val) {
+ if (ret_val) {
DEBUGOUT("Error getting link speed and duplex\n");
return ret_val;
}
- if(speed == SPEED_1000) {
+ if (speed == SPEED_1000) {
ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
if (ret_val)
return ret_val;
- if((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+ if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
min_length >= e1000_igp_cable_length_50) {
- for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
hw->dsp_config_state = e1000_dsp_config_activated;
}
- if((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+ if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
(min_length < e1000_igp_cable_length_50)) {
uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
@@ -7032,70 +7016,70 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
/* clear previous idle error counts */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
- for(i = 0; i < ffe_idle_err_timeout; i++) {
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
udelay(1000);
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
- if(idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+ if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
hw->ffe_config_state = e1000_ffe_config_active;
ret_val = e1000_write_phy_reg(hw,
IGP01E1000_PHY_DSP_FFE,
IGP01E1000_PHY_DSP_FFE_CM_CP);
- if(ret_val)
+ if (ret_val)
return ret_val;
break;
}
- if(idle_errs)
+ if (idle_errs)
ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
}
}
}
} else {
- if(hw->dsp_config_state == e1000_dsp_config_activated) {
+ if (hw->dsp_config_state == e1000_dsp_config_activated) {
/* Save off the current value of register 0x2F5B to be restored at
* the end of the routines. */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Disable the PHY transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
- if(ret_val)
+ if (ret_val)
return ret_val;
msec_delay_irq(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA);
- if(ret_val)
+ if (ret_val)
return ret_val;
- for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_RESTART_AUTONEG);
- if(ret_val)
+ if (ret_val)
return ret_val;
msec_delay_irq(20);
@@ -7103,40 +7087,40 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
/* Now enable the transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->dsp_config_state = e1000_dsp_config_enabled;
}
- if(hw->ffe_config_state == e1000_ffe_config_active) {
+ if (hw->ffe_config_state == e1000_ffe_config_active) {
/* Save off the current value of register 0x2F5B to be restored at
* the end of the routines. */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Disable the PHY transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
- if(ret_val)
+ if (ret_val)
return ret_val;
msec_delay_irq(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
IGP01E1000_PHY_DSP_FFE_DEFAULT);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_RESTART_AUTONEG);
- if(ret_val)
+ if (ret_val)
return ret_val;
msec_delay_irq(20);
@@ -7144,7 +7128,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
/* Now enable the transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->ffe_config_state = e1000_ffe_config_enabled;
@@ -7169,20 +7153,20 @@ e1000_set_phy_mode(struct e1000_hw *hw)
DEBUGFUNC("e1000_set_phy_mode");
- if((hw->mac_type == e1000_82545_rev_3) &&
- (hw->media_type == e1000_media_type_copper)) {
+ if ((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
- if(ret_val) {
+ if (ret_val) {
return ret_val;
}
- if((eeprom_data != EEPROM_RESERVED_WORD) &&
- (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->phy_reset_disable = FALSE;
@@ -7233,16 +7217,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
} else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
- if(!active) {
- if(hw->mac_type == e1000_82541_rev_2 ||
- hw->mac_type == e1000_82547_rev_2) {
+ if (!active) {
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
} else {
if (hw->mac_type == e1000_ich8lan) {
@@ -7264,13 +7248,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
} else if (hw->smart_speed == e1000_smart_speed_off) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
@@ -7281,19 +7265,19 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
- } else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
- (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
- (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+ } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
- if(hw->mac_type == e1000_82541_rev_2 ||
+ if (hw->mac_type == e1000_82541_rev_2 ||
hw->mac_type == e1000_82547_rev_2) {
phy_data |= IGP01E1000_GMII_FLEX_SPD;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
} else {
if (hw->mac_type == e1000_ich8lan) {
@@ -7310,12 +7294,12 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
/* When LPLU is enabled we should disable SmartSpeed */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
@@ -7345,14 +7329,14 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
uint16_t phy_data;
DEBUGFUNC("e1000_set_d0_lplu_state");
- if(hw->mac_type <= e1000_82547_rev_2)
+ if (hw->mac_type <= e1000_82547_rev_2)
return E1000_SUCCESS;
if (hw->mac_type == e1000_ich8lan) {
phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
} else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
@@ -7374,13 +7358,13 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
&phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
} else if (hw->smart_speed == e1000_smart_speed_off) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
@@ -7391,7 +7375,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
@@ -7410,12 +7394,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
/* When LPLU is enabled we should disable SmartSpeed */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
}
@@ -7436,7 +7420,7 @@ e1000_set_vco_speed(struct e1000_hw *hw)
DEBUGFUNC("e1000_set_vco_speed");
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
case e1000_82545_rev_3:
case e1000_82546_rev_3:
break;
@@ -7447,39 +7431,39 @@ e1000_set_vco_speed(struct e1000_hw *hw)
/* Set PHY register 30, page 5, bit 8 to 0 */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* Set PHY register 30, page 4, bit 11 to 1 */
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
phy_data |= M88E1000_PHY_VCO_REG_BIT11;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
- if(ret_val)
+ if (ret_val)
return ret_val;
return E1000_SUCCESS;
@@ -7558,7 +7542,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
{
uint8_t *tmp;
uint8_t *bufptr = buffer;
- uint32_t data;
+ uint32_t data = 0;
uint16_t remaining, i, j, prev_bytes;
/* sum = only sum of the data and it is not checksum */
@@ -7638,7 +7622,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
buffer = (uint8_t *) hdr;
i = length;
- while(i--)
+ while (i--)
sum += buffer[i];
hdr->checksum = 0 - sum;
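
The header checksum above is the usual two's-complement byte sum: with the checksum field zeroed, sum every byte of the header and store 0 - sum, so that re-summing the whole header afterwards yields zero. A sketch:

#include <stdint.h>
#include <stdio.h>

struct hdr { uint8_t cmd, len, checksum; };

int main(void)
{
    struct hdr h = { 0x5A, 0x10, 0 };
    const uint8_t *p = (const uint8_t *)&h;
    uint8_t sum = 0;
    size_t i;

    for (i = 0; i < sizeof(h); i++)
        sum += p[i];
    h.checksum = (uint8_t)(0 - sum);   /* re-summing now gives 0 */

    sum = 0;
    for (i = 0; i < sizeof(h); i++)
        sum += p[i];
    printf("verify sum = %u\n", sum);  /* prints 0 */
    return 0;
}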
@@ -7661,8 +7645,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
* returns - E1000_SUCCESS for success.
****************************************************************************/
static int32_t
-e1000_mng_write_commit(
- struct e1000_hw * hw)
+e1000_mng_write_commit(struct e1000_hw * hw)
{
uint32_t hicr;
@@ -7834,31 +7817,31 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
/* Disable the transmitter on the PHY */
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* This loop will early-out if the no-link condition has been met. */
- for(i = PHY_FORCE_TIME; i > 0; i--) {
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
/* Read the MII Status Register and wait for Link Status bit
* to be clear.
*/
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+ if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
msec_delay_irq(100);
}
@@ -7868,40 +7851,40 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
/* Now we will re-enable the transmitter on the PHY */
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
- if(ret_val)
+ if (ret_val)
return ret_val;
msec_delay_irq(50);
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
- if(ret_val)
+ if (ret_val)
return ret_val;
msec_delay_irq(50);
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
- if(ret_val)
+ if (ret_val)
return ret_val;
msec_delay_irq(50);
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
- if(ret_val)
+ if (ret_val)
return ret_val;
/* This loop will early-out if the link condition has been met. */
- for(i = PHY_FORCE_TIME; i > 0; i--) {
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
/* Read the MII Status Register and wait for Link Status bit
* to be set.
*/
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if(ret_val)
+ if (ret_val)
return ret_val;
- if(mii_status_reg & MII_SR_LINK_STATUS) break;
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
msec_delay_irq(100);
}
return E1000_SUCCESS;
@@ -7980,15 +7963,15 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
e1000_set_pci_express_master_disable(hw);
- while(timeout) {
- if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+ while (timeout) {
+ if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
break;
else
udelay(100);
timeout--;
}
- if(!timeout) {
+ if (!timeout) {
DEBUGOUT("Master requests are pending.\n");
return -E1000_ERR_MASTER_REQUESTS_PENDING;
}
@@ -8029,7 +8012,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
timeout--;
}
- if(!timeout) {
+ if (!timeout) {
DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
return -E1000_ERR_RESET;
}
@@ -8110,7 +8093,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
- if(!hw->eeprom_semaphore_present)
+ if (!hw->eeprom_semaphore_present)
return E1000_SUCCESS;
if (hw->mac_type == e1000_80003es2lan) {
@@ -8121,20 +8104,20 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
/* Get the FW semaphore. */
timeout = hw->eeprom.word_size + 1;
- while(timeout) {
+ while (timeout) {
swsm = E1000_READ_REG(hw, SWSM);
swsm |= E1000_SWSM_SWESMBI;
E1000_WRITE_REG(hw, SWSM, swsm);
/* if we managed to set the bit we got the semaphore. */
swsm = E1000_READ_REG(hw, SWSM);
- if(swsm & E1000_SWSM_SWESMBI)
+ if (swsm & E1000_SWSM_SWESMBI)
break;
udelay(50);
timeout--;
}
- if(!timeout) {
+ if (!timeout) {
/* Release semaphores */
e1000_put_hw_eeprom_semaphore(hw);
DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
@@ -8159,7 +8142,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
- if(!hw->eeprom_semaphore_present)
+ if (!hw->eeprom_semaphore_present)
return;
swsm = E1000_READ_REG(hw, SWSM);
@@ -8192,16 +8175,16 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
if (hw->mac_type != e1000_80003es2lan)
return E1000_SUCCESS;
- while(timeout) {
+ while (timeout) {
swsm = E1000_READ_REG(hw, SWSM);
/* If SMBI bit cleared, it is now set and we hold the semaphore */
- if(!(swsm & E1000_SWSM_SMBI))
+ if (!(swsm & E1000_SWSM_SMBI))
break;
msec_delay_irq(1);
timeout--;
}
- if(!timeout) {
+ if (!timeout) {
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
return -E1000_ERR_RESET;
}
@@ -8277,7 +8260,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
case e1000_82573:
case e1000_80003es2lan:
fwsm = E1000_READ_REG(hw, FWSM);
- if((fwsm & E1000_FWSM_MODE_MASK) != 0)
+ if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
return TRUE;
break;
case e1000_ich8lan:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 375b95518c3..a170e96251f 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -336,9 +336,9 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
-#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
+#define E1000_MNG_IAMT_MODE 0x3
#define E1000_MNG_ICH_IAMT_MODE 0x2
#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
@@ -385,7 +385,7 @@ struct e1000_host_mng_dhcp_cookie{
#endif
int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
- uint16_t length);
+ uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
@@ -470,6 +470,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
#define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F
@@ -523,7 +524,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
/* 802.1q VLAN Packet Sizes */
-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
/* Ethertype field values */
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
@@ -697,6 +698,7 @@ union e1000_rx_desc_packet_split {
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
+
/* Transmit Descriptor */
struct e1000_tx_desc {
uint64_t buffer_addr; /* Address of the descriptor's data buffer */
@@ -2086,7 +2088,7 @@ struct e1000_hw {
#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
* filtering */
#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
-#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
@@ -2172,7 +2174,7 @@ struct e1000_host_command_info {
#define E1000_MDALIGN 4096
-/* PCI-Ex registers */
+/* PCI-Ex registers*/
/* PCI-Ex Control Register */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
@@ -2224,7 +2226,7 @@ struct e1000_host_command_info {
#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
/* EEPROM Commands - SPI */
-#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
@@ -3082,10 +3084,10 @@ struct e1000_host_command_info {
/* DSP Distance Register (Page 5, Register 26) */
#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
- 1 = 50-80M;
- 2 = 80-110M;
- 3 = 110-140M;
- 4 = >140M */
+ 1 = 50-80M;
+ 2 = 80-110M;
+ 3 = 110-140M;
+ 4 = >140M */
/* Kumeran Mode Control Register (Page 193, Register 16) */
#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 726f43d5593..1d7c99947e9 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.2.7-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -48,7 +48,6 @@ static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
* {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
static struct pci_device_id e1000_pci_tbl[] = {
- INTEL_E1000_ETHERNET_DEVICE(0x1000),
INTEL_E1000_ETHERNET_DEVICE(0x1001),
INTEL_E1000_ETHERNET_DEVICE(0x1004),
INTEL_E1000_ETHERNET_DEVICE(0x1008),
@@ -99,6 +98,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x1098),
INTEL_E1000_ETHERNET_DEVICE(0x1099),
INTEL_E1000_ETHERNET_DEVICE(0x109A),
+ INTEL_E1000_ETHERNET_DEVICE(0x10A4),
INTEL_E1000_ETHERNET_DEVICE(0x10B5),
INTEL_E1000_ETHERNET_DEVICE(0x10B9),
INTEL_E1000_ETHERNET_DEVICE(0x10BA),
@@ -245,7 +245,7 @@ e1000_init_module(void)
printk(KERN_INFO "%s\n", e1000_copyright);
- ret = pci_module_init(&e1000_driver);
+ ret = pci_register_driver(&e1000_driver);
return ret;
}
@@ -485,7 +485,7 @@ e1000_up(struct e1000_adapter *adapter)
*
**/
-static void e1000_power_up_phy(struct e1000_adapter *adapter)
+void e1000_power_up_phy(struct e1000_adapter *adapter)
{
uint16_t mii_reg = 0;
@@ -682,9 +682,9 @@ e1000_probe(struct pci_dev *pdev,
unsigned long flash_start, flash_len;
static int cards_found = 0;
- static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
+ static int global_quad_port_a = 0; /* global port a indication for quad-port adapters */
int i, err, pci_using_dac;
- uint16_t eeprom_data;
+ uint16_t eeprom_data = 0;
uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
if ((err = pci_enable_device(pdev)))
return err;
@@ -696,21 +696,20 @@ e1000_probe(struct pci_dev *pdev,
if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
(err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
E1000_ERR("No usable DMA configuration, aborting\n");
- return err;
+ goto err_dma;
}
pci_using_dac = 0;
}
if ((err = pci_request_regions(pdev, e1000_driver_name)))
- return err;
+ goto err_pci_reg;
pci_set_master(pdev);
+ err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct e1000_adapter));
- if (!netdev) {
- err = -ENOMEM;
+ if (!netdev)
goto err_alloc_etherdev;
- }
SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -725,11 +724,10 @@ e1000_probe(struct pci_dev *pdev,
mmio_start = pci_resource_start(pdev, BAR_0);
mmio_len = pci_resource_len(pdev, BAR_0);
+ err = -EIO;
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
- if (!adapter->hw.hw_addr) {
- err = -EIO;
+ if (!adapter->hw.hw_addr)
goto err_ioremap;
- }
for (i = BAR_1; i <= BAR_5; i++) {
if (pci_resource_len(pdev, i) == 0)
@@ -774,6 +772,7 @@ e1000_probe(struct pci_dev *pdev,
if ((err = e1000_sw_init(adapter)))
goto err_sw_init;
+ err = -EIO;
/* Flash BAR mapping must happen after e1000_sw_init
* because it depends on mac_type */
if ((adapter->hw.mac_type == e1000_ich8lan) &&
@@ -781,24 +780,13 @@ e1000_probe(struct pci_dev *pdev,
flash_start = pci_resource_start(pdev, 1);
flash_len = pci_resource_len(pdev, 1);
adapter->hw.flash_address = ioremap(flash_start, flash_len);
- if (!adapter->hw.flash_address) {
- err = -EIO;
+ if (!adapter->hw.flash_address)
goto err_flashmap;
- }
}
- if ((err = e1000_check_phy_reset_block(&adapter->hw)))
+ if (e1000_check_phy_reset_block(&adapter->hw))
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
- /* if ksp3, indicate if it's port a being setup */
- if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
- e1000_ksp3_port_a == 0)
- adapter->ksp3_port_a = 1;
- e1000_ksp3_port_a++;
- /* Reset for multiple KP3 adapters */
- if (e1000_ksp3_port_a == 4)
- e1000_ksp3_port_a = 0;
-
if (adapter->hw.mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
@@ -830,7 +818,7 @@ e1000_probe(struct pci_dev *pdev,
if (e1000_init_eeprom_params(&adapter->hw)) {
E1000_ERR("EEPROM initialization failed\n");
- return -EIO;
+ goto err_eeprom;
}
/* before reading the EEPROM, reset the controller to
@@ -842,7 +830,6 @@ e1000_probe(struct pci_dev *pdev,
if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
- err = -EIO;
goto err_eeprom;
}
@@ -855,12 +842,9 @@ e1000_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
- err = -EIO;
goto err_eeprom;
}
- e1000_read_part_num(&adapter->hw, &(adapter->part_num));
-
e1000_get_bus_info(&adapter->hw);
init_timer(&adapter->tx_fifo_stall_timer);
@@ -921,7 +905,38 @@ e1000_probe(struct pci_dev *pdev,
break;
}
if (eeprom_data & eeprom_apme_mask)
- adapter->wol |= E1000_WUFC_MAG;
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /* now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82546GB_PCIE:
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->eeprom_wol = 0;
+ else
+ adapter->quad_port_a = 1;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
/* print bus type/speed/width info */
{
@@ -964,16 +979,33 @@ e1000_probe(struct pci_dev *pdev,
return 0;
err_register:
+ e1000_release_hw_control(adapter);
+err_eeprom:
+ if (!e1000_check_phy_reset_block(&adapter->hw))
+ e1000_phy_hw_reset(&adapter->hw);
+
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
err_flashmap:
+#ifdef CONFIG_E1000_NAPI
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ dev_put(&adapter->polling_netdev[i]);
+#endif
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+#ifdef CONFIG_E1000_NAPI
+ kfree(adapter->polling_netdev);
+#endif
err_sw_init:
-err_eeprom:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
return err;
}
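
The probe changes above convert early returns into the kernel's goto-ladder cleanup style: each failure jumps to a label that undoes only the steps already completed, in reverse order of setup. A minimal sketch of the pattern with stand-in setup steps:

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
    int err;
    void *regions, *netdev;

    regions = malloc(16);          /* model pci_request_regions() */
    if (!regions) {
        err = -1;
        goto err_pci_reg;
    }

    err = -12;                     /* -ENOMEM, set before the attempt */
    netdev = malloc(64);           /* model alloc_etherdev() */
    if (!netdev)
        goto err_alloc_etherdev;

    /* ... more setup; later failures jump to deeper labels ... */
    free(netdev);                  /* toy teardown; a driver keeps these */
    free(regions);
    return 0;

err_alloc_etherdev:
    free(regions);                 /* undo completed steps in reverse */
err_pci_reg:
    return err;
}

int main(void)
{
    printf("probe: %d\n", probe());
    return 0;
}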
@@ -1208,7 +1240,7 @@ e1000_open(struct net_device *netdev)
err = e1000_request_irq(adapter);
if (err)
- goto err_up;
+ goto err_req_irq;
e1000_power_up_phy(adapter);
@@ -1229,6 +1261,9 @@ e1000_open(struct net_device *netdev)
return E1000_SUCCESS;
err_up:
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
+err_req_irq:
e1000_free_all_rx_resources(adapter);
err_setup_rx:
e1000_free_all_tx_resources(adapter);
@@ -1381,10 +1416,6 @@ setup_tx_desc_die:
* (Descriptors) for all queues
* @adapter: board private structure
*
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
* Return 0 on success, negative on failure
**/
@@ -1398,6 +1429,9 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
if (err) {
DPRINTK(PROBE, ERR,
"Allocation for Tx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
break;
}
}
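
The cleanup loop added above fixes a leak: if ring i fails to allocate, rings 0..i-1 were already set up and must be torn down before returning, rather than left as orphans for the caller; the same fix is applied to the Rx rings below. A sketch of allocate-all-or-unwind:

#include <stdio.h>
#include <stdlib.h>

#define NQUEUES 4

static int setup_all(void *rings[NQUEUES])
{
    int i;
    for (i = 0; i < NQUEUES; i++) {
        rings[i] = (i == 2) ? NULL : malloc(32);  /* force queue 2 to fail */
        if (!rings[i]) {
            fprintf(stderr, "Allocation for queue %d failed\n", i);
            for (i--; i >= 0; i--) {              /* unwind what succeeded */
                free(rings[i]);
                rings[i] = NULL;
            }
            return -12;   /* -ENOMEM */
        }
    }
    return 0;
}

int main(void)
{
    void *rings[NQUEUES];
    printf("setup: %d\n", setup_all(rings));
    return 0;
}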
@@ -1499,8 +1533,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
} else if (hw->mac_type == e1000_80003es2lan) {
tarc = E1000_READ_REG(hw, TARC0);
tarc |= 1;
- if (hw->media_type == e1000_media_type_internal_serdes)
- tarc |= (1 << 20);
E1000_WRITE_REG(hw, TARC0, tarc);
tarc = E1000_READ_REG(hw, TARC1);
tarc |= 1;
@@ -1639,10 +1671,6 @@ setup_rx_desc_die:
* (Descriptors) for all queues
* @adapter: board private structure
*
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
* Return 0 on success, negative on failure
**/
@@ -1656,6 +1684,9 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
if (err) {
DPRINTK(PROBE, ERR,
"Allocation for Rx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
break;
}
}
@@ -2442,10 +2473,9 @@ e1000_watchdog(unsigned long data)
* disable receives in the ISR and
* reset device here in the watchdog
*/
- if (adapter->hw.mac_type == e1000_80003es2lan) {
+ if (adapter->hw.mac_type == e1000_80003es2lan)
/* reset device */
schedule_work(&adapter->reset_task);
- }
}
e1000_smartspeed(adapter);
@@ -2545,7 +2575,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb->h.raw - skb->data - 1;
#ifdef NETIF_F_TSO_IPV6
- } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
skb->nh.ipv6h->payload_len = 0;
skb->h.th->check =
~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
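
The ntohs-to-htons change above matters because skb->protocol is kept in network byte order; the constant ETH_P_IPV6 must therefore be converted to network order before the comparison (on big-endian hosts both conversions produce the same bytes, which is how the bug hid). A tiny illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IPV6 0x86DD

int main(void)
{
    /* What a host sees in a network-byte-order protocol field: */
    uint16_t protocol = htons(ETH_P_IPV6);

    printf("protocol == htons(ETH_P_IPV6): %d\n",
           protocol == htons(ETH_P_IPV6));   /* 1 on every host */
    printf("protocol == ETH_P_IPV6:        %d\n",
           protocol == ETH_P_IPV6);          /* 0 on little-endian */
    return 0;
}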
@@ -3680,7 +3710,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
/* recycle */
- buffer_info-> skb = skb;
+ buffer_info->skb = skb;
goto next_desc;
}
@@ -3711,7 +3741,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
- new_skb->dev = netdev;
memcpy(new_skb->data - NET_IP_ALIGN,
skb->data - NET_IP_ALIGN,
length + NET_IP_ALIGN);
@@ -3978,13 +4007,13 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
- if (!(skb = buffer_info->skb))
- skb = netdev_alloc_skb(netdev, bufsz);
- else {
+ skb = buffer_info->skb;
+ if (skb) {
skb_trim(skb, 0);
goto map_skb;
}
+ skb = netdev_alloc_skb(netdev, bufsz);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
@@ -4009,10 +4038,10 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
dev_kfree_skb(skb);
dev_kfree_skb(oldskb);
break; /* while !buffer_info->skb */
- } else {
- /* Use new allocation */
- dev_kfree_skb(oldskb);
}
+
+ /* Use new allocation */
+ dev_kfree_skb(oldskb);
}
/* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after
@@ -4020,8 +4049,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
*/
skb_reserve(skb, NET_IP_ALIGN);
- skb->dev = netdev;
-
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
@@ -4135,8 +4162,6 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
*/
skb_reserve(skb, NET_IP_ALIGN);
- skb->dev = netdev;
-
buffer_info->skb = skb;
buffer_info->length = adapter->rx_ps_bsize0;
buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -4628,7 +4653,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
e1000_set_multi(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
- if (adapter->wol & E1000_WUFC_MC) {
+ if (wufc & E1000_WUFC_MC) {
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
@@ -4700,11 +4725,14 @@ e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- uint32_t manc, ret_val;
+ uint32_t manc, err;
pci_set_power_state(pdev, PCI_D0);
e1000_pci_restore_state(adapter);
- ret_val = pci_enable_device(pdev);
+ if ((err = pci_enable_device(pdev))) {
+ printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ return err;
+ }
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 0ef413172c6..21284273897 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -324,7 +324,6 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, NOTICE,
"Warning: no configuration for board #%i\n", bd);
DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
- bd = E1000_MAX_NIC;
}
{ /* Transmit Descriptor Count */
@@ -342,9 +341,14 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD;
- tx_ring->count = TxDescriptors[bd];
- e1000_validate_option(&tx_ring->count, &opt, adapter);
- E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+ if (num_TxDescriptors > bd) {
+ tx_ring->count = TxDescriptors[bd];
+ e1000_validate_option(&tx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(tx_ring->count,
+ REQ_TX_DESCRIPTOR_MULTIPLE);
+ } else {
+ tx_ring->count = opt.def;
+ }
for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
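
Each option block now consults a num_<Param> count before indexing, so a board with no explicit setting falls back to opt.def instead of reading an unset array slot; the count comes from the counted-array module-parameter machinery. A user-space model of the counted-array-plus-default pattern (names and values are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MAX_NIC 8
#define DEFAULT_TXD 256

static int TxDescriptors[MAX_NIC];
static int num_TxDescriptors;   /* how many values the user supplied */

int main(int argc, char **argv)
{
    int bd;

    /* Model "TxDescriptors=a,b,c": one argv value per board. */
    for (bd = 1; bd < argc && bd <= MAX_NIC; bd++)
        TxDescriptors[num_TxDescriptors++] = atoi(argv[bd]);

    for (bd = 0; bd < 3; bd++) {   /* three boards probed */
        int count = (num_TxDescriptors > bd) ? TxDescriptors[bd]
                                             : DEFAULT_TXD;
        printf("board %d: %d tx descriptors\n", bd, count);
    }
    return 0;
}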
@@ -363,9 +367,14 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD;
- rx_ring->count = RxDescriptors[bd];
- e1000_validate_option(&rx_ring->count, &opt, adapter);
- E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+ if (num_RxDescriptors > bd) {
+ rx_ring->count = RxDescriptors[bd];
+ e1000_validate_option(&rx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(rx_ring->count,
+ REQ_RX_DESCRIPTOR_MULTIPLE);
+ } else {
+ rx_ring->count = opt.def;
+ }
for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
@@ -377,9 +386,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_ENABLED
};
- int rx_csum = XsumRX[bd];
- e1000_validate_option(&rx_csum, &opt, adapter);
- adapter->rx_csum = rx_csum;
+ if (num_XsumRX > bd) {
+ int rx_csum = XsumRX[bd];
+ e1000_validate_option(&rx_csum, &opt, adapter);
+ adapter->rx_csum = rx_csum;
+ } else {
+ adapter->rx_csum = opt.def;
+ }
}
{ /* Flow Control */
@@ -399,9 +412,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.p = fc_list }}
};
- int fc = FlowControl[bd];
- e1000_validate_option(&fc, &opt, adapter);
- adapter->hw.fc = adapter->hw.original_fc = fc;
+ if (num_FlowControl > bd) {
+ int fc = FlowControl[bd];
+ e1000_validate_option(&fc, &opt, adapter);
+ adapter->hw.fc = adapter->hw.original_fc = fc;
+ } else {
+ adapter->hw.fc = adapter->hw.original_fc = opt.def;
+ }
}
{ /* Transmit Interrupt Delay */
struct e1000_option opt = {
@@ -413,8 +430,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXDELAY }}
};
- adapter->tx_int_delay = TxIntDelay[bd];
- e1000_validate_option(&adapter->tx_int_delay, &opt, adapter);
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
}
{ /* Transmit Absolute Interrupt Delay */
struct e1000_option opt = {
@@ -426,9 +448,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXABSDELAY }}
};
- adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
- e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
+ if (num_TxAbsIntDelay > bd) {
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_abs_int_delay = opt.def;
+ }
}
{ /* Receive Interrupt Delay */
struct e1000_option opt = {
@@ -440,8 +466,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXDELAY }}
};
- adapter->rx_int_delay = RxIntDelay[bd];
- e1000_validate_option(&adapter->rx_int_delay, &opt, adapter);
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
}
{ /* Receive Absolute Interrupt Delay */
struct e1000_option opt = {
@@ -453,9 +484,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXABSDELAY }}
};
- adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
- e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
+ if (num_RxAbsIntDelay > bd) {
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_abs_int_delay = opt.def;
+ }
}
{ /* Interrupt Throttling Rate */
struct e1000_option opt = {
@@ -467,18 +502,24 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_ITR }}
};
- adapter->itr = InterruptThrottleRate[bd];
- switch (adapter->itr) {
- case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
- break;
- case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
- break;
- default:
- e1000_validate_option(&adapter->itr, &opt, adapter);
- break;
+ if (num_InterruptThrottleRate > bd) {
+ adapter->itr = InterruptThrottleRate[bd];
+ switch (adapter->itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ break;
+ default:
+ e1000_validate_option(&adapter->itr, &opt,
+ adapter);
+ break;
+ }
+ } else {
+ adapter->itr = opt.def;
}
}
{ /* Smart Power Down */
@@ -489,9 +530,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_DISABLED
};
- int spd = SmartPowerDownEnable[bd];
- e1000_validate_option(&spd, &opt, adapter);
- adapter->smart_power_down = spd;
+ if (num_SmartPowerDownEnable > bd) {
+ int spd = SmartPowerDownEnable[bd];
+ e1000_validate_option(&spd, &opt, adapter);
+ adapter->smart_power_down = spd;
+ } else {
+ adapter->smart_power_down = opt.def;
+ }
}
{ /* Kumeran Lock Loss Workaround */
struct e1000_option opt = {
@@ -501,9 +546,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_ENABLED
};
+ if (num_KumeranLockLoss > bd) {
int kmrn_lock_loss = KumeranLockLoss[bd];
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+ } else {
+ adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+ }
}
switch (adapter->hw.media_type) {
@@ -530,18 +579,17 @@ static void __devinit
e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
- bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
- if ((Speed[bd] != OPTION_UNSET)) {
+ if (num_Speed > bd) {
DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
"parameter ignored\n");
}
- if ((Duplex[bd] != OPTION_UNSET)) {
+ if (num_Duplex > bd) {
DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
"parameter ignored\n");
}
- if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
+ if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
"not valid for fiber adapters, "
"parameter ignored\n");
@@ -560,7 +608,6 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
{
int speed, dplx, an;
int bd = adapter->bd_number;
- bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
{ /* Speed */
struct e1000_opt_list speed_list[] = {{ 0, "" },
@@ -577,8 +624,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = speed_list }}
};
- speed = Speed[bd];
- e1000_validate_option(&speed, &opt, adapter);
+ if (num_Speed > bd) {
+ speed = Speed[bd];
+ e1000_validate_option(&speed, &opt, adapter);
+ } else {
+ speed = opt.def;
+ }
}
{ /* Duplex */
struct e1000_opt_list dplx_list[] = {{ 0, "" },
@@ -600,11 +651,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
"Speed/Duplex/AutoNeg parameter ignored.\n");
return;
}
- dplx = Duplex[bd];
- e1000_validate_option(&dplx, &opt, adapter);
+ if (num_Duplex > bd) {
+ dplx = Duplex[bd];
+ e1000_validate_option(&dplx, &opt, adapter);
+ } else {
+ dplx = opt.def;
+ }
}
- if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
+ if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
DPRINTK(PROBE, INFO,
"AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
@@ -653,15 +708,19 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = an_list }}
};
- an = AutoNeg[bd];
- e1000_validate_option(&an, &opt, adapter);
+ if (num_AutoNeg > bd) {
+ an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
+ } else {
+ an = opt.def;
+ }
adapter->hw.autoneg_advertised = an;
}
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
- if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
+ if ((num_Speed > bd) && (speed != 0 || dplx != 0))
DPRINTK(PROBE, INFO,
"Speed and duplex autonegotiation enabled\n");
break;
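
The e1000_param.c hunks above all apply the same pattern: every module array parameter now carries a companion count (num_TxDescriptors, num_XsumRX, and so on) recording how many values the user actually supplied, and a board index at or beyond that count falls back to the option default instead of reading an OPTION_UNSET sentinel. A minimal sketch of the pattern, with Foo/num_Foo and check_foo() as illustrative names rather than anything from the patch:

#include <linux/module.h>
#include <linux/moduleparam.h>

#define MAX_NIC 32

static int Foo[MAX_NIC + 1];
static unsigned int num_Foo;		/* set to how many values were passed */
module_param_array(Foo, int, &num_Foo, 0);

static void check_foo(unsigned int bd, int def)
{
	int val;

	if (num_Foo > bd) {		/* the user gave a value for board bd */
		val = Foo[bd];
		/* clamp/validate val against the option's range here */
	} else {			/* nothing supplied: take the default */
		val = def;
	}
	/* apply val to the adapter for board bd ... */
}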
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index e445988c92e..a3d515def10 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2385,7 +2385,7 @@ static int __init eepro100_init_module(void)
#ifdef MODULE
printk(version);
#endif
- return pci_module_init(&eepro100_driver);
+ return pci_register_driver(&eepro100_driver);
}
static void __exit eepro100_cleanup_module(void)
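
The same one-line conversion recurs in epic100, fealnx, forcedeth and ixgb below: pci_module_init() had become a thin legacy wrapper around pci_register_driver() and was on its way out of the tree, so initialization paths call the real function directly. The shape of the change, sketched on a hypothetical driver:

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_pci_tbl,
	.probe		= foo_probe,
	.remove		= __devexit_p(foo_remove),
};

static int __init foo_init(void)
{
	/* pci_module_init() used to wrap this call; drivers now
	 * register with the PCI core directly */
	return pci_register_driver(&foo_driver);
}
module_init(foo_init);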
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index a67650ccf08..25c4619d842 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1604,7 +1604,7 @@ static int __init epic_init (void)
version, version2, version3);
#endif
- return pci_module_init (&epic_driver);
+ return pci_register_driver(&epic_driver);
}
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 567e27413cf..a2121faa610 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1984,7 +1984,7 @@ static int __init fealnx_init(void)
printk(version);
#endif
- return pci_module_init(&fealnx_driver);
+ return pci_register_driver(&fealnx_driver);
}
static void __exit fealnx_exit(void)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd..59f9a515c07 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -109,6 +109,7 @@
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
* 0.55: 22 Mar 2006: Add flow control (pause frame).
* 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ * 0.57: 14 May 2006: MAC address set in probe/remove and order corrections.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -120,7 +121,12 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.56"
+#ifdef CONFIG_FORCEDETH_NAPI
+#define DRIVERNAPI "-NAPI"
+#else
+#define DRIVERNAPI
+#endif
+#define FORCEDETH_VERSION "0.57"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -262,7 +268,8 @@ enum {
NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
- NvRegUnknownTransmitterReg = 0x10c,
+ NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
@@ -381,21 +388,21 @@ enum {
/* Big endian: should work, but is untested */
struct ring_desc {
- u32 PacketBuffer;
- u32 FlagLen;
+ __le32 buf;
+ __le32 flaglen;
};
struct ring_desc_ex {
- u32 PacketBufferHigh;
- u32 PacketBufferLow;
- u32 TxVlan;
- u32 FlagLen;
+ __le32 bufhigh;
+ __le32 buflow;
+ __le32 txvlan;
+ __le32 flaglen;
};
-typedef union _ring_type {
+union ring_type {
struct ring_desc* orig;
struct ring_desc_ex* ex;
-} ring_type;
+};
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
@@ -536,6 +543,9 @@ typedef union _ring_type {
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_MARVELL_E3016 0x220
+#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_INIT1 0x0f000
#define PHY_INIT2 0x0e00
#define PHY_INIT3 0x01000
@@ -653,8 +663,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
};
struct register_test {
- u32 reg;
- u32 mask;
+ __le32 reg;
+ __le32 mask;
};
static const struct register_test nv_registers_test[] = {
@@ -694,6 +704,7 @@ struct fe_priv {
int phyaddr;
int wolenabled;
unsigned int phy_oui;
+ unsigned int phy_model;
u16 gigabit;
int intr_test;
@@ -707,13 +718,14 @@ struct fe_priv {
u32 vlanctl_bits;
u32 driver_data;
u32 register_size;
+ int rx_csum;
void __iomem *base;
/* rx specific fields.
* Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
*/
- ring_type rx_ring;
+ union ring_type rx_ring;
unsigned int cur_rx, refill_rx;
struct sk_buff **rx_skbuff;
dma_addr_t *rx_dma;
@@ -733,7 +745,7 @@ struct fe_priv {
/*
* tx specific fields.
*/
- ring_type tx_ring;
+ union ring_type tx_ring;
unsigned int next_tx, nic_tx;
struct sk_buff **tx_skbuff;
dma_addr_t *tx_dma;
@@ -826,13 +838,13 @@ static inline void pci_push(u8 __iomem *base)
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
- return le32_to_cpu(prd->FlagLen)
+ return le32_to_cpu(prd->flaglen)
& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}
static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
- return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+ return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -885,7 +897,7 @@ static void free_rings(struct net_device *dev)
struct fe_priv *np = get_nvpriv(dev);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- if(np->rx_ring.orig)
+ if (np->rx_ring.orig)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
np->rx_ring.orig, np->ring_addr);
} else {
@@ -1020,14 +1032,13 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
return retval;
}
-static int phy_reset(struct net_device *dev)
+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
struct fe_priv *np = netdev_priv(dev);
u32 miicontrol;
unsigned int tries = 0;
- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- miicontrol |= BMCR_RESET;
+ miicontrol = BMCR_RESET | bmcr_setup;
if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
return -1;
}
@@ -1052,6 +1063,16 @@ static int phy_init(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+ /* phy errata for E3016 phy */
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ reg &= ~PHY_MARVELL_E3016_INITMASK;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+ printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
@@ -1082,8 +1103,13 @@ static int phy_init(struct net_device *dev)
else
np->gigabit = 0;
- /* reset the phy */
- if (phy_reset(dev)) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= BMCR_ANENABLE;
+
+ /* reset the phy
+ * (certain phys need bmcr to be setup with reset)
+ */
+ if (phy_reset(dev, mii_control)) {
printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
@@ -1178,7 +1204,7 @@ static void nv_stop_tx(struct net_device *dev)
KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
udelay(NV_TXSTOP_DELAY2);
- writel(0, base + NvRegUnknownTransmitterReg);
+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
@@ -1258,14 +1284,14 @@ static int nv_alloc_rx(struct net_device *dev)
np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
skb->end-skb->data, PCI_DMA_FROMDEVICE);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+ np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
wmb();
- np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
} else {
- np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
- np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+ np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+ np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
wmb();
- np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
}
dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
dev->name, refill_rx);
@@ -1277,6 +1303,16 @@ static int nv_alloc_rx(struct net_device *dev)
return 0;
}
+/* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
+#ifdef CONFIG_FORCEDETH_NAPI
+static void nv_do_rx_refill(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+
+ /* Just reschedule NAPI rx processing */
+ netif_rx_schedule(dev);
+}
+#else
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
@@ -1305,6 +1341,7 @@ static void nv_do_rx_refill(unsigned long data)
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
}
+#endif
static void nv_init_rx(struct net_device *dev)
{
@@ -1315,9 +1352,9 @@ static void nv_init_rx(struct net_device *dev)
np->refill_rx = 0;
for (i = 0; i < np->rx_ring_size; i++)
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->rx_ring.orig[i].FlagLen = 0;
+ np->rx_ring.orig[i].flaglen = 0;
else
- np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].flaglen = 0;
}
static void nv_init_tx(struct net_device *dev)
@@ -1328,9 +1365,9 @@ static void nv_init_tx(struct net_device *dev)
np->next_tx = np->nic_tx = 0;
for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->tx_ring.orig[i].FlagLen = 0;
+ np->tx_ring.orig[i].flaglen = 0;
else
- np->tx_ring.ex[i].FlagLen = 0;
+ np->tx_ring.ex[i].flaglen = 0;
np->tx_skbuff[i] = NULL;
np->tx_dma[i] = 0;
}
@@ -1373,9 +1410,9 @@ static void nv_drain_tx(struct net_device *dev)
for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->tx_ring.orig[i].FlagLen = 0;
+ np->tx_ring.orig[i].flaglen = 0;
else
- np->tx_ring.ex[i].FlagLen = 0;
+ np->tx_ring.ex[i].flaglen = 0;
if (nv_release_txskb(dev, i))
np->stats.tx_dropped++;
}
@@ -1387,9 +1424,9 @@ static void nv_drain_rx(struct net_device *dev)
int i;
for (i = 0; i < np->rx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->rx_ring.orig[i].FlagLen = 0;
+ np->rx_ring.orig[i].flaglen = 0;
else
- np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].flaglen = 0;
wmb();
if (np->rx_skbuff[i]) {
pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1450,17 +1487,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_dma_len[nr] = bcnt;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+ np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
} else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+ np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+ np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
}
tx_flags = np->tx_flags;
offset += bcnt;
size -= bcnt;
- } while(size);
+ } while (size);
/* setup the fragments */
for (i = 0; i < fragments; i++) {
@@ -1477,12 +1514,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_dma_len[nr] = bcnt;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+ np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
} else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+ np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+ np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
}
offset += bcnt;
size -= bcnt;
@@ -1491,9 +1528,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set last fragment flag */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+ np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
} else {
- np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+ np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
}
np->tx_skbuff[nr] = skb;
@@ -1512,10 +1549,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set tx flags */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
} else {
- np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
- np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+ np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
}
dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
@@ -1547,7 +1584,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void nv_tx_done(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- u32 Flags;
+ u32 flags;
unsigned int i;
struct sk_buff *skb;
@@ -1555,22 +1592,22 @@ static void nv_tx_done(struct net_device *dev)
i = np->nic_tx % np->tx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+ flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
else
- Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+ flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
- dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
- dev->name, np->nic_tx, Flags);
- if (Flags & NV_TX_VALID)
+ dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+ dev->name, np->nic_tx, flags);
+ if (flags & NV_TX_VALID)
break;
if (np->desc_ver == DESC_VER_1) {
- if (Flags & NV_TX_LASTPACKET) {
+ if (flags & NV_TX_LASTPACKET) {
skb = np->tx_skbuff[i];
- if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+ if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
NV_TX_UNDERFLOW|NV_TX_ERROR)) {
- if (Flags & NV_TX_UNDERFLOW)
+ if (flags & NV_TX_UNDERFLOW)
np->stats.tx_fifo_errors++;
- if (Flags & NV_TX_CARRIERLOST)
+ if (flags & NV_TX_CARRIERLOST)
np->stats.tx_carrier_errors++;
np->stats.tx_errors++;
} else {
@@ -1579,13 +1616,13 @@ static void nv_tx_done(struct net_device *dev)
}
}
} else {
- if (Flags & NV_TX2_LASTPACKET) {
+ if (flags & NV_TX2_LASTPACKET) {
skb = np->tx_skbuff[i];
- if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+ if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
- if (Flags & NV_TX2_UNDERFLOW)
+ if (flags & NV_TX2_UNDERFLOW)
np->stats.tx_fifo_errors++;
- if (Flags & NV_TX2_CARRIERLOST)
+ if (flags & NV_TX2_CARRIERLOST)
np->stats.tx_carrier_errors++;
np->stats.tx_errors++;
} else {
@@ -1638,29 +1675,29 @@ static void nv_tx_timeout(struct net_device *dev)
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
- le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
- le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
} else {
printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
- le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
- le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
- le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
}
}
}
@@ -1697,7 +1734,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
int protolen; /* length as stored in the proto field */
/* 1) calculate len according to header */
- if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+ if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
hdrlen = VLAN_HLEN;
} else {
@@ -1740,13 +1777,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
}
}
-static void nv_rx_process(struct net_device *dev)
+static int nv_rx_process(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
- u32 Flags;
+ u32 flags;
u32 vlanflags = 0;
+ int count;
- for (;;) {
+ for (count = 0; count < limit; ++count) {
struct sk_buff *skb;
int len;
int i;
@@ -1755,18 +1793,18 @@ static void nv_rx_process(struct net_device *dev)
i = np->cur_rx % np->rx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
} else {
- Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
- vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+ vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
}
- dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
- dev->name, np->cur_rx, Flags);
+ dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+ dev->name, np->cur_rx, flags);
- if (Flags & NV_RX_AVAIL)
+ if (flags & NV_RX_AVAIL)
break; /* still owned by hardware, */
/*
@@ -1780,7 +1818,7 @@ static void nv_rx_process(struct net_device *dev)
{
int j;
- dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+ dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
for (j=0; j<64; j++) {
if ((j%16) == 0)
dprintk("\n%03x:", j);
@@ -1790,30 +1828,30 @@ static void nv_rx_process(struct net_device *dev)
}
/* look at what we actually got: */
if (np->desc_ver == DESC_VER_1) {
- if (!(Flags & NV_RX_DESCRIPTORVALID))
+ if (!(flags & NV_RX_DESCRIPTORVALID))
goto next_pkt;
- if (Flags & NV_RX_ERROR) {
- if (Flags & NV_RX_MISSEDFRAME) {
+ if (flags & NV_RX_ERROR) {
+ if (flags & NV_RX_MISSEDFRAME) {
np->stats.rx_missed_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+ if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX_CRCERR) {
+ if (flags & NV_RX_CRCERR) {
np->stats.rx_crc_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX_OVERFLOW) {
+ if (flags & NV_RX_OVERFLOW) {
np->stats.rx_over_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX_ERROR4) {
+ if (flags & NV_RX_ERROR4) {
len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
if (len < 0) {
np->stats.rx_errors++;
@@ -1821,32 +1859,32 @@ static void nv_rx_process(struct net_device *dev)
}
}
/* framing errors are soft errors. */
- if (Flags & NV_RX_FRAMINGERR) {
- if (Flags & NV_RX_SUBSTRACT1) {
+ if (flags & NV_RX_FRAMINGERR) {
+ if (flags & NV_RX_SUBSTRACT1) {
len--;
}
}
}
} else {
- if (!(Flags & NV_RX2_DESCRIPTORVALID))
+ if (!(flags & NV_RX2_DESCRIPTORVALID))
goto next_pkt;
- if (Flags & NV_RX2_ERROR) {
- if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+ if (flags & NV_RX2_ERROR) {
+ if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX2_CRCERR) {
+ if (flags & NV_RX2_CRCERR) {
np->stats.rx_crc_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX2_OVERFLOW) {
+ if (flags & NV_RX2_OVERFLOW) {
np->stats.rx_over_errors++;
np->stats.rx_errors++;
goto next_pkt;
}
- if (Flags & NV_RX2_ERROR4) {
+ if (flags & NV_RX2_ERROR4) {
len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
if (len < 0) {
np->stats.rx_errors++;
@@ -1854,17 +1892,17 @@ static void nv_rx_process(struct net_device *dev)
}
}
/* framing errors are soft errors */
- if (Flags & NV_RX2_FRAMINGERR) {
- if (Flags & NV_RX2_SUBSTRACT1) {
+ if (flags & NV_RX2_FRAMINGERR) {
+ if (flags & NV_RX2_SUBSTRACT1) {
len--;
}
}
}
- if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
- Flags &= NV_RX2_CHECKSUMMASK;
- if (Flags == NV_RX2_CHECKSUMOK1 ||
- Flags == NV_RX2_CHECKSUMOK2 ||
- Flags == NV_RX2_CHECKSUMOK3) {
+ if (np->rx_csum) {
+ flags &= NV_RX2_CHECKSUMMASK;
+ if (flags == NV_RX2_CHECKSUMOK1 ||
+ flags == NV_RX2_CHECKSUMOK2 ||
+ flags == NV_RX2_CHECKSUMOK3) {
dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
} else {
@@ -1880,17 +1918,27 @@ static void nv_rx_process(struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
dev->name, np->cur_rx, len, skb->protocol);
- if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
- vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
- } else {
+#ifdef CONFIG_FORCEDETH_NAPI
+ if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+ vlan_hwaccel_receive_skb(skb, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK);
+ else
+ netif_receive_skb(skb);
+#else
+ if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+ vlan_hwaccel_rx(skb, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK);
+ else
netif_rx(skb);
- }
+#endif
dev->last_rx = jiffies;
np->stats.rx_packets++;
np->stats.rx_bytes += len;
next_pkt:
np->cur_rx++;
}
+
+ return count;
}
static void set_bufsize(struct net_device *dev)
@@ -1990,7 +2038,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
struct fe_priv *np = netdev_priv(dev);
struct sockaddr *macaddr = (struct sockaddr*)addr;
- if(!is_valid_ether_addr(macaddr->sa_data))
+ if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
/* synchronized against open : rtnl_lock() held by caller */
@@ -2032,7 +2080,6 @@ static void nv_set_multicast(struct net_device *dev)
memset(mask, 0, sizeof(mask));
if (dev->flags & IFF_PROMISC) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
pff |= NVREG_PFF_PROMISC;
} else {
pff |= NVREG_PFF_MYADDR;
@@ -2283,20 +2330,20 @@ set_speed:
lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
switch (adv_pause) {
- case (ADVERTISE_PAUSE_CAP):
+ case ADVERTISE_PAUSE_CAP:
if (lpa_pause & LPA_PAUSE_CAP) {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
break;
- case (ADVERTISE_PAUSE_ASYM):
+ case ADVERTISE_PAUSE_ASYM:
if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
{
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
break;
- case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+ case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
if (lpa_pause & LPA_PAUSE_CAP)
{
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
@@ -2376,14 +2423,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
nv_tx_done(dev);
spin_unlock(&np->lock);
- nv_rx_process(dev);
- if (nv_alloc_rx(dev)) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
-
if (events & NVREG_IRQ_LINK) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -2403,6 +2442,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
dev->name, events);
}
+#ifdef CONFIG_FORCEDETH_NAPI
+ if (events & NVREG_IRQ_RX_ALL) {
+ netif_rx_schedule(dev);
+
+		/* Disable further receive irqs */
+ spin_lock(&np->lock);
+ np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+ else
+ writel(np->irqmask, base + NvRegIrqMask);
+ spin_unlock(&np->lock);
+ }
+#else
+ nv_rx_process(dev, dev->weight);
+ if (nv_alloc_rx(dev)) {
+ spin_lock(&np->lock);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock(&np->lock);
+ }
+#endif
if (i > max_interrupt_work) {
spin_lock(&np->lock);
/* disable interrupts on the nic */
@@ -2474,6 +2536,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
return IRQ_RETVAL(i);
}
+#ifdef CONFIG_FORCEDETH_NAPI
+static int nv_napi_poll(struct net_device *dev, int *budget)
+{
+ int pkts, limit = min(*budget, dev->quota);
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ pkts = nv_rx_process(dev, limit);
+
+ if (nv_alloc_rx(dev)) {
+ spin_lock_irq(&np->lock);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock_irq(&np->lock);
+ }
+
+ if (pkts < limit) {
+ /* all done, no more packets present */
+ netif_rx_complete(dev);
+
+ /* re-enable receive interrupts */
+ spin_lock_irq(&np->lock);
+ np->irqmask |= NVREG_IRQ_RX_ALL;
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+ else
+ writel(np->irqmask, base + NvRegIrqMask);
+ spin_unlock_irq(&np->lock);
+ return 0;
+ } else {
+ /* used up our quantum, so reschedule */
+ dev->quota -= pkts;
+ *budget -= pkts;
+ return 1;
+ }
+}
+#endif
+
+#ifdef CONFIG_FORCEDETH_NAPI
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) data;
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+ writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+
+ if (events) {
+ netif_rx_schedule(dev);
+ /* disable receive interrupts on the nic */
+ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+ pci_push(base);
+ }
+ return IRQ_HANDLED;
+}
+#else
static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) data;
@@ -2492,7 +2611,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
if (!(events & np->irqmask))
break;
- nv_rx_process(dev);
+ nv_rx_process(dev, dev->weight);
if (nv_alloc_rx(dev)) {
spin_lock_irq(&np->lock);
if (!np->in_shutdown)
@@ -2514,12 +2633,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
spin_unlock_irq(&np->lock);
break;
}
-
}
dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
return IRQ_RETVAL(i);
}
+#endif
static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
{
@@ -3057,9 +3176,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (netif_running(dev))
printk(KERN_INFO "%s: link down.\n", dev->name);
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick,
+ * and cause autoneg to start */
+ if (phy_reset(dev, bmcr)) {
+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
} else {
int adv, bmcr;
@@ -3099,17 +3227,19 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
bmcr |= BMCR_FULLDPLX;
if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
bmcr |= BMCR_SPEED100;
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
if (np->phy_oui == PHY_OUI_MARVELL) {
- /* reset the phy */
- if (phy_reset(dev)) {
+ /* reset the phy in order for forced mode settings to stick */
+ if (phy_reset(dev, bmcr)) {
printk(KERN_INFO "%s: phy reset failed\n", dev->name);
return -EINVAL;
}
- } else if (netif_running(dev)) {
- /* Wait a bit and then reconfigure the nic. */
- udelay(10);
- nv_linkchange(dev);
+ } else {
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (netif_running(dev)) {
+ /* Wait a bit and then reconfigure the nic. */
+ udelay(10);
+ nv_linkchange(dev);
+ }
}
}
@@ -3166,8 +3296,17 @@ static int nv_nway_reset(struct net_device *dev)
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+		/* reset the phy in order for settings to stick */
+ if (phy_reset(dev, bmcr)) {
+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
if (netif_running(dev)) {
nv_start_rx(dev);
@@ -3245,7 +3384,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
/* fall back to old rings */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- if(rxtx_ring)
+ if (rxtx_ring)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
rxtx_ring, ring_addr);
} else {
@@ -3418,7 +3557,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
static u32 nv_get_rx_csum(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+ return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
@@ -3428,22 +3567,15 @@ static int nv_set_rx_csum(struct net_device *dev, u32 data)
int retcode = 0;
if (np->driver_data & DEV_HAS_CHECKSUM) {
-
- if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
- (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
- /* already set or unset */
- return 0;
- }
-
if (data) {
+ np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
- np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
} else {
- printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
- return -EINVAL;
+ np->rx_csum = 0;
+ /* vlan is dependent on rx checksum offload */
+ if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
}
-
if (netif_running(dev)) {
spin_lock_irq(&np->lock);
writel(np->txrxctl_bits, base + NvRegTxRxControl);
@@ -3481,7 +3613,7 @@ static int nv_get_stats_count(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
if (np->driver_data & DEV_HAS_STATISTICS)
- return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+ return sizeof(struct nv_ethtool_stats)/sizeof(u64);
else
return 0;
}
@@ -3619,7 +3751,7 @@ static int nv_loopback_test(struct net_device *dev)
struct sk_buff *tx_skb, *rx_skb;
dma_addr_t test_dma_addr;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
- u32 Flags;
+ u32 flags;
int len, i, pkt_len;
u8 *pkt_data;
u32 filter_flags = 0;
@@ -3663,12 +3795,12 @@ static int nv_loopback_test(struct net_device *dev)
tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
- np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
} else {
- np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
- np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
- np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+ np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+ np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
}
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(get_hwbase(dev));
@@ -3677,21 +3809,21 @@ static int nv_loopback_test(struct net_device *dev)
/* check for rx of the packet */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
} else {
- Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+ flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
}
- if (Flags & NV_RX_AVAIL) {
+ if (flags & NV_RX_AVAIL) {
ret = 0;
} else if (np->desc_ver == DESC_VER_1) {
- if (Flags & NV_RX_ERROR)
+ if (flags & NV_RX_ERROR)
ret = 0;
} else {
- if (Flags & NV_RX2_ERROR) {
+ if (flags & NV_RX2_ERROR) {
ret = 0;
}
}
@@ -3753,6 +3885,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
+ netif_poll_disable(dev);
netif_tx_lock_bh(dev);
spin_lock_irq(&np->lock);
nv_disable_hw_interrupts(dev, np->irqmask);
@@ -3811,6 +3944,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
nv_start_rx(dev);
nv_start_tx(dev);
netif_start_queue(dev);
+ netif_poll_enable(dev);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
@@ -3895,10 +4029,9 @@ static int nv_open(struct net_device *dev)
dprintk(KERN_DEBUG "nv_open: begin\n");
- /* 1) erase previous misconfiguration */
+ /* erase previous misconfiguration */
if (np->driver_data & DEV_HAS_POWER_CNTRL)
nv_mac_reset(dev);
- /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
writel(0, base + NvRegMulticastMaskA);
@@ -3913,26 +4046,22 @@ static int nv_open(struct net_device *dev)
if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
- /* 2) initialize descriptor rings */
+ /* initialize descriptor rings */
set_bufsize(dev);
oom = nv_init_ring(dev);
writel(0, base + NvRegLinkSpeed);
- writel(0, base + NvRegUnknownTransmitterReg);
+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
nv_txrx_reset(dev);
writel(0, base + NvRegUnknownSetupReg6);
np->in_shutdown = 0;
- /* 3) set mac address */
- nv_copy_mac_to_hw(dev);
-
- /* 4) give hw rings */
+ /* give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
- /* 5) continue setup */
writel(np->linkspeed, base + NvRegLinkSpeed);
if (np->desc_ver == DESC_VER_1)
writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
@@ -3950,7 +4079,6 @@ static int nv_open(struct net_device *dev)
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
- /* 6) continue setup */
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
@@ -4020,6 +4148,8 @@ static int nv_open(struct net_device *dev)
nv_start_rx(dev);
nv_start_tx(dev);
netif_start_queue(dev);
+ netif_poll_enable(dev);
+
if (ret) {
netif_carrier_on(dev);
} else {
@@ -4049,6 +4179,7 @@ static int nv_close(struct net_device *dev)
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
+ netif_poll_disable(dev);
synchronize_irq(dev->irq);
del_timer_sync(&np->oom_kick);
@@ -4076,12 +4207,6 @@ static int nv_close(struct net_device *dev)
if (np->wolenabled)
nv_start_rx(dev);
- /* special op: write back the misordered MAC address - otherwise
- * the next nv_probe would see a wrong address.
- */
- writel(np->orig_mac[0], base + NvRegMacAddrA);
- writel(np->orig_mac[1], base + NvRegMacAddrB);
-
/* FIXME: power down nic */
return 0;
@@ -4094,7 +4219,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
unsigned long addr;
u8 __iomem *base;
int err, i;
- u32 powerstate;
+ u32 powerstate, txreg;
dev = alloc_etherdev(sizeof(struct fe_priv));
err = -ENOMEM;
@@ -4190,6 +4315,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->pkt_limit = NV_PKTLIMIT_2;
if (id->driver_data & DEV_HAS_CHECKSUM) {
+ np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
@@ -4270,6 +4396,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = nv_poll_controller;
#endif
+ dev->weight = 64;
+#ifdef CONFIG_FORCEDETH_NAPI
+ dev->poll = nv_napi_poll;
+#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->tx_timeout = nv_tx_timeout;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
@@ -4281,12 +4411,30 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->orig_mac[0] = readl(base + NvRegMacAddrA);
np->orig_mac[1] = readl(base + NvRegMacAddrB);
- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* check the workaround bit for correct mac address order */
+ txreg = readl(base + NvRegTransmitPoll);
+ if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+ /* mac address is already in correct order */
+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ } else {
+ /* need to reverse mac address to correct order */
+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+		/* set permanent address to be correct as well */
+ np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+ np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+ writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+ }
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
if (!is_valid_ether_addr(dev->perm_addr)) {
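
The probe-time address handling above keys off NVREG_TRANSMITPOLL_MAC_ADDR_REV: once the workaround bit is set, the MAC registers hold the address in correct order, so later probes and the matching write-back in nv_remove() stay consistent. The correct-order branch is plain little-endian unpacking of two 32-bit registers; a compact sketch, with nv_unpack_mac/regs as illustrative names:

static void nv_unpack_mac(const u32 regs[2], u8 addr[6])
{
	int i;

	/* byte i of the address sits in bits (8 * (i % 4)) of regs[i / 4] */
	for (i = 0; i < 6; i++)
		addr[i] = (regs[i / 4] >> ((i % 4) * 8)) & 0xff;
}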
@@ -4309,6 +4457,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ /* set mac address */
+ nv_copy_mac_to_hw(dev);
+
/* disable WOL */
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
@@ -4369,6 +4520,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id2 < 0 || id2 == 0xffff)
continue;
+ np->phy_model = id2 & PHYID2_MODEL_MASK;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
@@ -4421,9 +4573,17 @@ out:
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
unregister_netdev(dev);
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
+ writel(np->orig_mac[0], base + NvRegMacAddrA);
+ writel(np->orig_mac[1], base + NvRegMacAddrB);
+
/* free all structures */
free_rings(dev);
iounmap(get_hwbase(dev));
@@ -4540,7 +4700,7 @@ static struct pci_driver driver = {
static int __init init_nic(void)
{
printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
- return pci_module_init(&driver);
+ return pci_register_driver(&driver);
}
static void __exit exit_nic(void)
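
forcedeth's new CONFIG_FORCEDETH_NAPI paths follow this kernel generation's dev->poll/dev->weight contract: the interrupt handler masks further RX interrupts and calls netif_rx_schedule(), and the poll routine consumes up to its quota, re-enabling interrupts only once the ring is drained. A bare-bones sketch of that contract (foo_poll and foo_rx_process are illustrative):

static int foo_poll(struct net_device *dev, int *budget)
{
	int limit = min(*budget, dev->quota);
	int done = foo_rx_process(dev, limit);	/* packets handled */

	*budget -= done;
	dev->quota -= done;

	if (done < limit) {
		netif_rx_complete(dev);	/* ring drained: leave poll list */
		/* unmask RX interrupts here */
		return 0;
	}
	return 1;			/* quota exhausted: poll again */
}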
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index e7d9bf33028..ff5a67d619b 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -111,7 +111,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
-#include <linux/config.h> /* for CONFIG_PCI */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/bitops.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 47f6f64d604..415ba8dc94c 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -45,7 +45,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 0ea65c4c6f8..b69776e0095 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -40,7 +40,6 @@
********************************************************************/
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 82b67af54c9..a51604b3651 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -110,9 +110,6 @@ struct ixgb_adapter;
#define IXGB_RXBUFFER_8192 8192
#define IXGB_RXBUFFER_16384 16384
-/* How many Tx Descriptors do we need to call netif_wake_queue? */
-#define IXGB_TX_QUEUE_WAKE 16
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */
@@ -173,7 +170,7 @@ struct ixgb_adapter {
unsigned long led_status;
/* TX */
- struct ixgb_desc_ring tx_ring;
+ struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
unsigned long timeo_start;
uint32_t tx_cmd_type;
uint64_t hw_csum_tx_good;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cf19b898ba9..ba621083830 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -654,11 +654,7 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data)
mod_timer(&adapter->blink_timer, jiffies);
- if (data)
- schedule_timeout_interruptible(data * HZ);
- else
- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
-
+ msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
ixgb_led_off(&adapter->hw);
clear_bit(IXGB_LED_ON, &adapter->led_status);
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index f7fa10e47fa..2b1515574fa 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -236,6 +236,17 @@ ixgb_identify_phy(struct ixgb_hw *hw)
DEBUGOUT("Identified G6104 optics\n");
phy_type = ixgb_phy_type_g6104;
break;
+ case IXGB_DEVICE_ID_82597EX_CX4:
+ DEBUGOUT("Identified CX4\n");
+ xpak_vendor = ixgb_identify_xpak_vendor(hw);
+ if (xpak_vendor == ixgb_xpak_vendor_intel) {
+ DEBUGOUT("Identified TXN17201 optics\n");
+ phy_type = ixgb_phy_type_txn17201;
+ } else {
+ DEBUGOUT("Identified G6005 optics\n");
+ phy_type = ixgb_phy_type_g6005;
+ }
+ break;
default:
DEBUGOUT("Unknown physical layer module\n");
phy_type = ixgb_phy_type_unknown;
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index 40a085f94c7..9fd61189b4b 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -45,6 +45,7 @@
#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
#define IXGB_SUBDEVICE_ID_A00C 0xA00C
+#define IXGB_SUBDEVICE_ID_A01C 0xA01C
#endif /* #ifndef _IXGB_IDS_H_ */
/* End of File */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7bbd447289b..e36dee1dd33 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "1.0.109-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.112-k2"DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION;
static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -118,15 +118,26 @@ static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
static void ixgb_netpoll(struct net_device *dev);
#endif
-/* Exported from other modules */
+static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
+ enum pci_channel_state state);
+static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
+static void ixgb_io_resume (struct pci_dev *pdev);
+/* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter);
+static struct pci_error_handlers ixgb_err_handler = {
+ .error_detected = ixgb_io_error_detected,
+ .slot_reset = ixgb_io_slot_reset,
+ .resume = ixgb_io_resume,
+};
+
static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove),
+ .err_handler = &ixgb_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -140,12 +151,12 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* some defines for controlling descriptor fetches in h/w */
-#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
-#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
- * this */
-#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
- * is pushed this many descriptors
- * from head */
+#define RXDCTL_WTHRESH_DEFAULT 15 /* chip writes back at this many or RXT0 */
+#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefetch below
+ * this */
+#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
+ * is pushed this many descriptors
+ * from head */
/**
* ixgb_init_module - Driver Registration Routine
@@ -162,7 +173,7 @@ ixgb_init_module(void)
printk(KERN_INFO "%s\n", ixgb_copyright);
- return pci_module_init(&ixgb_driver);
+ return pci_register_driver(&ixgb_driver);
}
module_init(ixgb_init_module);
@@ -1174,6 +1185,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
int err;
if (likely(skb_is_gso(skb))) {
+ struct ixgb_buffer *buffer_info;
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -1196,6 +1208,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+ buffer_info = &adapter->tx_ring.buffer_info[i];
+ WARN_ON(buffer_info->dma != 0);
context_desc->ipcss = ipcss;
context_desc->ipcso = ipcso;
@@ -1233,11 +1247,14 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint8_t css, cso;
if(likely(skb->ip_summed == CHECKSUM_HW)) {
+ struct ixgb_buffer *buffer_info;
css = skb->h.raw - skb->data;
cso = (skb->h.raw + skb->csum) - skb->data;
i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+ buffer_info = &adapter->tx_ring.buffer_info[i];
+ WARN_ON(buffer_info->dma != 0);
context_desc->tucss = css;
context_desc->tucso = cso;
@@ -1283,6 +1300,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_DATA_PER_TXD);
buffer_info->length = size;
+ WARN_ON(buffer_info->dma != 0);
buffer_info->dma =
pci_map_single(adapter->pdev,
skb->data + offset,
@@ -1543,6 +1561,11 @@ void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* Prevent stats update while adapter is being reset */
+ if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
+ return;
if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
@@ -1787,7 +1810,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
if (unlikely(netif_queue_stopped(netdev))) {
spin_lock(&adapter->tx_lock);
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
- (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
+ (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
netif_wake_queue(netdev);
spin_unlock(&adapter->tx_lock);
}
@@ -1948,10 +1971,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#define IXGB_CB_LENGTH 256
if (length < IXGB_CB_LENGTH) {
struct sk_buff *new_skb =
- dev_alloc_skb(length + NET_IP_ALIGN);
+ netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
- new_skb->dev = netdev;
memcpy(new_skb->data - NET_IP_ALIGN,
skb->data - NET_IP_ALIGN,
length + NET_IP_ALIGN);
@@ -2031,14 +2053,14 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
/* leave three descriptors unused */
while(--cleancount > 2) {
/* recycle! its good for you */
- if (!(skb = buffer_info->skb))
- skb = dev_alloc_skb(adapter->rx_buffer_len
- + NET_IP_ALIGN);
- else {
+ skb = buffer_info->skb;
+ if (skb) {
skb_trim(skb, 0);
goto map_skb;
}
+ skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
+ + NET_IP_ALIGN);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
@@ -2051,8 +2073,6 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
*/
skb_reserve(skb, NET_IP_ALIGN);
- skb->dev = netdev;
-
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
@@ -2190,7 +2210,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
static void ixgb_netpoll(struct net_device *dev)
{
- struct ixgb_adapter *adapter = dev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(dev);
disable_irq(adapter->pdev->irq);
ixgb_intr(adapter->pdev->irq, dev, NULL);
@@ -2198,4 +2218,98 @@ static void ixgb_netpoll(struct net_device *dev)
}
#endif
+/**
+ * ixgb_io_error_detected - called when a PCI error is detected
+ * @pdev: pointer to pci device with error
+ * @state: pci channel state after error
+ *
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ if(netif_running(netdev))
+ ixgb_down(adapter, TRUE);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: pointer to pci device with error
+ *
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the ixgb_probe() routine.
+ */
+static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ if(pci_enable_device(pdev)) {
+ DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Perform card reset only on one instance of the card */
+ if (0 != PCI_FUNC (pdev->devfn))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_set_master(pdev);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ ixgb_reset(adapter);
+
+ /* Make sure the EEPROM is good */
+ if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+ DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
+ DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgb_io_resume - called when it's OK to resume normal operations
+ * @pdev: pointer to pci device with error
+ *
+ * The error recovery driver tells us that it's OK to resume
+ * normal operation. Implementation resembles the second half
+ * of the ixgb_probe() routine.
+ */
+static void ixgb_io_resume (struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ pci_set_master(pdev);
+
+ if(netif_running(netdev)) {
+ if(ixgb_up(adapter)) {
+ printk(KERN_ERR "ixgb: can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+ mod_timer(&adapter->watchdog_timer, jiffies);
+}
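
The three callbacks above only take effect once they are wired into the
PCI core. A minimal sketch of that wiring, assuming the handler-struct
name (the pci_error_handlers fields themselves are the standard kernel
API; the struct is then referenced from the driver's pci_driver
definition via its .err_handler member):

	static struct pci_error_handlers ixgb_err_handler = {
		.error_detected = ixgb_io_error_detected,
		.slot_reset = ixgb_io_slot_reset,
		.resume = ixgb_io_resume,
	};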
+
/* ixgb_main.c */
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 9bdd43ab357..b19e2034d11 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -187,11 +187,14 @@ struct myri10ge_priv {
u8 mac_addr[6]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
+ int fw_multicast_support;
u32 devctl;
u16 msi_flags;
u32 read_dma;
u32 write_dma;
u32 read_write_dma;
+ u32 link_changes;
+ u32 msg_enable;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
@@ -257,6 +260,12 @@ module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
"Set stuck legacy IRQ detection threshold\n");
+#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
+
+static int myri10ge_debug = -1; /* defaults above */
+module_param(myri10ge_debug, int, 0);
+MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
+
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
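
MYRI10GE_HIGHPART_TO_U32 yields the upper 32 bits of a 64-bit bus
address, or 0 when dma_addr_t is only 32 bits wide (the sizeof() test
selects between the two cases at compile time). A short sketch of how
the driver splits an address into the two command words, using names
that appear later in this file:

	dma_addr_t bus = mgp->fw_stats_bus;
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);	/* low 32 bits */
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);	/* high 32 bits, or 0 */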
@@ -271,7 +280,7 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
struct mcp_cmd *buf;
char buf_bytes[sizeof(*buf) + 8];
struct mcp_cmd_response *response = mgp->cmd;
- char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET;
+ char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
u32 dma_low, dma_high, result, value;
int sleep_total = 0;
@@ -320,6 +329,8 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
if (result == 0) {
data->data0 = value;
return 0;
+ } else if (result == MXGEFW_CMD_UNKNOWN) {
+ return -ENOSYS;
} else {
dev_err(&mgp->pdev->dev,
"command %d failed, result = %d\n",
@@ -404,7 +415,7 @@ static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
buf[4] = htonl(dma_low); /* dummy addr LSW */
buf[5] = htonl(enable); /* enable? */
- submit = mgp->sram + 0xfc01c0;
+ submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
@@ -600,7 +611,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
buf[5] = htonl(8); /* where to copy to */
buf[6] = htonl(0); /* where to jump to */
- submit = mgp->sram + 0xfc0000;
+ submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
mb();
@@ -764,6 +775,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
mgp->rx_small.cnt = 0;
mgp->rx_done.idx = 0;
mgp->rx_done.cnt = 0;
+ mgp->link_changes = 0;
status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
myri10ge_change_promisc(mgp, 0, 0);
myri10ge_change_pause(mgp, mgp->pause);
@@ -798,12 +810,13 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
* pages directly and building a fraglist in the near future.
*/
-static inline struct sk_buff *myri10ge_alloc_big(int bytes)
+static inline struct sk_buff *myri10ge_alloc_big(struct net_device *dev,
+ int bytes)
{
struct sk_buff *skb;
unsigned long data, roundup;
- skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD);
+ skb = netdev_alloc_skb(dev, bytes + 4096 + MXGEFW_PAD);
if (skb == NULL)
return NULL;
@@ -821,12 +834,13 @@ static inline struct sk_buff *myri10ge_alloc_big(int bytes)
/* Allocate 2x as much space as required and use whichever portion
* does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
+static inline struct sk_buff *myri10ge_alloc_small_safe(struct net_device *dev,
+ unsigned int bytes)
{
struct sk_buff *skb;
unsigned long data, boundary;
- skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1);
+ skb = netdev_alloc_skb(dev, 2 * (bytes + MXGEFW_PAD) - 1);
if (unlikely(skb == NULL))
return NULL;
@@ -847,12 +861,13 @@ static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
/* Allocate just enough space, and verify that the allocated
* space does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small(int bytes)
+static inline struct sk_buff *myri10ge_alloc_small(struct net_device *dev,
+ int bytes)
{
struct sk_buff *skb;
unsigned long roundup, data, end;
- skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD);
+ skb = netdev_alloc_skb(dev, bytes + 16 + MXGEFW_PAD);
if (unlikely(skb == NULL))
return NULL;
@@ -868,15 +883,17 @@ static inline struct sk_buff *myri10ge_alloc_small(int bytes)
"myri10ge_alloc_small: small skb crossed 4KB boundary\n");
myri10ge_skb_cross_4k = 1;
dev_kfree_skb_any(skb);
- skb = myri10ge_alloc_small_safe(bytes);
+ skb = myri10ge_alloc_small_safe(dev, bytes);
}
return skb;
}
static inline int
-myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
- int idx)
+myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct myri10ge_priv *mgp,
+ int bytes, int idx)
{
+ struct net_device *dev = mgp->dev;
+ struct pci_dev *pdev = mgp->pdev;
struct sk_buff *skb;
dma_addr_t bus;
int len, retval = 0;
@@ -884,11 +901,11 @@ myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
bytes += VLAN_HLEN; /* account for 802.1q vlan tag */
if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
- skb = myri10ge_alloc_big(bytes);
+ skb = myri10ge_alloc_big(dev, bytes);
else if (myri10ge_skb_cross_4k)
- skb = myri10ge_alloc_small_safe(bytes);
+ skb = myri10ge_alloc_small_safe(dev, bytes);
else
- skb = myri10ge_alloc_small(bytes);
+ skb = myri10ge_alloc_small(dev, bytes);
if (unlikely(skb == NULL)) {
rx->alloc_fail++;
@@ -951,7 +968,7 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
unmap_len = pci_unmap_len(&rx->info[idx], len);
/* try to replace the received skb */
- if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) {
+ if (myri10ge_getbuf(rx, mgp, bytes, idx)) {
/* drop the frame -- the old skbuf is re-cycled */
mgp->stats.rx_dropped += 1;
return 0;
@@ -968,7 +985,6 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, mgp->dev);
- skb->dev = mgp->dev;
if (mgp->csum_flag) {
if ((skb->protocol == ntohs(ETH_P_IP)) ||
(skb->protocol == ntohs(ETH_P_IPV6))) {
@@ -1081,13 +1097,19 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
if (mgp->link_state != stats->link_up) {
mgp->link_state = stats->link_up;
if (mgp->link_state) {
- printk(KERN_INFO "myri10ge: %s: link up\n",
- mgp->dev->name);
+ if (netif_msg_link(mgp))
+ printk(KERN_INFO
+ "myri10ge: %s: link up\n",
+ mgp->dev->name);
netif_carrier_on(mgp->dev);
+ mgp->link_changes++;
} else {
- printk(KERN_INFO "myri10ge: %s: link down\n",
- mgp->dev->name);
+ if (netif_msg_link(mgp))
+ printk(KERN_INFO
+ "myri10ge: %s: link down\n",
+ mgp->dev->name);
netif_carrier_off(mgp->dev);
+ mgp->link_changes++;
}
}
if (mgp->rdma_tags_available !=
@@ -1289,7 +1311,8 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
"serial_number", "tx_pkt_start", "tx_pkt_done",
"tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
- "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered",
+ "link_changes", "link_up", "dropped_link_overflow",
+ "dropped_link_error_or_filtered", "dropped_multicast_filtered",
"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
"dropped_no_big_buffer"
};
@@ -1341,16 +1364,31 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i++] = (unsigned int)mgp->stop_queue;
data[i++] = (unsigned int)mgp->watchdog_resets;
data[i++] = (unsigned int)mgp->tx_linearized;
+ data[i++] = (unsigned int)mgp->link_changes;
data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
data[i++] =
(unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
+ data[i++] =
+ (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
}
+static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ mgp->msg_enable = value;
+}
+
+static u32 myri10ge_get_msglevel(struct net_device *netdev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ return mgp->msg_enable;
+}
+
static struct ethtool_ops myri10ge_ethtool_ops = {
.get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
@@ -1371,7 +1409,9 @@ static struct ethtool_ops myri10ge_ethtool_ops = {
#endif
.get_strings = myri10ge_get_strings,
.get_stats_count = myri10ge_get_stats_count,
- .get_ethtool_stats = myri10ge_get_ethtool_stats
+ .get_ethtool_stats = myri10ge_get_ethtool_stats,
+ .set_msglevel = myri10ge_set_msglevel,
+ .get_msglevel = myri10ge_get_msglevel
};
static int myri10ge_allocate_rings(struct net_device *dev)
@@ -1439,7 +1479,7 @@ static int myri10ge_allocate_rings(struct net_device *dev)
/* Fill the receive rings */
for (i = 0; i <= mgp->rx_small.mask; i++) {
- status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev,
+ status = myri10ge_getbuf(&mgp->rx_small, mgp,
mgp->small_bytes, i);
if (status) {
printk(KERN_ERR
@@ -1451,8 +1491,7 @@ static int myri10ge_allocate_rings(struct net_device *dev)
for (i = 0; i <= mgp->rx_big.mask; i++) {
status =
- myri10ge_getbuf(&mgp->rx_big, mgp->pdev,
- dev->mtu + ETH_HLEN, i);
+ myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i);
if (status) {
printk(KERN_ERR
"myri10ge: %s: alloced only %d big bufs\n",
@@ -1648,9 +1687,11 @@ static int myri10ge_open(struct net_device *dev)
}
if (mgp->mtrr >= 0) {
- mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000;
- mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000;
- mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000;
+ mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
+ mgp->rx_small.wc_fifo =
+ (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
+ mgp->rx_big.wc_fifo =
+ (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
} else {
mgp->tx.wc_fifo = NULL;
mgp->rx_small.wc_fifo = NULL;
@@ -1686,7 +1727,21 @@ static int myri10ge_open(struct net_device *dev)
cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
- status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0);
+ cmd.data2 = sizeof(struct mcp_irq_data);
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
+ if (status == -ENOSYS) {
+ dma_addr_t bus = mgp->fw_stats_bus;
+ bus += offsetof(struct mcp_irq_data, send_done_count);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
+ status = myri10ge_send_cmd(mgp,
+ MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
+ &cmd, 0);
+ /* Firmware cannot support multicast without STATS_DMA_V2 */
+ mgp->fw_multicast_support = 0;
+ } else {
+ mgp->fw_multicast_support = 1;
+ }
if (status) {
printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
dev->name);
@@ -1841,7 +1896,8 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
if (cnt > 0) {
/* pad it to 64 bytes. The src is 64 bytes bigger than it
* needs to be so that we don't overrun it */
- myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64);
+ myri10ge_pio_copy(tx->wc_fifo + MXGEFW_ETH_SEND_OFFSET(cnt),
+ src, 64);
mb();
}
}
@@ -2140,9 +2196,81 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
static void myri10ge_set_multicast_list(struct net_device *dev)
{
+ struct myri10ge_cmd cmd;
+ struct myri10ge_priv *mgp;
+ struct dev_mc_list *mc_list;
+ int err;
+
+ mgp = netdev_priv(dev);
/* can be called from atomic contexts,
* pass 1 to force atomicity in myri10ge_send_cmd() */
- myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1);
+ myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
+
+ /* This firmware is known to not support multicast */
+ if (!mgp->fw_multicast_support)
+ return;
+
+ /* Disable multicast filtering */
+
+ err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
+ if (err != 0) {
+ printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_ENABLE_ALLMULTI,"
+ " error status: %d\n", dev->name, err);
+ goto abort;
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* request to disable multicast filtering, so quit here */
+ return;
+ }
+
+ /* Flush the filters */
+
+ err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
+ &cmd, 1);
+ if (err != 0) {
+ printk(KERN_ERR
+ "myri10ge: %s: Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
+ ", error status: %d\n", dev->name, err);
+ goto abort;
+ }
+
+ /* Walk the multicast list, and add each address */
+ for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
+ memcpy(&cmd.data0, &mc_list->dmi_addr, 4);
+ memcpy(&cmd.data1, ((char *)&mc_list->dmi_addr) + 4, 2);
+ cmd.data0 = htonl(cmd.data0);
+ cmd.data1 = htonl(cmd.data1);
+ err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
+ &cmd, 1);
+
+ if (err != 0) {
+ printk(KERN_ERR "myri10ge: %s: Failed "
+ "MXGEFW_JOIN_MULTICAST_GROUP, error status:"
+ "%d\t", dev->name, err);
+ printk(KERN_ERR "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ((unsigned char *)&mc_list->dmi_addr)[0],
+ ((unsigned char *)&mc_list->dmi_addr)[1],
+ ((unsigned char *)&mc_list->dmi_addr)[2],
+ ((unsigned char *)&mc_list->dmi_addr)[3],
+ ((unsigned char *)&mc_list->dmi_addr)[4],
+ ((unsigned char *)&mc_list->dmi_addr)[5]
+ );
+ goto abort;
+ }
+ }
+ /* Enable multicast filtering */
+ err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
+ if (err != 0) {
+ printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_DISABLE_ALLMULTI,"
+ "error status: %d\n", dev->name, err);
+ goto abort;
+ }
+
+ return;
+
+abort:
+ return;
}
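
The ordering above is deliberate: filtering is disabled before the
filter table is rewritten, so a failure at any step leaves the NIC
accepting all multicast rather than silently dropping it. A summary of
the sequence programmed by the function above:

	/* 1. MXGEFW_ENABLE_ALLMULTI            - fail open: accept all multicast
	 * 2. MXGEFW_LEAVE_ALL_MULTICAST_GROUPS - flush the firmware filter cache
	 * 3. MXGEFW_JOIN_MULTICAST_GROUP       - one command per dev->mc_list entry
	 * 4. MXGEFW_DISABLE_ALLMULTI           - re-enable filtering
	 * Any error returns with ALLMULTI still enabled. */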
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
@@ -2289,6 +2417,8 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
*/
#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
+#define PCI_DEVICE_ID_INTEL_E5000_PCIE23 0x25f7
+#define PCI_DEVICE_ID_INTEL_E5000_PCIE47 0x25fa
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
@@ -2298,15 +2428,34 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
mgp->fw_name = myri10ge_fw_unaligned;
if (myri10ge_force_firmware == 0) {
+ int link_width, exp_cap;
+ u16 lnk;
+
+ exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
+ link_width = (lnk >> 4) & 0x3f;
+
myri10ge_enable_ecrc(mgp);
- /* Check to see if the upstream bridge is known to
- * provide aligned completions */
- if (bridge
- /* ServerWorks HT2000/HT1000 */
- && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
- && bridge->device ==
- PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) {
+ /* Check to see if Link is less than 8 or if the
+ * upstream bridge is known to provide aligned
+ * completions */
+ if (link_width < 8) {
+ dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
+ link_width);
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+ } else if (bridge &&
+ /* ServerWorks HT2000/HT1000 */
+ ((bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
+ && bridge->device ==
+ PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE)
+ /* All Intel E5000 PCIE ports */
+ || (bridge->vendor == PCI_VENDOR_ID_INTEL
+ && bridge->device >=
+ PCI_DEVICE_ID_INTEL_E5000_PCIE23
+ && bridge->device <=
+ PCI_DEVICE_ID_INTEL_E5000_PCIE47))) {
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (0x%x:0x%x)\n",
bridge->vendor, bridge->device);
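
For reference, the negotiated link width read above lives in bits 9:4
of the PCI Express Link Status register; a worked decode with
illustrative register values (not from the source):

	/* lnk = 0x1081:  (0x1081 >> 4) & 0x3f = 0x08  ->  x8 link */
	/* lnk = 0x1041:  (0x1041 >> 4) & 0x3f = 0x04  ->  x4 link */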
@@ -2581,6 +2730,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
mgp->pause = myri10ge_flow_control;
mgp->intr_coal_delay = myri10ge_intr_coal_delay;
+ mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
init_waitqueue_head(&mgp->down_wq);
if (pci_enable_device(pdev)) {
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index 0a6cae6cb18..9519ae7cd5e 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -91,7 +91,19 @@ struct mcp_kreq_ether_recv {
/* Commands */
-#define MXGEFW_CMD_OFFSET 0xf80000
+#define MXGEFW_BOOT_HANDOFF 0xfc0000
+#define MXGEFW_BOOT_DUMMY_RDMA 0xfc01c0
+
+#define MXGEFW_ETH_CMD 0xf80000
+#define MXGEFW_ETH_SEND_4 0x200000
+#define MXGEFW_ETH_SEND_1 0x240000
+#define MXGEFW_ETH_SEND_2 0x280000
+#define MXGEFW_ETH_SEND_3 0x2c0000
+#define MXGEFW_ETH_RECV_SMALL 0x300000
+#define MXGEFW_ETH_RECV_BIG 0x340000
+
+#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
+#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
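
A worked expansion of the offset macro, using only the constants
defined above:

	/* MXGEFW_ETH_SEND_OFFSET(n) = MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4
	 *                           = (n & 0x03) * 0x40000
	 * i.e. a 0x40000-byte (1 << 18) stride per send queue, matching the
	 * driver's old hand-coded "(cnt << 18)" for n in 0..3 while making
	 * the wrap at four queues explicit. */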
enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_NONE = 0,
@@ -154,7 +166,7 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_SET_MTU,
MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */
MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */
- MXGEFW_CMD_SET_STATS_DMA,
+ MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, /* replaced by SET_STATS_DMA_V2 */
MXGEFW_ENABLE_PROMISC,
MXGEFW_DISABLE_PROMISC,
@@ -168,7 +180,26 @@ enum myri10ge_mcp_cmd_type {
* data2 = RDMA length (MSH), WDMA length (LSH)
* command return data = repetitions (MSH), 0.5-ms ticks (LSH)
*/
- MXGEFW_DMA_TEST
+ MXGEFW_DMA_TEST,
+
+ MXGEFW_ENABLE_ALLMULTI,
+ MXGEFW_DISABLE_ALLMULTI,
+
+ /* returns MXGEFW_CMD_ERROR_MULTICAST
+ * if there is no room in the cache
+ * data0,MSH(data1) = multicast group address */
+ MXGEFW_JOIN_MULTICAST_GROUP,
+ /* returns MXGEFW_CMD_ERROR_MULTICAST
+ * if the address is not in the cache,
+ * or is equal to FF-FF-FF-FF-FF-FF
+ * data0,MSH(data1) = multicast group address */
+ MXGEFW_LEAVE_MULTICAST_GROUP,
+ MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
+
+ MXGEFW_CMD_SET_STATS_DMA_V2,
+ /* data0, data1 = bus addr,
+ * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
+ * adding new stuff to mcp_irq_data without changing the ABI */
};
enum myri10ge_mcp_cmd_status {
@@ -180,11 +211,17 @@ enum myri10ge_mcp_cmd_status {
MXGEFW_CMD_ERROR_CLOSED,
MXGEFW_CMD_ERROR_HASH_ERROR,
MXGEFW_CMD_ERROR_BAD_PORT,
- MXGEFW_CMD_ERROR_RESOURCES
+ MXGEFW_CMD_ERROR_RESOURCES,
+ MXGEFW_CMD_ERROR_MULTICAST
};
-/* 40 Bytes */
+#define MXGEFW_OLD_IRQ_DATA_LEN 40
+
struct mcp_irq_data {
+ /* add new counters at the beginning */
+ u32 future_use[5];
+ u32 dropped_multicast_filtered;
+ /* 40 Bytes */
u32 send_done_count;
u32 link_up;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index db0475a1102..9510030feeb 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -3246,7 +3246,7 @@ static int __init natsemi_init_mod (void)
printk(version);
#endif
- return pci_module_init (&natsemi_driver);
+ return pci_register_driver(&natsemi_driver);
}
static void __exit natsemi_exit_mod (void)
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 34bdba9eec7..654b477b570 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -702,7 +702,7 @@ static int __init ne2k_pci_init(void)
#ifdef MODULE
printk(version);
#endif
- return pci_module_init (&ne2k_driver);
+ return pci_register_driver(&ne2k_driver);
}
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b1311ae8267..30ed9a5a40e 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -17,7 +17,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 0e76859c90a..0dedd34804c 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -2178,7 +2178,7 @@ static struct pci_driver driver = {
static int __init ns83820_init(void)
{
printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n");
- return pci_module_init(&driver);
+ return pci_register_driver(&driver);
}
static void __exit ns83820_exit(void)
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index e0e29396404..e6347620529 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1963,7 +1963,7 @@ static int __init netdrv_init_module (void)
#ifdef MODULE
printk(version);
#endif
- return pci_module_init (&netdrv_pci_driver);
+ return pci_register_driver(&netdrv_pci_driver);
}
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 297e9f80536..c54f6a7ebf3 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -771,6 +771,7 @@ static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
+ PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202),
PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
@@ -786,8 +787,6 @@ static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef),
- /* this is not specific enough */
- /* PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), */
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, axnet_ids);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 0ecebfc31f0..cc0dcc9bf63 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -654,11 +654,8 @@ static int pcnet_config(struct pcmcia_device *link)
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
if (info->flags & (IS_DL10019|IS_DL10022)) {
- u_char id = inb(dev->base_addr + 0x1a);
dev->do_ioctl = &ei_ioctl;
mii_phy_probe(dev);
- if ((id == 0x30) && !info->pna_phy && (info->eth_phy == 4))
- info->eth_phy = 0;
}
link->dev_node = &info->node;
@@ -821,15 +818,6 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
}
}
-static void mdio_reset(kio_addr_t addr, int phy_id)
-{
- outb_p(0x08, addr);
- outb_p(0x0c, addr);
- outb_p(0x08, addr);
- outb_p(0x0c, addr);
- outb_p(0x00, addr);
-}
-
/*======================================================================
EEPROM access routines for DL10019 and DL10022 based cards
@@ -942,7 +930,8 @@ static void set_misc_reg(struct net_device *dev)
}
if (info->flags & IS_DL10022) {
if (info->flags & HAS_MII) {
- mdio_reset(nic_base + DLINK_GPIO, info->eth_phy);
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
/* Restart MII autonegotiation */
mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index d50bcb89dd2..5e26fe806e2 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -2978,7 +2978,7 @@ static int __init pcnet32_init_module(void)
tx_start = tx_start_pt;
/* find the PCI devices */
- if (!pci_module_init(&pcnet32_driver))
+ if (!pci_register_driver(&pcnet32_driver))
pcnet32_have_pci = 1;
/* should we find any remaining VLbus devices ? */
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 25e31fb5cb3..b1d8ed40ad9 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -14,7 +14,6 @@
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index ffd215d9a9b..792716beb05 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -12,7 +12,6 @@
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
new file mode 100644
index 00000000000..c729aeeb469
--- /dev/null
+++ b/drivers/net/qla3xxx.c
@@ -0,0 +1,3537 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME "qla3xxx"
+#define DRV_STRING "QLogic ISP3XXX Network Driver"
+#define DRV_VERSION "v2.02.00-k36"
+#define PFX DRV_NAME " "
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+ = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+ u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ u32 value;
+ unsigned int seconds = 3;
+
+ do {
+ writel((sem_mask | sem_bits),
+ &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ if ((value & (sem_mask >> 16)) == sem_bits)
+ return 0;
+ ssleep(1);
+ } while(--seconds);
+ return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+ readl(&port_regs->CommonRegs.semaphoreReg);
+}
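
A usage sketch for the pair above, mirroring ql_get_nvram_params()
later in this file (the sem_bits value shown is the NVRAM resource
code used there):

	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 10))
		return -1;	/* gave up after ~3 seconds */
	/* ... NVRAM is now exclusively ours ... */
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);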
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ u32 value;
+
+ writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
+{
+ int i = 0;
+
+ while (1) {
+ if (!ql_sem_lock(qdev,
+ QL_DRVR_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 1)) {
+ if (i < 10) {
+ ssleep(1);
+ i++;
+ } else {
+ printk(KERN_ERR PFX "%s: Timed out waiting for "
+ "driver lock...\n",
+ qdev->ndev->name);
+ return 0;
+ }
+ } else {
+ printk(KERN_DEBUG PFX
+ "%s: driver lock acquired.\n",
+ qdev->ndev->name);
+ return 1;
+ }
+ }
+}
+
+static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+ writel(((ISP_CONTROL_NP_MASK << 16) | page),
+ &port_regs->CommonRegs.ispControlStatus);
+ readl(&port_regs->CommonRegs.ispControlStatus);
+ qdev->current_page = page;
+}
+
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
+ u32 __iomem * reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ value = readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return value;
+}
+
+static u32 ql_read_common_reg(struct ql3_adapter *qdev,
+ u32 __iomem * reg)
+{
+ return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev,0);
+ value = readl(reg);
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return value;
+}
+
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev,0);
+ return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
+ u32 * reg, u32 value)
+{
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ writel(value, (u32 *) reg);
+ readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return;
+}
+
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+ u32 * reg, u32 value)
+{
+ writel(value, (u32 *) reg);
+ readl(reg);
+ return;
+}
+
+static void ql_write_page0_reg(struct ql3_adapter *qdev,
+ u32 * reg, u32 value)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev,0);
+ writel(value, (u32 *) reg);
+ readl(reg);
+ return;
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page1_reg(struct ql3_adapter *qdev,
+ u32 * reg, u32 value)
+{
+ if (qdev->current_page != 1)
+ ql_set_register_page(qdev,1);
+ writel(value, (u32 *) reg);
+ readl(reg);
+ return;
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page2_reg(struct ql3_adapter *qdev,
+ u32 * reg, u32 value)
+{
+ if (qdev->current_page != 2)
+ ql_set_register_page(qdev,2);
+ writel(value, (u32 *) reg);
+ readl(reg);
+ return;
+}
+
+static void ql_disable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ (ISP_IMR_ENABLE_INT << 16));
+
+}
+
+static void ql_enable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ ((0xff << 16) | ISP_IMR_ENABLE_INT));
+
+}
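
The register convention inferred from the two writes above (and used
for the semaphore and MAC configuration registers throughout this
driver): the upper 16 bits of the written value act as a mask that
selects which of the lower 16 bits the hardware actually updates.

	/* (ISP_IMR_ENABLE_INT << 16)           mask set, data 0   -> disable */
	/* (0xff << 16) | ISP_IMR_ENABLE_INT    mask 0xff, data set -> enable */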
+
+static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
+ struct ql_rcv_buf_cb *lrg_buf_cb)
+{
+ u64 map;
+ lrg_buf_cb->next = NULL;
+
+ if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
+ qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
+ } else {
+ qdev->lrg_buf_free_tail->next = lrg_buf_cb;
+ qdev->lrg_buf_free_tail = lrg_buf_cb;
+ }
+
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
+ qdev->ndev->name);
+ qdev->lrg_buf_skb_check++;
+ } else {
+ /*
+ * We save some space to copy the ethhdr from the
+ * first buffer
+ */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ pci_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ }
+ }
+
+ qdev->lrg_buf_free_count++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
+ *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+
+ if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
+ if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
+ qdev->lrg_buf_free_tail = NULL;
+ qdev->lrg_buf_free_count--;
+ }
+
+ return lrg_buf_cb;
+}
+
+static u32 addrBits = EEPROM_NO_ADDR_BITS;
+static u32 dataBits = EEPROM_NO_DATA_BITS;
+
+static void fm93c56a_deselect(struct ql3_adapter *qdev);
+static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
+ unsigned short *value);
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_select(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
+{
+ int i;
+ u32 mask;
+ u32 dataBit;
+ u32 previousBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ /* Clock in a zero, then do the start bit */
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1);
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);
+
+ mask = 1 << (FM93C56A_CMD_BITS - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+ dataBit = (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /* If the bit changed, change the DO state to match */
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit);
+ previousBit = dataBit;
+ }
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_RISE);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_FALL);
+ cmd = cmd << 1;
+ }
+
+ mask = 1 << (addrBits - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < addrBits; i++) {
+ dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
+ AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /* If the bit changed, change the DO state to match */
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit);
+ previousBit = dataBit;
+ }
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_RISE);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit |
+ AUBURN_EEPROM_CLK_FALL);
+ eepromAddr = eepromAddr << 1;
+ }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_deselect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
+{
+ int i;
+ u32 data = 0;
+ u32 dataBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ /* Read the data bits */
+ /* The first bit is a dummy. Clock right over it. */
+ for (i = 0; i < dataBits; i++) {
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_RISE);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_FALL);
+ dataBit = (ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.serialPortInterfaceReg) &
+ AUBURN_EEPROM_DI_1) ? 1 : 0;
+ data = (data << 1) | dataBit;
+ }
+ *value = (u16) data;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void eeprom_readword(struct ql3_adapter *qdev,
+ u32 eepromAddr, unsigned short *value)
+{
+ fm93c56a_select(qdev);
+ fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
+ fm93c56a_datain(qdev, value);
+ fm93c56a_deselect(qdev);
+}
+
+static void ql_swap_mac_addr(u8 * macAddress)
+{
+#ifdef __BIG_ENDIAN
+ u8 temp;
+ temp = macAddress[0];
+ macAddress[0] = macAddress[1];
+ macAddress[1] = temp;
+ temp = macAddress[2];
+ macAddress[2] = macAddress[3];
+ macAddress[3] = temp;
+ temp = macAddress[4];
+ macAddress[4] = macAddress[5];
+ macAddress[5] = temp;
+#endif
+}
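
The swap exists because the EEPROM is read 16 bits at a time and the
words are stored little endian; on a big-endian host each byte pair
lands reversed. Illustrative example (invented values):

	/* A MAC stored as 00:c0:dd:01:02:03 would read back on big endian
	 * as c0:00:01:dd:03:02; the pairwise swap restores byte order. */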
+
+static int ql_get_nvram_params(struct ql3_adapter *qdev)
+{
+ u16 *pEEPROMData;
+ u16 checksum = 0;
+ u32 index;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ pEEPROMData = (u16 *) & qdev->nvram_data;
+ qdev->eeprom_cmd_data = 0;
+ if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 10)) {
+ printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
+ __func__);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
+ for (index = 0; index < EEPROM_SIZE; index++) {
+ eeprom_readword(qdev, index, pEEPROMData);
+ checksum += *pEEPROMData;
+ pEEPROMData++;
+ }
+ ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
+
+ if (checksum != 0) {
+ printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
+ qdev->ndev->name, checksum);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
+ /*
+ * We have an endianness problem with the MAC addresses
+ * and the two 8-bit values, version and numPorts: they
+ * must be byte swapped on big endian systems.
+ */
+ ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
+ ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
+ ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
+ ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
+ pEEPROMData = (u16 *) & qdev->nvram_data.version;
+ *pEEPROMData = le16_to_cpu(*pEEPROMData);
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return checksum;
+}
+
+static const u32 PHYAddr[2] = {
+ PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
+};
+
+static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 temp;
+ int count = 1000;
+
+ while (count) {
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
+ if (!(temp & MAC_MII_STATUS_BSY))
+ return 0;
+ udelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 scanControl;
+
+ if (qdev->numPorts > 1) {
+ /* Auto scan will cycle through multiple ports */
+ scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
+ } else {
+ scanControl = MAC_MII_CONTROL_SC;
+ }
+
+ /*
+ * Scan register 1 of the PHY/PETBI and set up to scan both
+ * devices. The autoscan starts from the first register and
+ * completes the last one before rolling over to the first.
+ */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (scanControl) |
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
+}
+
+static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
+{
+ u8 ret;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ /* See if scan mode is enabled before we turn it off */
+ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
+ (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
+ /* Scan is enabled */
+ ret = 1;
+ } else {
+ /* Scan is disabled */
+ ret = 0;
+ }
+
+ /*
+ * When disabling scan mode you must first change the MII register
+ * address
+ */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
+ MAC_MII_CONTROL_RC) << 16));
+
+ return ret;
+}
+
+static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
+ u16 regAddr, u16 value, u32 mac_index)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s Timed out waiting for management port to "
+ "get free before issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[mac_index] | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for the write to complete. */
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Timed out waiting for management port to"
+ "get free before issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
+ u16 * value, u32 mac_index)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+ u32 temp;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Timed out waiting for management port to "
+ "get free before issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[mac_index] | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Timed out waiting for management port to "
+ "get free after issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Timed out waiting for management port to "
+ "get free before issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for write to complete. */
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Timed out waiting for management port to "
+ "get free before issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
+{
+ u32 temp;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Timed out waiting for management port to "
+ "get free before issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Timed out waiting for management port to "
+ "get free before issuing command.\n",
+ qdev->ndev->name);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
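
A usage sketch for the wrapper above (registers 2 and 3 are the
standard MII PHY identifier registers; the printout is illustrative):

	u16 id1, id2;

	if (ql_mii_read_reg(qdev, 0x02, &id1) == 0 &&
	    ql_mii_read_reg(qdev, 0x03, &id2) == 0)
		printk(KERN_DEBUG PFX "%s: PHY ID %04x:%04x\n",
		       qdev->ndev->name, id1, id2);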
+
+static void ql_petbi_reset(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
+}
+
+static void ql_petbi_start_neg(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
+
+ ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
+
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
+
+}
+
+static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
+ mac_index);
+}
+
+static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);
+
+ ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);
+
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
+ mac_index);
+}
+
+static void ql_petbi_init(struct ql3_adapter *qdev)
+{
+ ql_petbi_reset(qdev);
+ ql_petbi_start_neg(qdev);
+}
+
+static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+ ql_petbi_reset_ex(qdev, mac_index);
+ ql_petbi_start_neg_ex(qdev, mac_index);
+}
+
+static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
+}
+
+static int ql_phy_get_speed(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+
+ reg = (((reg & 0x18) >> 3) & 3);
+
+ if (reg == 2)
+ return SPEED_1000;
+ else if (reg == 1)
+ return SPEED_100;
+ else if (reg == 0)
+ return SPEED_10;
+ else
+ return -1;
+}
+
+static int ql_is_full_dup(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+
+ return (reg & PHY_AUX_DUPLEX_STAT) != 0;
+}
+
+static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PHY_NEG_PAUSE) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
+ else
+ value = (MAC_CONFIG_REG_PE << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
+ else
+ value = (MAC_CONFIG_REG_SR << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
+ else
+ value = (MAC_CONFIG_REG_GM << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
+ else
+ value = (MAC_CONFIG_REG_FD << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value =
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
+ else
+ value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_fiber(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_SM0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_SM1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static int ql_is_auto_cfg(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* MII register 0 (BMCR); bit 0x1000 is the autoneg-enable bit */
+ ql_mii_read_reg(qdev, 0x00, &reg);
+ return (reg & 0x1000) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AC0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AC1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ if (netif_msg_link(qdev))
+ printk(KERN_INFO PFX
+ "%s: Auto-Negotiate complete.\n",
+ qdev->ndev->name);
+ return 1;
+ } else {
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Auto-Negotiate incomplete.\n",
+ qdev->ndev->name);
+ return 0;
+ }
+}
+
+/*
+ * ql_is_neg_pause() returns 1 if pause was negotiated to be on
+ */
+static int ql_is_neg_pause(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return ql_is_petbi_neg_pause(qdev);
+ else
+ return ql_is_phy_neg_pause(qdev);
+}
+
+static int ql_auto_neg_error(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AE0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AE1;
+ break;
+ }
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static u32 ql_get_link_speed(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return SPEED_1000;
+ else
+ return ql_phy_get_speed(qdev);
+}
+
+static int ql_is_link_full_dup(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return 1;
+ else
+ return ql_is_full_dup(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = ISP_CONTROL_LINK_DN_0;
+ break;
+ case 1:
+ bitToCheck = ISP_CONTROL_LINK_DN_1;
+ break;
+ }
+
+ temp =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ switch (qdev->mac_index) {
+ case 0:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_0) |
+ (ISP_CONTROL_LINK_DN_0 << 16));
+ break;
+
+ case 1:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_1) |
+ (ISP_CONTROL_LINK_DN_1 << 16));
+ break;
+
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
+ u32 mac_index)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_F1_ENABLED;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_F3_ENABLED;
+ break;
+ default:
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: is not link master.\n", qdev->ndev->name);
+ return 0;
+ } else {
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: is link master.\n", qdev->ndev->name);
+ return 1;
+ }
+}
+
+static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+ ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
+}
+
+static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+ u16 reg;
+
+ ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
+ PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);
+
+ ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
+ ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
+ mac_index);
+}
+
+static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+ ql_phy_reset_ex(qdev, mac_index);
+ ql_phy_start_neg_ex(qdev, mac_index);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp, linkState;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_UP0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_UP1;
+ break;
+ }
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ linkState = LS_UP;
+ } else {
+ linkState = LS_DOWN;
+ if (netif_msg_link(qdev))
+ printk(KERN_WARNING PFX
+ "%s: Link is down.\n", qdev->ndev->name);
+ }
+ return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+ if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (ql_is_fiber(qdev)) {
+ ql_petbi_init(qdev);
+ } else {
+ /* Copper port */
+ ql_phy_init_ex(qdev, qdev->mac_index);
+ }
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (!ql_auto_neg_error(qdev)) {
+ if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+ /* configure the MAC */
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: Configuring link.\n",
+ qdev->ndev->name);
+ ql_mac_cfg_soft_reset(qdev, 1);
+ ql_mac_cfg_gig(qdev,
+ (ql_get_link_speed(qdev) == SPEED_1000));
+ ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
+ ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
+ ql_mac_cfg_soft_reset(qdev, 0);
+
+ /* enable the MAC */
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: Enabling mac.\n",
+ qdev->ndev->name);
+ ql_mac_enable(qdev, 1);
+ }
+
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: Change port_link_state LS_DOWN to LS_UP.\n",
+ qdev->ndev->name);
+ qdev->port_link_state = LS_UP;
+ netif_start_queue(qdev->ndev);
+ netif_carrier_on(qdev->ndev);
+ if (netif_msg_link(qdev))
+ printk(KERN_INFO PFX
+ "%s: Link is up at %d Mbps, %s duplex.\n",
+ qdev->ndev->name,
+ ql_get_link_speed(qdev),
+ ql_is_link_full_dup(qdev) ? "full" : "half");
+
+ } else { /* Remote error detected */
+
+ if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: Remote error detected. "
+ "Calling ql_port_start().\n",
+ qdev->ndev->name);
+ /*
+ * ql_port_start() is shared code and needs
+ * to lock the PHY on its own.
+ */
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ /* Restart the port. */
+ return ql_port_start(qdev);
+ }
+ }
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
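+/*
+ * Polled link state machine, driven from the periodic adapter timer.
+ * From LS_DOWN we wait for the PHY to report autonegotiation complete
+ * and then configure and enable the MAC; from LS_UP we watch for the
+ * latched link-down indication and drop back to LS_DOWN.
+ */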
+static void ql_link_state_machine(struct ql3_adapter *qdev)
+{
+ u32 curr_link_state;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ curr_link_state = ql_get_link_state(qdev);
+
+ if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_INFO PFX
+ "%s: Reset in progress, skip processing link "
+ "state.\n", qdev->ndev->name);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return;
+ }
+
+ switch (qdev->port_link_state) {
+ default:
+ if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+ ql_port_start(qdev);
+ }
+ qdev->port_link_state = LS_DOWN;
+ /* Fall Through */
+
+ case LS_DOWN:
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: port_link_state = LS_DOWN.\n",
+ qdev->ndev->name);
+ if (curr_link_state == LS_UP) {
+ if (netif_msg_link(qdev))
+ printk(KERN_DEBUG PFX
+ "%s: curr_link_state = LS_UP.\n",
+ qdev->ndev->name);
+ if (ql_is_auto_neg_complete(qdev))
+ ql_finish_auto_neg(qdev);
+
+ if (qdev->port_link_state == LS_UP)
+ ql_link_down_detect_clear(qdev);
+
+ }
+ break;
+
+ case LS_UP:
+ /*
+ * See if the link is currently down or went down and came
+ * back up
+ */
+ if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
+ if (netif_msg_link(qdev))
+ printk(KERN_INFO PFX "%s: Link is down.\n",
+ qdev->ndev->name);
+ qdev->port_link_state = LS_DOWN;
+ }
+ break;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_get_phy_owner(struct ql3_adapter *qdev)
+{
+ if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+ set_bit(QL_LINK_MASTER,&qdev->flags);
+ else
+ clear_bit(QL_LINK_MASTER,&qdev->flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_init_scan_mode(struct ql3_adapter *qdev)
+{
+ ql_mii_enable_scan_mode(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+ if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+ ql_petbi_init_ex(qdev, qdev->mac_index);
+ } else {
+ if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+ ql_phy_init_ex(qdev, qdev->mac_index);
+ }
+}
+
+/*
+ * MII_Setup needs to be called before taking the PHY out of reset so that the
+ * management interface clock speed can be set properly. It would be better if
+ * we had a way to disable MDC until after the PHY is out of reset, but we
+ * don't have that capability.
+ */
+static int ql_mii_setup(struct ql3_adapter *qdev)
+{
+ u32 reg;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ /* Divide 125MHz clock by 28 to meet PHY timing requirements */
+ reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+static u32 ql_supported_modes(struct ql3_adapter *qdev)
+{
+ u32 supported;
+
+ if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+ supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
+ | SUPPORTED_Autoneg;
+ } else {
+ supported = SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full
+ | SUPPORTED_Autoneg | SUPPORTED_TP;
+ }
+
+ return supported;
+}
+
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_auto_cfg(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+ u32 status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_get_link_speed(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_link_full_dup(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+
+static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = ql_supported_modes(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+ ecmd->port = PORT_FIBRE;
+ } else {
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = qdev->PHYAddr;
+ }
+ ecmd->advertising = ql_supported_modes(qdev);
+ ecmd->autoneg = ql_get_auto_cfg_status(qdev);
+ ecmd->speed = ql_get_speed(qdev);
+ ecmd->duplex = ql_get_full_dup(qdev);
+ return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
+ strncpy(drvinfo->version, ql3xxx_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ qdev->msg_enable = value;
+}
+
+static struct ethtool_ops ql3xxx_ethtool_ops = {
+ .get_settings = ql_get_settings,
+ .get_drvinfo = ql_get_drvinfo,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ql_get_msglevel,
+ .set_msglevel = ql_set_msglevel,
+};
+
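+/*
+ * Receive buffering uses two rings: small buffers carry the inbound
+ * address lists that describe each completion, while large buffers
+ * hold the packet data itself. When an skb allocation fails at
+ * completion time, the control block stays on the free list with a
+ * NULL skb; this routine retries those allocations (tracked by
+ * lrg_buf_skb_check) before the buffers are handed back to the chip.
+ */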
+static int ql_populate_free_queue(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+ u64 map;
+
+ while (lrg_buf_cb) {
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ printk(KERN_DEBUG PFX
+ "%s: Failed dev_alloc_skb().\n",
+ qdev->ndev->name);
+ break;
+ } else {
+ /*
+ * We save some space to copy the ethhdr from
+ * first buffer
+ */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ pci_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ --qdev->lrg_buf_skb_check;
+ if (!qdev->lrg_buf_skb_check)
+ return 1;
+ }
+ }
+ lrg_buf_cb = lrg_buf_cb->next;
+ }
+ return 0;
+}
+
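+/*
+ * Each large-buffer-queue entry appears to hold eight buffer address
+ * elements, hence the inner loop of eight below and the matching
+ * free-count/release-count thresholds: the producer index advances
+ * once per eight replenished buffers.
+ */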
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+ struct bufq_addr_element *lrg_buf_q_ele;
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+ if ((qdev->lrg_buf_free_count >= 8)
+ && (qdev->lrg_buf_release_cnt >= 16)) {
+
+ if (qdev->lrg_buf_skb_check)
+ if (!ql_populate_free_queue(qdev))
+ return;
+
+ lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
+ while ((qdev->lrg_buf_release_cnt >= 16)
+ && (qdev->lrg_buf_free_count >= 8)) {
+
+ for (i = 0; i < 8; i++) {
+ lrg_buf_cb =
+ ql_get_from_lrg_buf_free_list(qdev);
+ lrg_buf_q_ele->addr_high =
+ lrg_buf_cb->buf_phy_addr_high;
+ lrg_buf_q_ele->addr_low =
+ lrg_buf_cb->buf_phy_addr_low;
+ lrg_buf_q_ele++;
+
+ qdev->lrg_buf_release_cnt--;
+ }
+
+ qdev->lrg_buf_q_producer_index++;
+
+ if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
+ qdev->lrg_buf_q_producer_index = 0;
+
+ if (qdev->lrg_buf_q_producer_index ==
+ (NUM_LBUFQ_ENTRIES - 1)) {
+ lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+ }
+ }
+
+ qdev->lrg_buf_next_free = lrg_buf_q_ele;
+
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.
+ rxLargeQProducerIndex,
+ qdev->lrg_buf_q_producer_index);
+ }
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_rsp *mac_rsp)
+{
+ struct ql_tx_buf_cb *tx_cb;
+
+ tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+ pci_unmap_single(qdev->pdev,
+ pci_unmap_addr(tx_cb, mapaddr),
+ pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+ /* Update the stats before the skb is freed. */
+ qdev->stats.tx_packets++;
+ qdev->stats.tx_bytes += tx_cb->skb->len;
+ dev_kfree_skb_irq(tx_cb->skb);
+ tx_cb->skb = NULL;
+ atomic_inc(&qdev->tx_count);
+}
+
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+ long int offset;
+ u32 lrg_buf_phy_addr_low = 0;
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ u32 *curr_ial_ptr;
+ struct sk_buff *skb;
+ u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+ offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
+ if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+ qdev->small_buf_index = 0;
+
+ curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
+ qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
+ qdev->small_buf_release_cnt++;
+
+ /* start of first buffer */
+ lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+ lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+ qdev->lrg_buf_index = 0;
+ curr_ial_ptr++; /* 64-bit pointers require two incs. */
+ curr_ial_ptr++;
+
+ /* start of second buffer */
+ lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+ lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
+
+ /*
+ * Second buffer gets sent up the stack.
+ */
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+ qdev->lrg_buf_index = 0;
+ skb = lrg_buf_cb2->skb;
+
+ qdev->stats.rx_packets++;
+ qdev->stats.rx_bytes += length;
+
+ skb_put(skb, length);
+ pci_unmap_single(qdev->pdev,
+ pci_unmap_addr(lrg_buf_cb2, mapaddr),
+ pci_unmap_len(lrg_buf_cb2, maplen),
+ PCI_DMA_FROMDEVICE);
+ prefetch(skb->data);
+ skb->dev = qdev->ndev;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, qdev->ndev);
+
+ netif_receive_skb(skb);
+ qdev->ndev->last_rx = jiffies;
+ lrg_buf_cb2->skb = NULL;
+
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
+ struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
+{
+ long int offset;
+ u32 lrg_buf_phy_addr_low = 0;
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ u32 *curr_ial_ptr;
+ struct sk_buff *skb1, *skb2;
+ struct net_device *ndev = qdev->ndev;
+ u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
+ u16 size = 0;
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+
+ offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
+ if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+ qdev->small_buf_index = 0;
+ curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
+ qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
+ qdev->small_buf_release_cnt++;
+
+ /* start of first buffer */
+ lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+ lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+ qdev->lrg_buf_index = 0;
+ skb1 = lrg_buf_cb1->skb;
+ curr_ial_ptr++; /* 64-bit pointers require two incs. */
+ curr_ial_ptr++;
+
+ /* start of second buffer */
+ lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+ lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
+ skb2 = lrg_buf_cb2->skb;
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+ qdev->lrg_buf_index = 0;
+
+ qdev->stats.rx_packets++;
+ qdev->stats.rx_bytes += length;
+
+ /*
+ * Copy the ethhdr from first buffer to second. This
+ * is necessary for IP completions.
+ */
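+ /*
+ * A leading 0xFFFF in the first buffer appears to mark an
+ * untagged frame; anything else is taken to carry a VLAN tag,
+ * so the longer VLAN_ETH_HLEN header is copied. (Inferred from
+ * the check below, not from hardware documentation.)
+ */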
+ if (*((u16 *) skb1->data) != 0xFFFF)
+ size = VLAN_ETH_HLEN;
+ else
+ size = ETH_HLEN;
+
+ skb_put(skb2, length); /* Just the second buffer length here. */
+ pci_unmap_single(qdev->pdev,
+ pci_unmap_addr(lrg_buf_cb2, mapaddr),
+ pci_unmap_len(lrg_buf_cb2, maplen),
+ PCI_DMA_FROMDEVICE);
+ prefetch(skb2->data);
+
+ memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+ skb2->dev = qdev->ndev;
+ skb2->ip_summed = CHECKSUM_NONE;
+ skb2->protocol = eth_type_trans(skb2, qdev->ndev);
+
+ netif_receive_skb(skb2);
+ ndev->last_rx = jiffies;
+ lrg_buf_cb2->skb = NULL;
+
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
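+/*
+ * Drain the response ring, dispatching each IOCB by opcode, until the
+ * producer index (shadowed into host memory by the chip) catches up
+ * or the caller's budget is exhausted. Afterwards, replenish the
+ * small and large receive buffer queues and publish the new consumer
+ * index, all under hw_lock.
+ */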
+static int ql_tx_rx_clean(struct ql3_adapter *qdev,
+ int *tx_cleaned, int *rx_cleaned, int work_to_do)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ struct net_rsp_iocb *net_rsp;
+ struct net_device *ndev = qdev->ndev;
+ unsigned long hw_flags;
+
+ /* While there are entries in the completion queue. */
+ while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
+ qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+
+ net_rsp = qdev->rsp_current;
+ switch (net_rsp->opcode) {
+
+ case OPCODE_OB_MAC_IOCB_FN0:
+ case OPCODE_OB_MAC_IOCB_FN2:
+ ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
+ net_rsp);
+ (*tx_cleaned)++;
+ break;
+
+ case OPCODE_IB_MAC_IOCB:
+ ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
+ net_rsp);
+ (*rx_cleaned)++;
+ break;
+
+ case OPCODE_IB_IP_IOCB:
+ ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
+ net_rsp);
+ (*rx_cleaned)++;
+ break;
+ default:
+ {
+ u32 *tmp = (u32 *) net_rsp;
+ printk(KERN_ERR PFX
+ "%s: Unhandled response opcode 0x%x; "
+ "dropping the packet.\n",
+ ndev->name, net_rsp->opcode);
+ printk(KERN_ERR PFX
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
+ (unsigned long int)tmp[0],
+ (unsigned long int)tmp[1],
+ (unsigned long int)tmp[2],
+ (unsigned long int)tmp[3]);
+ }
+ }
+
+ qdev->rsp_consumer_index++;
+
+ if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+ } else {
+ qdev->rsp_current++;
+ }
+ }
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ ql_update_lrg_bufq_prod_index(qdev);
+
+ if (qdev->small_buf_release_cnt >= 16) {
+ while (qdev->small_buf_release_cnt >= 16) {
+ qdev->small_buf_q_producer_index++;
+
+ if (qdev->small_buf_q_producer_index ==
+ NUM_SBUFQ_ENTRIES)
+ qdev->small_buf_q_producer_index = 0;
+ qdev->small_buf_release_cnt -= 8;
+ }
+
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.
+ rxSmallQProducerIndex,
+ qdev->small_buf_q_producer_index);
+ }
+
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.rspQConsumerIndex,
+ qdev->rsp_consumer_index);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ if (unlikely(netif_queue_stopped(qdev->ndev)) &&
+ (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
+ netif_wake_queue(qdev->ndev);
+
+ return *tx_cleaned + *rx_cleaned;
+}
+
+static int ql_poll(struct net_device *ndev, int *budget)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ int work_to_do = min(*budget, ndev->quota);
+ int rx_cleaned = 0, tx_cleaned = 0;
+
+ if (!netif_carrier_ok(ndev))
+ goto quit_polling;
+
+ ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
+ *budget -= rx_cleaned;
+ ndev->quota -= rx_cleaned;
+
+ if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
+quit_polling:
+ netif_rx_complete(ndev);
+ ql_enable_interrupts(qdev);
+ return 0;
+ }
+ return 1;
+}
+
+static irqreturn_t ql3xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *ndev = dev_id;
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ u32 value;
+ int handled = 1;
+ u32 var;
+
+ value =
+ ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+
+ if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
+ spin_lock(&qdev->adapter_lock);
+ netif_stop_queue(qdev->ndev);
+ netif_carrier_off(qdev->ndev);
+ ql_disable_interrupts(qdev);
+ qdev->port_link_state = LS_DOWN;
+ set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
+
+ if (value & ISP_CONTROL_FE) {
+ /*
+ * Chip Fatal Error.
+ */
+ var =
+ ql_read_page0_reg_l(qdev,
+ &port_regs->PortFatalErrStatus);
+ printk(KERN_WARNING PFX
+ "%s: Resetting chip. PortFatalErrStatus "
+ "register = 0x%x\n", ndev->name, var);
+ set_bit(QL_RESET_START,&qdev->flags) ;
+ } else {
+ /*
+ * Soft Reset Requested.
+ */
+ set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
+ printk(KERN_ERR PFX
+ "%s: Another function issued a reset to the "
+ "chip. ISR value = %x.\n", ndev->name, value);
+ }
+ queue_work(qdev->workqueue, &qdev->reset_work);
+ spin_unlock(&qdev->adapter_lock);
+ } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
+ ql_disable_interrupts(qdev);
+ if (likely(netif_rx_schedule_prep(ndev)))
+ __netif_rx_schedule(ndev);
+ else
+ ql_enable_interrupts(qdev);
+ } else {
+ return IRQ_NONE;
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
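+/*
+ * Transmit path: the linear skb is mapped as a single DMA segment
+ * (this version of the driver does no scatter/gather), a MAC IOCB is
+ * built in the pre-allocated request-queue slot, and the frame is
+ * handed to the chip by advancing the request-queue producer index.
+ */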
+static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ struct ql_tx_buf_cb *tx_cb;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+ u64 map;
+
+ if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+ tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
+ mac_iocb_ptr = tx_cb->queue_entry;
+ memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
+ mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+ mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+ mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+ mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
+ tx_cb->skb = skb;
+ map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
+ mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
+ mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
+ pci_unmap_addr_set(tx_cb, mapaddr, map);
+ pci_unmap_len_set(tx_cb, maplen, skb->len);
+ atomic_dec(&qdev->tx_count);
+
+ qdev->req_producer_index++;
+ if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
+ qdev->req_producer_index = 0;
+ wmb();
+ ql_write_common_reg_l(qdev,
+ (u32 *) & port_regs->CommonRegs.reqQProducerIndex,
+ qdev->req_producer_index);
+
+ ndev->trans_start = jiffies;
+ if (netif_msg_tx_queued(qdev))
+ printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
+ ndev->name, qdev->req_producer_index, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ qdev->req_q_size =
+ (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
+
+ qdev->req_q_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ (size_t) qdev->req_q_size,
+ &qdev->req_q_phy_addr);
+
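+ /*
+ * The chip apparently requires the queue to be aligned to its own
+ * size; reject the allocation if the low-order address bits overlap
+ * the queue length.
+ */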
+ if ((qdev->req_q_virt_addr == NULL) ||
+ LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
+ printk(KERN_ERR PFX "%s: reqQ failed.\n",
+ qdev->ndev->name);
+ return -ENOMEM;
+ }
+
+ qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+ qdev->rsp_q_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ (size_t) qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr);
+
+ if ((qdev->rsp_q_virt_addr == NULL) ||
+ LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
+ printk(KERN_ERR PFX
+ "%s: rspQ allocation failed\n",
+ qdev->ndev->name);
+ pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
+ qdev->req_q_virt_addr,
+ qdev->req_q_phy_addr);
+ return -ENOMEM;
+ }
+
+ set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+
+ return 0;
+}
+
+static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
+ printk(KERN_INFO PFX
+ "%s: Already done.\n", qdev->ndev->name);
+ return;
+ }
+
+ pci_free_consistent(qdev->pdev,
+ qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+
+ qdev->req_q_virt_addr = NULL;
+
+ pci_free_consistent(qdev->pdev,
+ qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+
+ qdev->rsp_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+}
+
+static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+{
+ /* Create Large Buffer Queue */
+ qdev->lrg_buf_q_size =
+ NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+ if (qdev->lrg_buf_q_size < PAGE_SIZE)
+ qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+
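+ /*
+ * Doubling the allocation (or rounding up to a page), as computed
+ * above, looks like defensive padding for the chip's alignment
+ * requirements; the base of the block is used directly below.
+ */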
+ qdev->lrg_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr);
+
+ if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+ printk(KERN_ERR PFX
+ "%s: lBufQ failed\n", qdev->ndev->name);
+ return -ENOMEM;
+ }
+ qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+ qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+ /* Create Small Buffer Queue */
+ qdev->small_buf_q_size =
+ NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+ if (qdev->small_buf_q_size < PAGE_SIZE)
+ qdev->small_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+ qdev->small_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr);
+
+ if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+ printk(KERN_ERR PFX
+ "%s: Small Buffer Queue allocation failed.\n",
+ qdev->ndev->name);
+ pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+ qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+ set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+ return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
+ printk(KERN_INFO PFX
+ "%s: Already done.\n", qdev->ndev->name);
+ return;
+ }
+
+ pci_free_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+
+ qdev->lrg_buf_q_virt_addr = NULL;
+
+ pci_free_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
+
+ qdev->small_buf_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+}
+
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct bufq_addr_element *small_buf_q_entry;
+
+ /* Currently we allocate one chunk of memory and use it for all the small buffers. */
+ qdev->small_buf_total_size =
+ (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr);
+
+ if (qdev->small_buf_virt_addr == NULL) {
+ printk(KERN_ERR PFX
+ "%s: Failed to get small buffer memory.\n",
+ qdev->ndev->name);
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+ qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+ small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+ qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
+
+ /* Initialize the small buffer queue. */
+ for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
+ small_buf_q_entry->addr_high =
+ cpu_to_le32(qdev->small_buf_phy_addr_high);
+ small_buf_q_entry->addr_low =
+ cpu_to_le32(qdev->small_buf_phy_addr_low +
+ (i * QL_SMALL_BUFFER_SIZE));
+ small_buf_q_entry++;
+ }
+ qdev->small_buf_index = 0;
+ set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
+ return 0;
+}
+
+static void ql_free_small_buffers(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
+ printk(KERN_INFO PFX
+ "%s: Already done.\n", qdev->ndev->name);
+ return;
+ }
+ if (qdev->small_buf_virt_addr != NULL) {
+ pci_free_consistent(qdev->pdev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
+
+ qdev->small_buf_virt_addr = NULL;
+ }
+}
+
+static void ql_free_large_buffers(struct ql3_adapter *qdev)
+{
+ int i = 0;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+
+ for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ if (lrg_buf_cb->skb) {
+ dev_kfree_skb(lrg_buf_cb->skb);
+ pci_unmap_single(qdev->pdev,
+ pci_unmap_addr(lrg_buf_cb, mapaddr),
+ pci_unmap_len(lrg_buf_cb, maplen),
+ PCI_DMA_FROMDEVICE);
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ } else {
+ break;
+ }
+ }
+}
+
+static void ql_init_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
+
+ for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
+ buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
+ buf_addr_ele++;
+ }
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_skb_check = 0;
+}
+
+static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct sk_buff *skb;
+ u64 map;
+
+ for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+ skb = dev_alloc_skb(qdev->lrg_buffer_len);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ printk(KERN_ERR PFX
+ "%s: large buff alloc failed, "
+ "for %d bytes at index %d.\n",
+ qdev->ndev->name,
+ qdev->lrg_buffer_len, i);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ } else {
+
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ lrg_buf_cb->index = i;
+ lrg_buf_cb->skb = skb;
+ /*
+ * We save some space to copy the ethhdr from first
+ * buffer
+ */
+ skb_reserve(skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ pci_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ }
+ }
+ return 0;
+}
+
+static void ql_create_send_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+ struct ob_mac_iocb_req *req_q_curr =
+ qdev->req_q_virt_addr;
+
+ /* Create free list of transmit buffers */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ tx_cb = &qdev->tx_buf[i];
+ tx_cb->skb = NULL;
+ tx_cb->queue_entry = req_q_curr;
+ req_q_curr++;
+ }
+}
+
+static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
+{
+ if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
+ qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+ else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+ qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
+ } else {
+ printk(KERN_ERR PFX
+ "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
+ qdev->ndev->name);
+ return -ENOMEM;
+ }
+ qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
+ qdev->max_frame_size =
+ (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+
+ /*
+ * First allocate a page of shared memory and use it for shadow
+ * locations of Network Request Queue Consumer Address Register and
+ * Network Completion Queue Producer Index Register
+ */
+ qdev->shadow_reg_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
+ qdev->req_consumer_index_phy_addr_high =
+ MS_64BITS(qdev->shadow_reg_phy_addr);
+ qdev->req_consumer_index_phy_addr_low =
+ LS_64BITS(qdev->shadow_reg_phy_addr);
+
+ qdev->prsp_producer_index =
+ (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+ qdev->rsp_producer_index_phy_addr_high =
+ qdev->req_consumer_index_phy_addr_high;
+ qdev->rsp_producer_index_phy_addr_low =
+ qdev->req_consumer_index_phy_addr_low + 8;
+ } else {
+ printk(KERN_ERR PFX
+ "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
+ return -ENOMEM;
+ }
+
+ if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
+ printk(KERN_ERR PFX
+ "%s: ql_alloc_net_req_rsp_queues failed.\n",
+ qdev->ndev->name);
+ goto err_req_rsp;
+ }
+
+ if (ql_alloc_buffer_queues(qdev) != 0) {
+ printk(KERN_ERR PFX
+ "%s: ql_alloc_buffer_queues failed.\n",
+ qdev->ndev->name);
+ goto err_buffer_queues;
+ }
+
+ if (ql_alloc_small_buffers(qdev) != 0) {
+ printk(KERN_ERR PFX
+ "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
+ goto err_small_buffers;
+ }
+
+ if (ql_alloc_large_buffers(qdev) != 0) {
+ printk(KERN_ERR PFX
+ "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
+ goto err_small_buffers;
+ }
+
+ /* Initialize the large buffer queue. */
+ ql_init_large_buffers(qdev);
+ ql_create_send_free_list(qdev);
+
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ return 0;
+
+err_small_buffers:
+ ql_free_buffer_queues(qdev);
+err_buffer_queues:
+ ql_free_net_req_rsp_queues(qdev);
+err_req_rsp:
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+
+ return -ENOMEM;
+}
+
+static void ql_free_mem_resources(struct ql3_adapter *qdev)
+{
+ ql_free_large_buffers(qdev);
+ ql_free_small_buffers(qdev);
+ ql_free_buffer_queues(qdev);
+ ql_free_net_req_rsp_queues(qdev);
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+ qdev->shadow_reg_virt_addr = NULL;
+ }
+}
+
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_local_ram_registers *local_ram =
+ (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 4))
+ return -1;
+
+ ql_write_page2_reg(qdev,
+ &local_ram->bufletSize, qdev->nvram_data.bufletSize);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->maxBufletCount,
+ qdev->nvram_data.bufletCount);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdLow,
+ (qdev->nvram_data.tcpWindowThreshold25 << 16) |
+ (qdev->nvram_data.tcpWindowThreshold0));
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdHigh,
+ qdev->nvram_data.tcpWindowThreshold50);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableBase,
+ (qdev->nvram_data.ipHashTableBaseHi << 16) |
+ qdev->nvram_data.ipHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableCount,
+ qdev->nvram_data.ipHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableBase,
+ (qdev->nvram_data.tcpHashTableBaseHi << 16) |
+ qdev->nvram_data.tcpHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableCount,
+ qdev->nvram_data.tcpHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->ncbBase,
+ (qdev->nvram_data.ncbTableBaseHi << 16) |
+ qdev->nvram_data.ncbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxNcbCount,
+ qdev->nvram_data.ncbTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->drbBase,
+ (qdev->nvram_data.drbTableBaseHi << 16) |
+ qdev->nvram_data.drbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxDrbCount,
+ qdev->nvram_data.drbTableSize);
+ ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+ return 0;
+}
+
+static int ql_adapter_initialize(struct ql3_adapter *qdev)
+{
+ u32 value;
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ struct ql3xxx_host_memory_registers __iomem *hmem_regs =
+ (struct ql3xxx_host_memory_registers *)port_regs;
+ u32 delay = 10;
+ int status = 0;
+
+ if (ql_mii_setup(qdev))
+ return -1;
+
+ /* Bring the PHY out of reset */
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ (ISP_SERIAL_PORT_IF_WE |
+ (ISP_SERIAL_PORT_IF_WE << 16)));
+
+ qdev->port_link_state = LS_DOWN;
+ netif_carrier_off(qdev->ndev);
+
+ /* V2 chip fix for ARS-39168. */
+ ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+ (ISP_SERIAL_PORT_IF_SDE |
+ (ISP_SERIAL_PORT_IF_SDE << 16)));
+
+ /* Request Queue Registers */
+ *((u32 *) (qdev->preq_consumer_index)) = 0;
+ atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
+ qdev->req_producer_index = 0;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrHigh,
+ qdev->req_consumer_index_phy_addr_high);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrLow,
+ qdev->req_consumer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrHigh,
+ MS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrLow,
+ LS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
+
+ /* Response Queue Registers */
+ *((u16 *) (qdev->prsp_producer_index)) = 0;
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrHigh,
+ qdev->rsp_producer_index_phy_addr_high);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrLow,
+ qdev->rsp_producer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrHigh,
+ MS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrLow,
+ LS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
+
+ /* Large Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrHigh,
+ MS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrLow,
+ LS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeBufferLength,
+ qdev->lrg_buffer_len);
+
+ /* Small Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrHigh,
+ MS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrLow,
+ LS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallBufferLength,
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
+ qdev->small_buf_release_cnt = 8;
+ qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
+ qdev->lrg_buf_release_cnt = 8;
+ qdev->lrg_buf_next_free =
+ (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
+ qdev->small_buf_index = 0;
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_free_count = 0;
+ qdev->lrg_buf_free_head = NULL;
+ qdev->lrg_buf_free_tail = NULL;
+
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.
+ rxSmallQProducerIndex,
+ qdev->small_buf_q_producer_index);
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.
+ rxLargeQProducerIndex,
+ qdev->lrg_buf_q_producer_index);
+
+ /*
+ * Find out if the chip has already been initialized. If it has, then
+ * we skip some of the initialization.
+ */
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if ((value & PORT_STATUS_IC) == 0) {
+
+ /* Chip has not been configured yet, so let it rip. */
+ if (ql_init_misc_registers(qdev)) {
+ status = -1;
+ goto out;
+ }
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev,
+ &port_regs->mac1MaxFrameLengthReg,
+ qdev->max_frame_size);
+ else
+ ql_write_page0_reg(qdev,
+ &port_regs->mac0MaxFrameLengthReg,
+ qdev->max_frame_size);
+
+ value = qdev->nvram_data.tcpMaxWindowSize;
+ ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
+
+ value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
+
+ if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 13)) {
+ status = -1;
+ goto out;
+ }
+ ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
+ ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
+ (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
+ 16) | (INTERNAL_CHIP_SD |
+ INTERNAL_CHIP_WE)));
+ ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
+ }
+
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ status = -1;
+ goto out;
+ }
+
+ ql_init_scan_mode(qdev);
+ ql_get_phy_owner(qdev);
+
+ /* Load the MAC Configuration */
+
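+ /*
+ * The MAC address sits behind an indirect pointer/data register
+ * pair: entry 0 takes the low 32 bits, entry 1 the top 16 bits.
+ * The RP mask in the upper half-word again appears to act as a
+ * write-enable for the pointer field.
+ */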
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[2] << 24)
+ | (qdev->ndev->dev_addr[3] << 16)
+ | (qdev->ndev->dev_addr[4] << 8)
+ | qdev->ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[0] << 8)
+ | qdev->ndev->dev_addr[1]));
+
+ /* Enable Primary MAC */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
+ MAC_ADDR_INDIRECT_PTR_REG_PE));
+
+ /* Clear Primary and Secondary IP addresses */
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ (qdev->mac_index << 2)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ ((qdev->mac_index << 2) + 1)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+
+ /* Indicate Configuration Complete */
+ ql_write_page0_reg(qdev,
+ &port_regs->portControl,
+ ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
+
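+ /* Poll up to 5 seconds (10 x 500 ms) for the chip to report
+ * initialization complete. */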
+ do {
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (value & PORT_STATUS_IC)
+ break;
+ msleep(500);
+ } while (--delay);
+
+ if (delay == 0) {
+ printk(KERN_ERR PFX
+ "%s: Hw Initialization timeout.\n", qdev->ndev->name);
+ status = -1;
+ goto out;
+ }
+
+ /* Enable Ethernet Function */
+ value =
+ (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+ PORT_CONTROL_HH);
+ ql_write_page0_reg(qdev, &port_regs->portControl,
+ ((value << 16) | value));
+
+out:
+ return status;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_adapter_reset(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ int status = 0;
+ u16 value;
+ int max_wait_time;
+
+ set_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_DONE, &qdev->flags);
+
+ /*
+ * Issue soft reset to chip.
+ */
+ printk(KERN_DEBUG PFX
+ "%s: Issue soft reset to chip.\n",
+ qdev->ndev->name);
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.ispControlStatus,
+ ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+ /* Wait up to 5 seconds for the reset to complete. */
+ printk(KERN_DEBUG PFX
+ "%s: Waiting up to 5 seconds for reset to complete.\n",
+ qdev->ndev->name);
+
+ /* Wait until the firmware tells us the Soft Reset is done */
+ max_wait_time = 5;
+ do {
+ value =
+ ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0)
+ break;
+
+ ssleep(1);
+ } while ((--max_wait_time));
+
+ /*
+ * Also, make sure that the Network Reset Interrupt bit has been
+ * cleared after the soft reset has taken place.
+ */
+ value =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ if (value & ISP_CONTROL_RI) {
+ printk(KERN_DEBUG PFX
+ "ql_adapter_reset: clearing RI after reset.\n");
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ }
+
+ if (max_wait_time == 0) {
+ /* Issue Force Soft Reset */
+ ql_write_common_reg(qdev,
+ (u32 *) & port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_FSR << 16) |
+ ISP_CONTROL_FSR));
+ /*
+ * Wait until the firmware tells us the Force Soft Reset is
+ * done
+ */
+ max_wait_time = 5;
+ do {
+ value =
+ ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus);
+ if ((value & ISP_CONTROL_FSR) == 0) {
+ break;
+ }
+ ssleep(1);
+ } while ((--max_wait_time));
+ }
+ if (max_wait_time == 0)
+ status = 1;
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ set_bit(QL_RESET_DONE, &qdev->flags);
+ return status;
+}
+
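+/*
+ * Decode which network function of the two-function ISP this PCI
+ * function is (FN0 or FN1) and derive from that the MAC index, the
+ * outbound opcodes, the PHY address, and whether the port is optical,
+ * as reported by the port-status media bits.
+ */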
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ u32 value, port_status;
+ u8 func_number;
+
+ /* Get the function number */
+ value =
+ ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+ func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+ port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ switch (value & ISP_CONTROL_FN_MASK) {
+ case ISP_CONTROL_FN0_NET:
+ qdev->mac_index = 0;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
+ qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
+ qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+ qdev->PHYAddr = PORT0_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM0)
+ set_bit(QL_LINK_OPTICAL,&qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN1_NET:
+ qdev->mac_index = 1;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
+ qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
+ qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+ qdev->PHYAddr = PORT1_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM1)
+ set_bit(QL_LINK_OPTICAL,&qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN0_SCSI:
+ case ISP_CONTROL_FN1_SCSI:
+ default:
+ printk(KERN_DEBUG PFX
+ "%s: Invalid function number, ispControlStatus = 0x%x\n",
+ qdev->ndev->name, value);
+ break;
+ }
+ qdev->numPorts = qdev->nvram_data.numPorts;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct pci_dev *pdev = qdev->pdev;
+
+ printk(KERN_INFO PFX
+ "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
+ DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+ printk(KERN_INFO PFX
+ "%s Interface.\n",
+ test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
+
+ /*
+ * Print PCI bus width/type.
+ */
+ printk(KERN_INFO PFX
+ "Bus interface is %s %s.\n",
+ ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+ ((qdev->pci_x) ? "PCI-X" : "PCI"));
+
+ printk(KERN_INFO PFX
+ "mem IO base address adjusted = 0x%p\n",
+ qdev->mem_map_registers);
+ printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
+
+ if (netif_msg_probe(qdev))
+ printk(KERN_INFO PFX
+ "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
+ ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
+ ndev->dev_addr[5]);
+}
+
+static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
+{
+ struct net_device *ndev = qdev->ndev;
+ int retval = 0;
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ clear_bit(QL_ADAPTER_UP,&qdev->flags);
+ clear_bit(QL_LINK_MASTER,&qdev->flags);
+
+ ql_disable_interrupts(qdev);
+
+ free_irq(qdev->pdev->irq, ndev);
+
+ if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
+ printk(KERN_INFO PFX
+ "%s: calling pci_disable_msi().\n", qdev->ndev->name);
+ clear_bit(QL_MSI_ENABLED,&qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+
+ del_timer_sync(&qdev->adapter_timer);
+
+ netif_poll_disable(ndev);
+
+ if (do_reset) {
+ int soft_reset;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_wait_for_drvr_lock(qdev)) {
+ if ((soft_reset = ql_adapter_reset(qdev))) {
+ printk(KERN_ERR PFX
+ "%s: ql_adapter_reset(%d) FAILED!\n",
+ ndev->name, qdev->index);
+ }
+ printk(KERN_ERR PFX
+ "%s: Releasing driver lock via chip reset.\n", ndev->name);
+ } else {
+ printk(KERN_ERR PFX
+ "%s: Could not acquire driver lock to do "
+ "reset!\n", ndev->name);
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ }
+ ql_free_mem_resources(qdev);
+ return retval;
+}
+
+static int ql_adapter_up(struct ql3_adapter *qdev)
+{
+ struct net_device *ndev = qdev->ndev;
+ int err;
+ unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
+ unsigned long hw_flags;
+
+ if (ql_alloc_mem_resources(qdev)) {
+ printk(KERN_ERR PFX
+ "%s Unable to allocate buffers.\n", ndev->name);
+ return -ENOMEM;
+ }
+
+ if (qdev->msi) {
+ if (pci_enable_msi(qdev->pdev)) {
+ printk(KERN_ERR PFX
+ "%s: User requested MSI, but MSI failed to "
+ "initialize. Continuing without MSI.\n",
+ qdev->ndev->name);
+ qdev->msi = 0;
+ } else {
+ printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
+ set_bit(QL_MSI_ENABLED,&qdev->flags);
+ irq_flags &= ~SA_SHIRQ;
+ }
+ }
+
+ if ((err = request_irq(qdev->pdev->irq,
+ ql3xxx_isr,
+ irq_flags, ndev->name, ndev))) {
+ printk(KERN_ERR PFX
+ "%s: Failed to reserve interrupt %d; already in use.\n",
+ ndev->name, qdev->pdev->irq);
+ goto err_irq;
+ }
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ if ((err = ql_wait_for_drvr_lock(qdev))) {
+ if ((err = ql_adapter_initialize(qdev))) {
+ printk(KERN_ERR PFX
+ "%s: Unable to initialize adapter.\n",
+ ndev->name);
+ goto err_init;
+ }
+ printk(KERN_ERR PFX
+ "%s: Releasing driver lock.\n", ndev->name);
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+ } else {
+ printk(KERN_ERR PFX
+ "%s: Could not acquire driver lock.\n",
+ ndev->name);
+ goto err_lock;
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ set_bit(QL_ADAPTER_UP,&qdev->flags);
+
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+ netif_poll_enable(ndev);
+ ql_enable_interrupts(qdev);
+ return 0;
+
+err_init:
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+err_lock:
+ free_irq(qdev->pdev->irq, ndev);
+err_irq:
+ if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
+ printk(KERN_INFO PFX
+ "%s: calling pci_disable_msi().\n",
+ qdev->ndev->name);
+ clear_bit(QL_MSI_ENABLED,&qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+ return err;
+}
+
+static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
+{
+ if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+ printk(KERN_ERR PFX
+ "%s: Driver up/down cycle failed, "
+ "closing device\n", qdev->ndev->name);
+ dev_close(qdev->ndev);
+ return -1;
+ }
+ return 0;
+}
+
+static int ql3xxx_close(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ /*
+ * Wait for device to recover from a reset.
+ * (Rarely happens, but possible.)
+ */
+ while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
+ msleep(50);
+
+ ql_adapter_down(qdev,QL_DO_RESET);
+ return 0;
+}
+
+static int ql3xxx_open(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return ql_adapter_up(qdev);
+}
+
+static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
+{
+ struct ql3_adapter *qdev = netdev_priv(dev);
+ return &qdev->stats;
+}
+
+static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
+ if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
+ printk(KERN_ERR PFX
+ "%s: mtu size of %d is not valid. Use exactly %d or "
+ "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
+ JUMBO_MTU_SIZE);
+ return -EINVAL;
+ }
+
+ if (!netif_running(ndev)) {
+ ndev->mtu = new_mtu;
+ return 0;
+ }
+
+ ndev->mtu = new_mtu;
+ return ql_cycle_adapter(qdev,QL_DO_RESET);
+}
+
+static void ql3xxx_set_multicast_list(struct net_device *ndev)
+{
+ /*
+ * We are manually parsing the list in the net_device structure.
+ */
+ return;
+}
+
+static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ struct sockaddr *addr = p;
+ unsigned long hw_flags;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[2] << 24) | (ndev->
+ dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return 0;
+}
+
+static void ql3xxx_tx_timeout(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+
+ printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
+ /*
+ * Stop the queues, we've got a problem.
+ */
+ netif_stop_queue(ndev);
+
+ /*
+ * Wake up the worker to process this event.
+ */
+ queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+}
+
+static void ql_reset_work(struct ql3_adapter *qdev)
+{
+ struct net_device *ndev = qdev->ndev;
+ u32 value;
+ struct ql_tx_buf_cb *tx_cb;
+ int max_wait_time, i;
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ unsigned long hw_flags;
+
+ if (test_bit(QL_RESET_PER_SCSI,&qdev->flags) ||
+ test_bit(QL_RESET_START,&qdev->flags)) {
+ clear_bit(QL_LINK_MASTER,&qdev->flags);
+
+ /*
+ * Loop through the active list and return the skb.
+ */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ tx_cb = &qdev->tx_buf[i];
+ if (tx_cb->skb) {
+
+ printk(KERN_DEBUG PFX
+ "%s: Freeing lost SKB.\n",
+ qdev->ndev->name);
+ pci_unmap_single(qdev->pdev,
+ pci_unmap_addr(tx_cb, mapaddr),
+ pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+ dev_kfree_skb(tx_cb->skb);
+ tx_cb->skb = NULL;
+ }
+ }
+
+ printk(KERN_ERR PFX
+ "%s: Clearing NRI after reset.\n", qdev->ndev->name);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ /*
+ * Wait for the soft reset to complete.
+ */
+ max_wait_time = 10;
+ do {
+ value = ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0) {
+ printk(KERN_DEBUG PFX
+ "%s: reset completed.\n",
+ qdev->ndev->name);
+ break;
+ }
+
+ if (value & ISP_CONTROL_RI) {
+ printk(KERN_DEBUG PFX
+ "%s: clearing NRI after reset.\n",
+ qdev->ndev->name);
+ ql_write_common_reg(qdev,
+ (u32 *) &port_regs->CommonRegs.ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ }
+
+ ssleep(1);
+ } while (--max_wait_time);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ if (value & ISP_CONTROL_SR) {
+
+ /*
+ * Set the reset flags and clear the board again.
+ * Nothing else to do...
+ */
+ printk(KERN_ERR PFX
+ "%s: Timed out waiting for reset to "
+ "complete; cycling the adapter.\n", ndev->name);
+ clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
+ clear_bit(QL_RESET_START,&qdev->flags);
+ ql_cycle_adapter(qdev,QL_DO_RESET);
+ return;
+ }
+
+ clear_bit(QL_RESET_ACTIVE,&qdev->flags);
+ clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
+ clear_bit(QL_RESET_START,&qdev->flags);
+ ql_cycle_adapter(qdev,QL_NO_RESET);
+ }
+}
+
+static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+{
+ ql_cycle_adapter(qdev,QL_DO_RESET);
+}
+
+static void ql_get_board_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+ u32 value;
+
+ value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
+
+ qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
+ if (value & PORT_STATUS_64)
+ qdev->pci_width = 64;
+ else
+ qdev->pci_width = 32;
+ if (value & PORT_STATUS_X)
+ qdev->pci_x = 1;
+ else
+ qdev->pci_x = 0;
+ qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
+}
+
+static void ql3xxx_timer(unsigned long ptr)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+
+ if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
+ printk(KERN_DEBUG PFX
+ "%s: Reset in progress.\n",
+ qdev->ndev->name);
+ goto end;
+ }
+
+ ql_link_state_machine(qdev);
+
+ /* Restart the timer on a 1 second interval. */
+end:
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+}
+
+static int __devinit ql3xxx_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
+{
+ struct net_device *ndev = NULL;
+ struct ql3_adapter *qdev = NULL;
+ static int cards_found = 0;
+ int pci_using_dac, err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "%s cannot enable PCI device\n",
+ pci_name(pdev));
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
+ pci_name(pdev));
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ pci_using_dac = 0;
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ }
+
+ if (err) {
+ printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+ pci_name(pdev));
+ goto err_out_free_regions;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct ql3_adapter));
+ if (!ndev)
+ goto err_out_free_regions;
+
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->features = NETIF_F_LLTX;
+ if (pci_using_dac)
+ ndev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, ndev);
+
+ qdev = netdev_priv(ndev);
+ qdev->index = cards_found;
+ qdev->ndev = ndev;
+ qdev->pdev = pdev;
+ qdev->port_link_state = LS_DOWN;
+ if (msi)
+ qdev->msi = 1;
+
+ qdev->msg_enable = netif_msg_init(debug, default_msg);
+
+ qdev->mem_map_registers =
+ ioremap_nocache(pci_resource_start(pdev, 1),
+ pci_resource_len(qdev->pdev, 1));
+ if (!qdev->mem_map_registers) {
+ printk(KERN_ERR PFX "%s: cannot map device registers\n",
+ pci_name(pdev));
+ goto err_out_free_ndev;
+ }
+
+ spin_lock_init(&qdev->adapter_lock);
+ spin_lock_init(&qdev->hw_lock);
+
+ /* Set driver entry points */
+ ndev->open = ql3xxx_open;
+ ndev->hard_start_xmit = ql3xxx_send;
+ ndev->stop = ql3xxx_close;
+ ndev->get_stats = ql3xxx_get_stats;
+ ndev->change_mtu = ql3xxx_change_mtu;
+ ndev->set_multicast_list = ql3xxx_set_multicast_list;
+ SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+ ndev->set_mac_address = ql3xxx_set_mac_address;
+ ndev->tx_timeout = ql3xxx_tx_timeout;
+ ndev->watchdog_timeo = 5 * HZ;
+
+ ndev->poll = &ql_poll;
+ ndev->weight = 64;
+
+ ndev->irq = pdev->irq;
+
+ /* make sure the EEPROM is good */
+ if (ql_get_nvram_params(qdev)) {
+ printk(KERN_ALERT PFX
+ "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
+ qdev->index);
+ err = -EIO;
+ goto err_out_iounmap;
+ }
+
+ ql_set_mac_info(qdev);
+
+ /* Validate and set parameters */
+ if (qdev->mac_index) {
+ memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
+ ETH_ALEN);
+ } else {
+ memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
+ ETH_ALEN);
+ }
+ memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+ ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
+
+ /* Turn off support for multicasting */
+ ndev->flags &= ~IFF_MULTICAST;
+
+ /* Record PCI bus information. */
+ ql_get_board_info(qdev);
+
+ /*
+ * Set the Maximum Memory Read Byte Count value. We do this to handle
+ * jumbo frames.
+ */
+ if (qdev->pci_x) {
+ pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
+ }
+
+ err = register_netdev(ndev);
+ if (err) {
+ printk(KERN_ERR PFX "%s: cannot register net device\n",
+ pci_name(pdev));
+ goto err_out_iounmap;
+ }
+
+ /* we're going to reset, so assume we have no link for now */
+
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+
+ qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
+ INIT_WORK(&qdev->tx_timeout_work,
+ (void (*)(void *))ql_tx_timeout_work, qdev);
+
+ init_timer(&qdev->adapter_timer);
+ qdev->adapter_timer.function = ql3xxx_timer;
+ qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
+ qdev->adapter_timer.data = (unsigned long)qdev;
+
+ if (!cards_found) {
+ printk(KERN_ALERT PFX "%s\n", DRV_STRING);
+ printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
+ DRV_NAME, DRV_VERSION);
+ }
+ ql_display_dev_info(ndev);
+
+ cards_found++;
+ return 0;
+
+err_out_iounmap:
+ iounmap(qdev->mem_map_registers);
+err_out_free_ndev:
+ free_netdev(ndev);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+err_out:
+ return err;
+}
+
+static void __devexit ql3xxx_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ ql_disable_interrupts(qdev);
+
+ if (qdev->workqueue) {
+ cancel_delayed_work(&qdev->reset_work);
+ cancel_delayed_work(&qdev->tx_timeout_work);
+ destroy_workqueue(qdev->workqueue);
+ qdev->workqueue = NULL;
+ }
+
+ iounmap(qdev->mem_map_registers);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+}
+
+static struct pci_driver ql3xxx_driver = {
+
+ .name = DRV_NAME,
+ .id_table = ql3xxx_pci_tbl,
+ .probe = ql3xxx_probe,
+ .remove = __devexit_p(ql3xxx_remove),
+};
+
+static int __init ql3xxx_init_module(void)
+{
+ return pci_register_driver(&ql3xxx_driver);
+}
+
+static void __exit ql3xxx_exit(void)
+{
+ pci_unregister_driver(&ql3xxx_driver);
+}
+
+module_init(ql3xxx_init_module);
+module_exit(ql3xxx_exit);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
new file mode 100644
index 00000000000..9492cee6b08
--- /dev/null
+++ b/drivers/net/qla3xxx.h
@@ -0,0 +1,1194 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+#ifndef _QLA3XXX_H_
+#define _QLA3XXX_H_
+
+/*
+ * IOCB Definitions...
+ */
+#pragma pack(1)
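+/*
+ * These structures are DMA'd to and from the adapter, so their layout
+ * must match the hardware byte-for-byte; pack(1) keeps the compiler
+ * from inserting padding.
+ */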
+
+#define OPCODE_OB_MAC_IOCB_FN0 0x01
+#define OPCODE_OB_MAC_IOCB_FN2 0x21
+#define OPCODE_OB_TCP_IOCB_FN0 0x03
+#define OPCODE_OB_TCP_IOCB_FN2 0x23
+#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
+#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
+
+#define OPCODE_UPDATE_NCB_IOCB 0xF0
+#define OPCODE_IB_MAC_IOCB 0xF9
+#define OPCODE_IB_IP_IOCB 0xFA
+#define OPCODE_IB_TCP_IOCB 0xFB
+#define OPCODE_DUMP_PROTO_IOCB 0xFE
+#define OPCODE_BUFFER_ALERT_IOCB 0xFB
+
+#define OPCODE_FUNC_ID_MASK 0x30
+#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
+#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
+#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
+
+#define FN0_MA_BITS_MASK 0x00
+#define FN1_MA_BITS_MASK 0x80
+
+struct ob_mac_iocb_req {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_REQ_MA 0xC0
+#define OB_MAC_IOCB_REQ_F 0x20
+#define OB_MAC_IOCB_REQ_X 0x10
+#define OB_MAC_IOCB_REQ_D 0x02
+#define OB_MAC_IOCB_REQ_I 0x01
+ __le16 reserved0;
+
+ __le32 transaction_id;
+ __le16 data_len;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved4;
+ __le32 reserved5;
+};
+/*
+ * The following constants define control bits for buffer
+ * length fields for all IOCB's.
+ */
+#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */
+#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */
+#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */
+#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */
+
+struct ob_mac_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_P 0x08
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ __le32 transaction_id;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ib_mac_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define IB_MAC_IOCB_RSP_S 0x80
+#define IB_MAC_IOCB_RSP_H1 0x40
+#define IB_MAC_IOCB_RSP_H0 0x20
+#define IB_MAC_IOCB_RSP_B 0x10
+#define IB_MAC_IOCB_RSP_M 0x08
+#define IB_MAC_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le32 reserved;
+ __le32 ial_low;
+ __le32 ial_high;
+
+};
+
+struct ob_ip_iocb_req {
+ u8 opcode;
+ __le16 flags;
+#define OB_IP_IOCB_REQ_O 0x100
+#define OB_IP_IOCB_REQ_H 0x008
+#define OB_IP_IOCB_REQ_U 0x004
+#define OB_IP_IOCB_REQ_D 0x002
+#define OB_IP_IOCB_REQ_I 0x001
+
+ u8 reserved0;
+
+ __le32 transaction_id;
+ __le16 data_len;
+ __le16 reserved1;
+ __le32 hncb_ptr_low;
+ __le32 hncb_ptr_high;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved2;
+ __le32 reserved3;
+};
+
+/* defines for BufferLength fields above */
+#define OB_IP_IOCB_REQ_E 0x80000000
+#define OB_IP_IOCB_REQ_C 0x40000000
+#define OB_IP_IOCB_REQ_L 0x20000000
+#define OB_IP_IOCB_REQ_R 0x10000000
+
+struct ob_ip_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_E 0x08
+#define OB_MAC_IOCB_RSP_L 0x04
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ __le32 transaction_id;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ob_tcp_iocb_req {
+ u8 opcode;
+
+ u8 flags0;
+#define OB_TCP_IOCB_REQ_P 0x80
+#define OB_TCP_IOCB_REQ_CI 0x20
+#define OB_TCP_IOCB_REQ_H 0x10
+#define OB_TCP_IOCB_REQ_LN 0x08
+#define OB_TCP_IOCB_REQ_K 0x04
+#define OB_TCP_IOCB_REQ_D 0x02
+#define OB_TCP_IOCB_REQ_I 0x01
+
+ u8 flags1;
+#define OB_TCP_IOCB_REQ_OSM 0x40
+#define OB_TCP_IOCB_REQ_URG 0x20
+#define OB_TCP_IOCB_REQ_ACK 0x10
+#define OB_TCP_IOCB_REQ_PSH 0x08
+#define OB_TCP_IOCB_REQ_RST 0x04
+#define OB_TCP_IOCB_REQ_SYN 0x02
+#define OB_TCP_IOCB_REQ_FIN 0x01
+
+ u8 options_len;
+#define OB_TCP_IOCB_REQ_OMASK 0xF0
+#define OB_TCP_IOCB_REQ_SHIFT 4
+
+ __le32 transaction_id;
+ __le32 data_len;
+ __le32 hncb_ptr_low;
+ __le32 hncb_ptr_high;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 time_stamp;
+ __le32 reserved1;
+};
+
+struct ob_tcp_iocb_rsp {
+ u8 opcode;
+
+ u8 flags0;
+#define OB_TCP_IOCB_RSP_C 0x20
+#define OB_TCP_IOCB_RSP_H 0x10
+#define OB_TCP_IOCB_RSP_LN 0x08
+#define OB_TCP_IOCB_RSP_K 0x04
+#define OB_TCP_IOCB_RSP_D 0x02
+#define OB_TCP_IOCB_RSP_I 0x01
+
+ u8 flags1;
+#define OB_TCP_IOCB_RSP_E 0x10
+#define OB_TCP_IOCB_RSP_W 0x08
+#define OB_TCP_IOCB_RSP_P 0x04
+#define OB_TCP_IOCB_RSP_T 0x02
+#define OB_TCP_IOCB_RSP_F 0x01
+
+ u8 state;
+#define OB_TCP_IOCB_RSP_SMASK 0xF0
+#define OB_TCP_IOCB_RSP_SHIFT 4
+
+ __le32 transaction_id;
+ __le32 local_ncb_ptr;
+ __le32 reserved0;
+};
+
+struct ib_ip_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define IB_IP_IOCB_RSP_S 0x80
+#define IB_IP_IOCB_RSP_H1 0x40
+#define IB_IP_IOCB_RSP_H0 0x20
+#define IB_IP_IOCB_RSP_B 0x10
+#define IB_IP_IOCB_RSP_M 0x08
+#define IB_IP_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le16 checksum;
+ __le16 reserved;
+#define IB_IP_IOCB_RSP_R 0x01
+ __le32 ial_low;
+ __le32 ial_high;
+};
+
+struct ib_tcp_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define IB_TCP_IOCB_RSP_P 0x80
+#define IB_TCP_IOCB_RSP_T 0x40
+#define IB_TCP_IOCB_RSP_D 0x20
+#define IB_TCP_IOCB_RSP_N 0x10
+#define IB_TCP_IOCB_RSP_IP 0x03
+#define IB_TCP_FLAG_MASK 0xf0
+#define IB_TCP_FLAG_IOCB_SYN 0x00
+
+#define TCP_IB_RSP_FLAGS(x) ((x)->flags & ~IB_TCP_FLAG_MASK)
+
+ __le16 length;
+ __le32 hncb_ref_num;
+ __le32 ial_low;
+ __le32 ial_high;
+};
+
+struct net_rsp_iocb {
+ u8 opcode;
+ u8 flags;
+ __le16 reserved0;
+ __le32 reserved[3];
+};
+#pragma pack()
+
+/*
+ * Register Definitions...
+ */
+#define PORT0_PHY_ADDRESS 0x1e00
+#define PORT1_PHY_ADDRESS 0x1f00
+
+#define ETHERNET_CRC_SIZE 4
+
+#define MII_SCAN_REGISTER 0x00000001
+
+/* 32-bit ispControlStatus */
+enum {
+ ISP_CONTROL_NP_MASK = 0x0003,
+ ISP_CONTROL_NP_PCSR = 0x0000,
+ ISP_CONTROL_NP_HMCR = 0x0001,
+ ISP_CONTROL_NP_LRAMCR = 0x0002,
+ ISP_CONTROL_NP_PSR = 0x0003,
+ ISP_CONTROL_RI = 0x0008,
+ ISP_CONTROL_CI = 0x0010,
+ ISP_CONTROL_PI = 0x0020,
+ ISP_CONTROL_IN = 0x0040,
+ ISP_CONTROL_BE = 0x0080,
+ ISP_CONTROL_FN_MASK = 0x0700,
+ ISP_CONTROL_FN0_NET = 0x0400,
+ ISP_CONTROL_FN0_SCSI = 0x0500,
+ ISP_CONTROL_FN1_NET = 0x0600,
+ ISP_CONTROL_FN1_SCSI = 0x0700,
+ ISP_CONTROL_LINK_DN_0 = 0x0800,
+ ISP_CONTROL_LINK_DN_1 = 0x1000,
+ ISP_CONTROL_FSR = 0x2000,
+ ISP_CONTROL_FE = 0x4000,
+ ISP_CONTROL_SR = 0x8000,
+};
+
+/* 32-bit ispInterruptMaskReg */
+enum {
+ ISP_IMR_ENABLE_INT = 0x0004,
+ ISP_IMR_DISABLE_RESET_INT = 0x0008,
+ ISP_IMR_DISABLE_CMPL_INT = 0x0010,
+ ISP_IMR_DISABLE_PROC_INT = 0x0020,
+};
+
+/* 32-bit serialPortInterfaceReg */
+enum {
+ ISP_SERIAL_PORT_IF_CLK = 0x0001,
+ ISP_SERIAL_PORT_IF_CS = 0x0002,
+ ISP_SERIAL_PORT_IF_D0 = 0x0004,
+ ISP_SERIAL_PORT_IF_DI = 0x0008,
+ ISP_NVRAM_MASK = (0x000F << 16),
+ ISP_SERIAL_PORT_IF_WE = 0x0010,
+ ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
+ ISP_SERIAL_PORT_IF_SCI = 0x0400,
+ ISP_SERIAL_PORT_IF_SC0 = 0x0800,
+ ISP_SERIAL_PORT_IF_SCE = 0x1000,
+ ISP_SERIAL_PORT_IF_SDI = 0x2000,
+ ISP_SERIAL_PORT_IF_SDO = 0x4000,
+ ISP_SERIAL_PORT_IF_SDE = 0x8000,
+ ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
+};
+
+/* semaphoreReg */
+enum {
+ QL_RESOURCE_MASK_BASE_CODE = 0x7,
+ QL_RESOURCE_BITS_BASE_CODE = 0x4,
+ QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
+ QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
+ QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
+ QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
+ QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
+ QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
+ QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
+ QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
+ QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
+ QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
+};
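+/*
+ * Pattern: each resource owns a 3-bit field in the low half of
+ * semaphoreReg, with a matching mask 16 bits higher (presumably the
+ * write-enable for that field). For example, QL_DRVR_SEM_BITS is
+ * 0x4 << 1 = 0x0008 and QL_DRVR_SEM_MASK is 0x7 << 17 = 0x000e0000.
+ */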
+
+/*
+ * QL3XXX memory-mapped registers
+ * QL3XXX has 4 "pages" of registers, each page occupying
+ * 256 bytes. Each page has a "common" area at the start and then
+ * page-specific registers after that.
+ */
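+/*
+ * The currently selected page is tracked in ql3_adapter->current_page;
+ * accessors such as ql_read_page0_reg_l() in qla3xxx.c are expected to
+ * select the right page before touching the page-specific offsets below.
+ */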
+struct ql3xxx_common_registers {
+ u32 MB0; /* Offset 0x00 */
+ u32 MB1; /* Offset 0x04 */
+ u32 MB2; /* Offset 0x08 */
+ u32 MB3; /* Offset 0x0c */
+ u32 MB4; /* Offset 0x10 */
+ u32 MB5; /* Offset 0x14 */
+ u32 MB6; /* Offset 0x18 */
+ u32 MB7; /* Offset 0x1c */
+ u32 flashBiosAddr;
+ u32 flashBiosData;
+ u32 ispControlStatus;
+ u32 ispInterruptMaskReg;
+ u32 serialPortInterfaceReg;
+ u32 semaphoreReg;
+ u32 reqQProducerIndex;
+ u32 rspQConsumerIndex;
+
+ u32 rxLargeQProducerIndex;
+ u32 rxSmallQProducerIndex;
+ u32 arcMadiCommand;
+ u32 arcMadiData;
+};
+
+enum {
+ EXT_HW_CONFIG_SP_MASK = 0x0006,
+ EXT_HW_CONFIG_SP_NONE = 0x0000,
+ EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
+ EXT_HW_CONFIG_SP_ECC = 0x0004,
+ EXT_HW_CONFIG_SP_ECCx = 0x0006,
+ EXT_HW_CONFIG_SIZE_MASK = 0x0060,
+ EXT_HW_CONFIG_SIZE_128M = 0x0000,
+ EXT_HW_CONFIG_SIZE_256M = 0x0020,
+ EXT_HW_CONFIG_SIZE_512M = 0x0040,
+ EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
+ EXT_HW_CONFIG_PD = 0x0080,
+ EXT_HW_CONFIG_FW = 0x0200,
+ EXT_HW_CONFIG_US = 0x0400,
+ EXT_HW_CONFIG_DCS_MASK = 0x1800,
+ EXT_HW_CONFIG_DCS_9MA = 0x0000,
+ EXT_HW_CONFIG_DCS_15MA = 0x0800,
+ EXT_HW_CONFIG_DCS_18MA = 0x1000,
+ EXT_HW_CONFIG_DCS_24MA = 0x1800,
+ EXT_HW_CONFIG_DDS_MASK = 0x6000,
+ EXT_HW_CONFIG_DDS_9MA = 0x0000,
+ EXT_HW_CONFIG_DDS_15MA = 0x2000,
+ EXT_HW_CONFIG_DDS_18MA = 0x4000,
+ EXT_HW_CONFIG_DDS_24MA = 0x6000,
+};
+
+/* InternalChipConfig */
+enum {
+ INTERNAL_CHIP_DM = 0x0001,
+ INTERNAL_CHIP_SD = 0x0002,
+ INTERNAL_CHIP_RAP_MASK = 0x000C,
+ INTERNAL_CHIP_RAP_RR = 0x0000,
+ INTERNAL_CHIP_RAP_NRM = 0x0004,
+ INTERNAL_CHIP_RAP_ERM = 0x0008,
+ INTERNAL_CHIP_RAP_ERMx = 0x000C,
+ INTERNAL_CHIP_WE = 0x0010,
+ INTERNAL_CHIP_EF = 0x0020,
+ INTERNAL_CHIP_FR = 0x0040,
+ INTERNAL_CHIP_FW = 0x0080,
+ INTERNAL_CHIP_FI = 0x0100,
+ INTERNAL_CHIP_FT = 0x0200,
+};
+
+/* portControl */
+enum {
+ PORT_CONTROL_DS = 0x0001,
+ PORT_CONTROL_HH = 0x0002,
+ PORT_CONTROL_EI = 0x0004,
+ PORT_CONTROL_ET = 0x0008,
+ PORT_CONTROL_EF = 0x0010,
+ PORT_CONTROL_DRM = 0x0020,
+ PORT_CONTROL_RLB = 0x0040,
+ PORT_CONTROL_RCB = 0x0080,
+ PORT_CONTROL_MAC = 0x0100,
+ PORT_CONTROL_IPV = 0x0200,
+ PORT_CONTROL_IFP = 0x0400,
+ PORT_CONTROL_ITP = 0x0800,
+ PORT_CONTROL_FI = 0x1000,
+ PORT_CONTROL_DFP = 0x2000,
+ PORT_CONTROL_OI = 0x4000,
+ PORT_CONTROL_CC = 0x8000,
+};
+
+/* portStatus */
+enum {
+ PORT_STATUS_SM0 = 0x0001,
+ PORT_STATUS_SM1 = 0x0002,
+ PORT_STATUS_X = 0x0008,
+ PORT_STATUS_DL = 0x0080,
+ PORT_STATUS_IC = 0x0200,
+ PORT_STATUS_MRC = 0x0400,
+ PORT_STATUS_NL = 0x0800,
+ PORT_STATUS_REV_ID_MASK = 0x7000,
+ PORT_STATUS_REV_ID_1 = 0x1000,
+ PORT_STATUS_REV_ID_2 = 0x2000,
+ PORT_STATUS_REV_ID_3 = 0x3000,
+ PORT_STATUS_64 = 0x8000,
+ PORT_STATUS_UP0 = 0x10000,
+ PORT_STATUS_AC0 = 0x20000,
+ PORT_STATUS_AE0 = 0x40000,
+ PORT_STATUS_UP1 = 0x100000,
+ PORT_STATUS_AC1 = 0x200000,
+ PORT_STATUS_AE1 = 0x400000,
+ PORT_STATUS_F0_ENABLED = 0x1000000,
+ PORT_STATUS_F1_ENABLED = 0x2000000,
+ PORT_STATUS_F2_ENABLED = 0x4000000,
+ PORT_STATUS_F3_ENABLED = 0x8000000,
+};
+
+/* macAddrIndirectPtrReg */
+enum {
+ MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
+ MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
+ MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
+ MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
+ MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
+};
+
+/* macMIIMgmtControlReg */
+enum {
+ MAC_MII_CONTROL_RC = 0x0001,
+ MAC_MII_CONTROL_SC = 0x0002,
+ MAC_MII_CONTROL_AS = 0x0004,
+ MAC_MII_CONTROL_NP = 0x0008,
+ MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
+ MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
+ MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
+ MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
+ MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
+ MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
+ MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
+ MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
+ MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
+ MAC_MII_CONTROL_RM = 0x8000,
+};
+
+/* macMIIStatusReg */
+enum {
+ MAC_MII_STATUS_BSY = 0x0001,
+ MAC_MII_STATUS_SC = 0x0002,
+ MAC_MII_STATUS_NV = 0x0004,
+};
+
+enum {
+ MAC_CONFIG_REG_PE = 0x0001,
+ MAC_CONFIG_REG_TF = 0x0002,
+ MAC_CONFIG_REG_RF = 0x0004,
+ MAC_CONFIG_REG_FD = 0x0008,
+ MAC_CONFIG_REG_GM = 0x0010,
+ MAC_CONFIG_REG_LB = 0x0020,
+ MAC_CONFIG_REG_SR = 0x8000,
+};
+
+enum {
+ MAC_HALF_DUPLEX_REG_ED = 0x10000,
+ MAC_HALF_DUPLEX_REG_NB = 0x20000,
+ MAC_HALF_DUPLEX_REG_BNB = 0x40000,
+ MAC_HALF_DUPLEX_REG_ALT = 0x80000,
+};
+
+enum {
+ IP_ADDR_INDEX_REG_MASK = 0x000f,
+ IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
+ IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
+ IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
+ IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
+ IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
+ IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
+ IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
+ IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+};
+
+enum {
+ PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
+ PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
+ PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
+ PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
+ PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
+ PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
+ PROBE_MUX_ADDR_REG_UP = 0x4000,
+ PROBE_MUX_ADDR_REG_RE = 0x8000,
+};
+
+enum {
+ STATISTICS_INDEX_REG_MASK = 0x01ff,
+ STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
+ STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
+ STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
+ STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
+ STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
+ STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
+ STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
+ STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
+ STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
+ STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
+ STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
+ STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
+ STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
+ STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
+ STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
+ STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
+ STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
+ STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
+ STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
+ STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
+ STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
+ STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
+ STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
+ STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
+ STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
+ STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
+ STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
+ STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
+};
+
+enum {
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
+ PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
+ PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
+ PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
+ PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
+ PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
+ PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
+ PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
+ PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
+ PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
+ PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
+ PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
+ PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
+ PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
+ PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
+ PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
+ PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
+ PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
+ PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
+ PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
+ PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
+ PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
+};
+
+/*
+ * port control and status page - page 0
+ */
+
+struct ql3xxx_port_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 ExternalHWConfig;
+ u32 InternalChipConfig;
+ u32 portControl;
+ u32 portStatus;
+ u32 macAddrIndirectPtrReg;
+ u32 macAddrDataReg;
+ u32 macMIIMgmtControlReg;
+ u32 macMIIMgmtAddrReg;
+ u32 macMIIMgmtDataReg;
+ u32 macMIIStatusReg;
+ u32 mac0ConfigReg;
+ u32 mac0IpgIfgReg;
+ u32 mac0HalfDuplexReg;
+ u32 mac0MaxFrameLengthReg;
+ u32 mac0PauseThresholdReg;
+ u32 mac1ConfigReg;
+ u32 mac1IpgIfgReg;
+ u32 mac1HalfDuplexReg;
+ u32 mac1MaxFrameLengthReg;
+ u32 mac1PauseThresholdReg;
+ u32 ipAddrIndexReg;
+ u32 ipAddrDataReg;
+ u32 ipReassemblyTimeout;
+ u32 tcpMaxWindow;
+ u32 currentTcpTimestamp[2];
+ u32 internalRamRWAddrReg;
+ u32 internalRamWDataReg;
+ u32 reclaimedBufferAddrRegLow;
+ u32 reclaimedBufferAddrRegHigh;
+ u32 reserved[2];
+ u32 fpgaRevID;
+ u32 localRamAddr;
+ u32 localRamDataAutoIncr;
+ u32 localRamDataNonIncr;
+ u32 gpOutput;
+ u32 gpInput;
+ u32 probeMuxAddr;
+ u32 probeMuxData;
+ u32 statisticsIndexReg;
+ u32 statisticsReadDataRegAutoIncr;
+ u32 statisticsReadDataRegNoIncr;
+ u32 PortFatalErrStatus;
+};
+
+/*
+ * port host memory config page - page 1
+ */
+struct ql3xxx_host_memory_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 reserved[12];
+
+ /* Network Request Queue */
+ u32 reqConsumerIndex;
+ u32 reqConsumerIndexAddrLow;
+ u32 reqConsumerIndexAddrHigh;
+ u32 reqBaseAddrLow;
+ u32 reqBaseAddrHigh;
+ u32 reqLength;
+
+ /* Network Completion Queue */
+ u32 rspProducerIndex;
+ u32 rspProducerIndexAddrLow;
+ u32 rspProducerIndexAddrHigh;
+ u32 rspBaseAddrLow;
+ u32 rspBaseAddrHigh;
+ u32 rspLength;
+
+ /* RX Large Buffer Queue */
+ u32 rxLargeQConsumerIndex;
+ u32 rxLargeQBaseAddrLow;
+ u32 rxLargeQBaseAddrHigh;
+ u32 rxLargeQLength;
+ u32 rxLargeBufferLength;
+
+ /* RX Small Buffer Queue */
+ u32 rxSmallQConsumerIndex;
+ u32 rxSmallQBaseAddrLow;
+ u32 rxSmallQBaseAddrHigh;
+ u32 rxSmallQLength;
+ u32 rxSmallBufferLength;
+
+};
+
+/*
+ * port local RAM page - page 2
+ */
+struct ql3xxx_local_ram_registers {
+ struct ql3xxx_common_registers CommonRegs;
+ u32 bufletSize;
+ u32 maxBufletCount;
+ u32 currentBufletCount;
+ u32 reserved;
+ u32 freeBufletThresholdLow;
+ u32 freeBufletThresholdHigh;
+ u32 ipHashTableBase;
+ u32 ipHashTableCount;
+ u32 tcpHashTableBase;
+ u32 tcpHashTableCount;
+ u32 ncbBase;
+ u32 maxNcbCount;
+ u32 currentNcbCount;
+ u32 drbBase;
+ u32 maxDrbCount;
+ u32 currentDrbCount;
+};
+
+/*
+ * Helpers to split a 64-bit value into its low and high 32-bit halves.
+ */
+
+#define LS_64BITS(x) (u32)(0xffffffff & ((u64)(x)))
+#define MS_64BITS(x) (u32)(0xffffffff & (((u64)(x))>>16>>16))
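+/*
+ * Example: a 64-bit DMA mapping m is programmed as LS_64BITS(m) into a
+ * *_low register and MS_64BITS(m) into the matching *_high register.
+ * The double 16-bit shift avoids an oversized-shift warning when the
+ * value is only 32 bits wide.
+ */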
+
+/*
+ * I/O register
+ */
+
+enum {
+ CONTROL_REG = 0,
+ STATUS_REG = 1,
+ PHY_STAT_LINK_UP = 0x0004,
+ PHY_CTRL_LOOPBACK = 0x4000,
+
+ PETBI_CONTROL_REG = 0x00,
+ PETBI_CTRL_SOFT_RESET = 0x8000,
+ PETBI_CTRL_AUTO_NEG = 0x1000,
+ PETBI_CTRL_RESTART_NEG = 0x0200,
+ PETBI_CTRL_FULL_DUPLEX = 0x0100,
+ PETBI_CTRL_SPEED_1000 = 0x0040,
+
+ PETBI_STATUS_REG = 0x01,
+ PETBI_STAT_NEG_DONE = 0x0020,
+ PETBI_STAT_LINK_UP = 0x0004,
+
+ PETBI_NEG_ADVER = 0x04,
+ PETBI_NEG_PAUSE = 0x0080,
+ PETBI_NEG_PAUSE_MASK = 0x0180,
+ PETBI_NEG_DUPLEX = 0x0020,
+ PETBI_NEG_DUPLEX_MASK = 0x0060,
+
+ PETBI_NEG_PARTNER = 0x05,
+ PETBI_NEG_ERROR_MASK = 0x3000,
+
+ PETBI_EXPANSION_REG = 0x06,
+ PETBI_EXP_PAGE_RX = 0x0002,
+
+ PETBI_TBI_CTRL = 0x11,
+ PETBI_TBI_RESET = 0x8000,
+ PETBI_TBI_AUTO_SENSE = 0x0100,
+ PETBI_TBI_SERDES_MODE = 0x0010,
+ PETBI_TBI_SERDES_WRAP = 0x0002,
+
+ AUX_CONTROL_STATUS = 0x1c,
+ PHY_AUX_NEG_DONE = 0x8000,
+ PHY_NEG_PARTNER = 5,
+ PHY_AUX_DUPLEX_STAT = 0x0020,
+ PHY_AUX_SPEED_STAT = 0x0018,
+ PHY_AUX_NO_HW_STRAP = 0x0004,
+ PHY_AUX_RESET_STICK = 0x0002,
+ PHY_NEG_PAUSE = 0x0400,
+ PHY_CTRL_SOFT_RESET = 0x8000,
+ PHY_NEG_ADVER = 4,
+ PHY_NEG_ADV_SPEED = 0x01e0,
+ PHY_CTRL_RESTART_NEG = 0x0200,
+};
+enum {
+/* FM93C56A serial EEPROM definitions */
+ FM93C56A_START = 0x1,
+/* Commands */
+ FM93C56A_READ = 0x2,
+ FM93C56A_WEN = 0x0,
+ FM93C56A_WRITE = 0x1,
+ FM93C56A_WRITE_ALL = 0x0,
+ FM93C56A_WDS = 0x0,
+ FM93C56A_ERASE = 0x3,
+ FM93C56A_ERASE_ALL = 0x0,
+/* Command Extensions */
+ FM93C56A_WEN_EXT = 0x3,
+ FM93C56A_WRITE_ALL_EXT = 0x1,
+ FM93C56A_WDS_EXT = 0x0,
+ FM93C56A_ERASE_ALL_EXT = 0x2,
+/* Special Bits */
+ FM93C56A_READ_DUMMY_BITS = 1,
+ FM93C56A_READY = 0,
+ FM93C56A_BUSY = 1,
+ FM93C56A_CMD_BITS = 2,
+/* EEPROM sizes, in words at the given access width */
+ FM93C56A_SIZE_8 = 0x100,
+ FM93C56A_SIZE_16 = 0x80,
+ FM93C66A_SIZE_8 = 0x200,
+ FM93C66A_SIZE_16 = 0x100,
+ FM93C86A_SIZE_16 = 0x400,
+/* Address Bits */
+ FM93C56A_NO_ADDR_BITS_16 = 8,
+ FM93C56A_NO_ADDR_BITS_8 = 9,
+ FM93C86A_NO_ADDR_BITS_16 = 10,
+/* Data Bits */
+ FM93C56A_DATA_BITS_16 = 16,
+ FM93C56A_DATA_BITS_8 = 8,
+};
+enum {
+/* Auburn Bits */
+ AUBURN_EEPROM_DI = 0x8,
+ AUBURN_EEPROM_DI_0 = 0x0,
+ AUBURN_EEPROM_DI_1 = 0x8,
+ AUBURN_EEPROM_DO = 0x4,
+ AUBURN_EEPROM_DO_0 = 0x0,
+ AUBURN_EEPROM_DO_1 = 0x4,
+ AUBURN_EEPROM_CS = 0x2,
+ AUBURN_EEPROM_CS_0 = 0x0,
+ AUBURN_EEPROM_CS_1 = 0x2,
+ AUBURN_EEPROM_CLK_RISE = 0x1,
+ AUBURN_EEPROM_CLK_FALL = 0x0,
+};
+enum {
+ EEPROM_SIZE = FM93C86A_SIZE_16,
+ EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
+ EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
+};
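+/*
+ * The NVRAM is driven as an FM93C86A part: 0x400 (1024) 16-bit words,
+ * addressed with 10 bits.
+ */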
+
+/*
+ * MAC Config data structure
+ */
+struct eeprom_port_cfg {
+ u16 etherMtu_mac;
+ u16 pauseThreshold_mac;
+ u16 resumeThreshold_mac;
+ u16 portConfiguration;
+#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000
+#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000
+#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000
+#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000
+#define PORT_CONFIG_1000MB_SPEED 0x0400
+#define PORT_CONFIG_100MB_SPEED 0x0200
+#define PORT_CONFIG_10MB_SPEED 0x0100
+#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00
+ u16 reserved[12];
+};
+
+/*
+ * BIOS data structure
+ */
+struct eeprom_bios_cfg {
+ u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
+
+ u8 bootID0:7, bootID0Valid:1;
+ u8 bootLun0[8];
+
+ u8 bootID1:7, bootID1Valid:1;
+ u8 bootLun1[8];
+
+ u16 MaxLunsTrgt;
+ u8 reserved[10];
+};
+
+/*
+ * Function Specific Data structure
+ */
+struct eeprom_function_cfg {
+ u8 reserved[30];
+ u8 macAddress[6];
+ u8 macAddressSecondary[6];
+
+ u16 subsysVendorId;
+ u16 subsysDeviceId;
+};
+
+/*
+ * EEPROM format
+ */
+struct eeprom_data {
+ u8 asicId[4];
+ u8 version;
+ u8 numPorts;
+ u16 boardId;
+
+#define EEPROM_BOARDID_STR_SIZE 16
+#define EEPROM_SERIAL_NUM_SIZE 16
+
+ u8 boardIdStr[16];
+ u8 serialNumber[16];
+ u16 extHwConfig;
+ struct eeprom_port_cfg macCfg_port0;
+ struct eeprom_port_cfg macCfg_port1;
+ u16 bufletSize;
+ u16 bufletCount;
+ u16 tcpWindowThreshold50;
+ u16 tcpWindowThreshold25;
+ u16 tcpWindowThreshold0;
+ u16 ipHashTableBaseHi;
+ u16 ipHashTableBaseLo;
+ u16 ipHashTableSize;
+ u16 tcpHashTableBaseHi;
+ u16 tcpHashTableBaseLo;
+ u16 tcpHashTableSize;
+ u16 ncbTableBaseHi;
+ u16 ncbTableBaseLo;
+ u16 ncbTableSize;
+ u16 drbTableBaseHi;
+ u16 drbTableBaseLo;
+ u16 drbTableSize;
+ u16 reserved_142[4];
+ u16 ipReassemblyTimeout;
+ u16 tcpMaxWindowSize;
+ u16 ipSecurity;
+#define IPSEC_CONFIG_PRESENT 0x0001
+ u8 reserved_156[294];
+ u16 qDebug[8];
+ struct eeprom_function_cfg funcCfg_fn0;
+ u16 reserved_510;
+ u8 oemSpace[432];
+ struct eeprom_bios_cfg biosCfg_fn1;
+ struct eeprom_function_cfg funcCfg_fn1;
+ u16 reserved_1022;
+ u8 reserved_1024[464];
+ struct eeprom_function_cfg funcCfg_fn2;
+ u16 reserved_1534;
+ u8 reserved_1536[432];
+ struct eeprom_bios_cfg biosCfg_fn3;
+ struct eeprom_function_cfg funcCfg_fn3;
+ u16 checksum;
+};
+
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches that control driver behavior.
+ * Some are not supported under certain conditions and are noted as such.
+ */
+
+#define QL3XXX_VENDOR_ID 0x1077
+#define QL3022_DEVICE_ID 0x3022
+
+/* MTU & Frame Size stuff */
+#define NORMAL_MTU_SIZE ETH_DATA_LEN
+#define JUMBO_MTU_SIZE 9000
+#define VLAN_ID_LEN 2
+
+/* Request Queue Related Definitions */
+#define NUM_REQ_Q_ENTRIES 256 /* 256 entries * 64 bytes each = 16384 bytes (4 pages) */
+
+/* Response Queue Related Definitions */
+#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */
+
+/* Transmit and Receive Buffers */
+#define NUM_LBUFQ_ENTRIES 128
+#define NUM_SBUFQ_ENTRIES 64
+#define QL_SMALL_BUFFER_SIZE 32
+#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
+(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
+/* Each buffer queue entry carries QL_ADDR_ELE_PER_BUFQ_ENTRY buffer addresses. */
+#define NUM_SMALL_BUFFERS (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
+#define NUM_LARGE_BUFFERS (NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
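+/*
+ * Worked out: struct lrg_buf_q_entry is 16 u32s (64 bytes) and each
+ * bufq_addr_element is 8 bytes, so QL_ADDR_ELE_PER_BUFQ_ENTRY is 8;
+ * that gives 64 * 8 = 512 small buffers and 128 * 8 = 1024 large ones.
+ */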
+#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+
+ u32 addr0_lower;
+#define IAL_LAST_ENTRY 0x00000001
+#define IAL_CONT_ENTRY 0x00000002
+#define IAL_FLAG_MASK 0x00000003
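+/*
+ * The low two address bits double as the list-control flags above,
+ * which assumes buffer addresses are at least 4-byte aligned.
+ */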
+ u32 addr0_upper;
+ u32 addr1_lower;
+ u32 addr1_upper;
+ u32 addr2_lower;
+ u32 addr2_upper;
+ u32 addr3_lower;
+ u32 addr3_upper;
+ u32 addr4_lower;
+ u32 addr4_upper;
+ u32 addr5_lower;
+ u32 addr5_upper;
+ u32 addr6_lower;
+ u32 addr6_upper;
+ u32 addr7_lower;
+ u32 addr7_upper;
+
+};
+
+struct bufq_addr_element {
+ u32 addr_low;
+ u32 addr_high;
+};
+
+#define QL_NO_RESET 0
+#define QL_DO_RESET 1
+
+enum link_state_t {
+ LS_UNKNOWN = 0,
+ LS_DOWN,
+ LS_DEGRADE,
+ LS_RECOVER,
+ LS_UP,
+};
+
+struct ql_rcv_buf_cb {
+ struct ql_rcv_buf_cb *next;
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(mapaddr);
+ DECLARE_PCI_UNMAP_LEN(maplen);
+ __le32 buf_phy_addr_low;
+ __le32 buf_phy_addr_high;
+ int index;
+};
+
+struct ql_tx_buf_cb {
+ struct sk_buff *skb;
+ struct ob_mac_iocb_req *queue_entry ;
+ DECLARE_PCI_UNMAP_ADDR(mapaddr);
+ DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+/* definitions for type field */
+#define QL_BUF_TYPE_MACIOCB 0x01
+#define QL_BUF_TYPE_IPIOCB 0x02
+#define QL_BUF_TYPE_TCPIOCB 0x03
+
+/* qdev->flags definitions. */
+enum { QL_RESET_DONE = 1, /* Reset finished. */
+ QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */
+ QL_RESET_START = 3, /* Please reset the chip. */
+ QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */
+ QL_TX_TIMEOUT = 5, /* Timeout in progress. */
+ QL_LINK_MASTER = 6, /* This driver controls the link. */
+ QL_ADAPTER_UP = 7, /* Adapter has been brought up. */
+ QL_THREAD_UP = 8, /* This flag is available. */
+ QL_LINK_UP = 9, /* Link Status. */
+ QL_ALLOC_REQ_RSP_Q_DONE = 10,
+ QL_ALLOC_BUFQS_DONE = 11,
+ QL_ALLOC_SMALL_BUF_DONE = 12,
+ QL_LINK_OPTICAL = 13,
+ QL_MSI_ENABLED = 14,
+};
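+/*
+ * Note: these values are bit numbers for the set_bit()/clear_bit()/
+ * test_bit() atomics on qdev->flags, not bit masks.
+ */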
+
+/*
+ * ql3_adapter - The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+
+struct ql3_adapter {
+ u32 reserved_00;
+ unsigned long flags;
+
+ /* PCI Configuration information for this device */
+ struct pci_dev *pdev;
+ struct net_device *ndev; /* Parent NET device */
+
+ /* Hardware information */
+ u8 chip_rev_id;
+ u8 pci_slot;
+ u8 pci_width;
+ u8 pci_x;
+ u32 msi;
+ int index;
+ struct timer_list adapter_timer; /* timer used for various functions */
+
+ spinlock_t adapter_lock;
+ spinlock_t hw_lock;
+
+ /* PCI Bus Relative Register Addresses */
+ u8 *mmap_virt_base; /* stores return value from ioremap() */
+ struct ql3xxx_port_registers __iomem *mem_map_registers;
+ u32 current_page; /* tracks current register page */
+
+ u32 msg_enable;
+ u8 reserved_01[2];
+ u8 reserved_02[2];
+
+ /* Page for Shadow Registers */
+ void *shadow_reg_virt_addr;
+ dma_addr_t shadow_reg_phy_addr;
+
+ /* Net Request Queue */
+ u32 req_q_size;
+ u32 reserved_03;
+ struct ob_mac_iocb_req *req_q_virt_addr;
+ dma_addr_t req_q_phy_addr;
+ u16 req_producer_index;
+ u16 reserved_04;
+ u16 *preq_consumer_index;
+ u32 req_consumer_index_phy_addr_high;
+ u32 req_consumer_index_phy_addr_low;
+ atomic_t tx_count;
+ struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
+
+ /* Net Response Queue */
+ u32 rsp_q_size;
+ u32 eeprom_cmd_data;
+ struct net_rsp_iocb *rsp_q_virt_addr;
+ dma_addr_t rsp_q_phy_addr;
+ struct net_rsp_iocb *rsp_current;
+ u16 rsp_consumer_index;
+ u16 reserved_06;
+ u32 *prsp_producer_index;
+ u32 rsp_producer_index_phy_addr_high;
+ u32 rsp_producer_index_phy_addr_low;
+
+ /* Large Buffer Queue */
+ u32 lrg_buf_q_alloc_size;
+ u32 lrg_buf_q_size;
+ void *lrg_buf_q_alloc_virt_addr;
+ void *lrg_buf_q_virt_addr;
+ dma_addr_t lrg_buf_q_alloc_phy_addr;
+ dma_addr_t lrg_buf_q_phy_addr;
+ u32 lrg_buf_q_producer_index;
+ u32 lrg_buf_release_cnt;
+ struct bufq_addr_element *lrg_buf_next_free;
+
+ /* Large (Receive) Buffers */
+ struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS];
+ struct ql_rcv_buf_cb *lrg_buf_free_head;
+ struct ql_rcv_buf_cb *lrg_buf_free_tail;
+ u32 lrg_buf_free_count;
+ u32 lrg_buffer_len;
+ u32 lrg_buf_index;
+ u32 lrg_buf_skb_check;
+
+ /* Small Buffer Queue */
+ u32 small_buf_q_alloc_size;
+ u32 small_buf_q_size;
+ u32 small_buf_q_producer_index;
+ void *small_buf_q_alloc_virt_addr;
+ void *small_buf_q_virt_addr;
+ dma_addr_t small_buf_q_alloc_phy_addr;
+ dma_addr_t small_buf_q_phy_addr;
+ u32 small_buf_index;
+
+ /* Small (Receive) Buffers */
+ void *small_buf_virt_addr;
+ dma_addr_t small_buf_phy_addr;
+ u32 small_buf_phy_addr_low;
+ u32 small_buf_phy_addr_high;
+ u32 small_buf_release_cnt;
+ u32 small_buf_total_size;
+
+ /* ISR related, saves status for DPC. */
+ u32 control_status;
+
+ struct eeprom_data nvram_data;
+ struct timer_list ioctl_timer;
+ u32 port_link_state;
+ u32 last_rsp_offset;
+
+ /* 4022 specific */
+ u32 mac_index; /* MAC number (0 or 1) of this network function */
+ u32 PHYAddr; /* PHY address: 0x1e00 for port 0, 0x1f00 for port 1 */
+ u32 mac_ob_opcode; /* Opcode to use on mac transmission */
+ u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
+ u32 update_ob_opcode; /* Opcode to use for updating NCB */
+ u32 mb_bit_mask; /* MA Bits mask to use on transmission */
+ u32 numPorts;
+ struct net_device_stats stats;
+ struct workqueue_struct *workqueue;
+ struct work_struct reset_work;
+ struct work_struct tx_timeout_work;
+ u32 max_frame_size;
+};
+
+#endif /* _QLA3XXX_H_ */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4c2f575faad..5722a5638ff 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2809,7 +2809,7 @@ static struct pci_driver rtl8169_pci_driver = {
static int __init
rtl8169_init_module(void)
{
- return pci_module_init(&rtl8169_pci_driver);
+ return pci_register_driver(&rtl8169_pci_driver);
}
static void __exit
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index c3ed734cbe3..31bcdad5471 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1736,7 +1736,7 @@ static struct pci_driver rr_driver = {
static int __init rr_init_module(void)
{
- return pci_module_init(&rr_driver);
+ return pci_register_driver(&rr_driver);
}
static void __exit rr_cleanup_module(void)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e72e0e09906..c16f9156c98 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -7233,7 +7233,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
int __init s2io_starter(void)
{
- return pci_module_init(&s2io_driver);
+ return pci_register_driver(&s2io_driver);
}
/**
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index b2acedbefa8..c479b07be78 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -1131,7 +1131,7 @@ static struct pci_driver saa9730_driver = {
static int __init saa9730_init(void)
{
- return pci_module_init(&saa9730_driver);
+ return pci_register_driver(&saa9730_driver);
}
static void __exit saa9730_cleanup(void)
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index df0cbebb327..16e30d523fc 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1871,7 +1871,7 @@ static struct pci_driver sis190_pci_driver = {
static int __init sis190_init_module(void)
{
- return pci_module_init(&sis190_pci_driver);
+ return pci_register_driver(&sis190_pci_driver);
}
static void __exit sis190_cleanup_module(void)
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 29ee7ffedff..6af50286349 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -134,6 +134,7 @@ static const struct mii_chip_info {
{ "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
{ "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
{ "ICS LAN PHY", 0x0015, 0xF440, LAN },
+ { "ICS LAN PHY", 0x0143, 0xBC70, LAN },
{ "NS 83851 PHY", 0x2000, 0x5C20, MIX },
{ "NS 83847 PHY", 0x2000, 0x5C30, MIX },
{ "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
@@ -2495,7 +2496,7 @@ static int __init sis900_init_module(void)
printk(version);
#endif
- return pci_module_init(&sis900_pci_driver);
+ return pci_register_driver(&sis900_pci_driver);
}
static void __exit sis900_cleanup_module(void)
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index ee62845d3ac..49e76c7f10d 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -5133,7 +5133,7 @@ static struct pci_driver skge_driver = {
static int __init skge_init(void)
{
- return pci_module_init(&skge_driver);
+ return pci_register_driver(&skge_driver);
}
static void __exit skge_exit(void)
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index b5714a60237..8e4d18440a5 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -2280,7 +2280,7 @@ static struct pci_driver skfddi_pci_driver = {
static int __init skfd_init(void)
{
- return pci_module_init(&skfddi_pci_driver);
+ return pci_register_driver(&skfddi_pci_driver);
}
static void __exit skfd_exit(void)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index ad878dfddef..fba8b7455d8 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,7 +43,7 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.6"
+#define DRV_VERSION "1.8"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_phy_reset(struct skge_port *skge);
-static void skge_tx_clean(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
@@ -105,6 +105,7 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static int skge_get_regs_len(struct net_device *dev)
{
@@ -818,8 +819,9 @@ static void skge_rx_clean(struct skge_port *skge)
/* Allocate buffers for receive ring
* For receive: to_clean is next received frame.
*/
-static int skge_rx_fill(struct skge_port *skge)
+static int skge_rx_fill(struct net_device *dev)
{
+ struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
@@ -827,7 +829,8 @@ static int skge_rx_fill(struct skge_port *skge)
do {
struct sk_buff *skb;
- skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
+ skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+ GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -2178,7 +2181,7 @@ static int skge_up(struct net_device *dev)
if (err)
goto free_pci_mem;
- err = skge_rx_fill(skge);
+ err = skge_rx_fill(dev);
if (err)
goto free_rx_ring;
@@ -2281,7 +2284,7 @@ static int skge_down(struct net_device *dev)
skge_led(skge, LED_MODE_OFF);
netif_poll_disable(dev);
- skge_tx_clean(skge);
+ skge_tx_clean(dev);
skge_rx_clean(skge);
kfree(skge->rx_ring.start);
@@ -2306,25 +2309,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
int i;
u32 control, len;
u64 map;
- unsigned long flags;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
- if (!spin_trylock_irqsave(&skge->tx_lock, flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
- if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
-
- printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
- dev->name);
- }
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
return NETDEV_TX_BUSY;
- }
e = skge->tx_ring.to_use;
td = e->desc;
@@ -2399,8 +2389,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- spin_unlock_irqrestore(&skge->tx_lock, flags);
-
dev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -2430,18 +2418,18 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
skge->netdev->name, e - skge->tx_ring.start);
- dev_kfree_skb_any(e->skb);
+ dev_kfree_skb(e->skb);
}
e->skb = NULL;
}
/* Free all buffers in transmit ring */
-static void skge_tx_clean(struct skge_port *skge)
+static void skge_tx_clean(struct net_device *dev)
{
+ struct skge_port *skge = netdev_priv(dev);
struct skge_element *e;
- unsigned long flags;
- spin_lock_irqsave(&skge->tx_lock, flags);
+ netif_tx_lock_bh(dev);
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
skge_tx_free(skge, e, td->control);
@@ -2449,8 +2437,8 @@ static void skge_tx_clean(struct skge_port *skge)
}
skge->tx_ring.to_clean = e;
- netif_wake_queue(skge->netdev);
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ netif_wake_queue(dev);
+ netif_tx_unlock_bh(dev);
}
static void skge_tx_timeout(struct net_device *dev)
@@ -2461,7 +2449,7 @@ static void skge_tx_timeout(struct net_device *dev)
printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
- skge_tx_clean(skge);
+ skge_tx_clean(dev);
}
static int skge_change_mtu(struct net_device *dev, int new_mtu)
@@ -2584,16 +2572,17 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
/* Get receive buffer from descriptor.
* Handles copy of small buffers and reallocation failures
*/
-static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
- struct skge_element *e,
- u32 control, u32 status, u16 csum)
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+ struct skge_element *e,
+ u32 control, u32 status, u16 csum)
{
+ struct skge_port *skge = netdev_priv(dev);
struct sk_buff *skb;
u16 len = control & BMU_BBC;
if (unlikely(netif_msg_rx_status(skge)))
printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
- skge->netdev->name, e - skge->rx_ring.start,
+ dev->name, e - skge->rx_ring.start,
status, len);
if (len > skge->rx_buf_size)
@@ -2609,7 +2598,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
goto error;
if (len < RX_COPY_THRESHOLD) {
- skb = alloc_skb(len + 2, GFP_ATOMIC);
+ skb = netdev_alloc_skb(dev, len + 2);
if (!skb)
goto resubmit;
@@ -2624,7 +2613,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
skge_rx_reuse(e, skge->rx_buf_size);
} else {
struct sk_buff *nskb;
- nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
+ nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
if (!nskb)
goto resubmit;
@@ -2639,20 +2628,19 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
}
skb_put(skb, len);
- skb->dev = skge->netdev;
if (skge->rx_csum) {
skb->csum = csum;
skb->ip_summed = CHECKSUM_HW;
}
- skb->protocol = eth_type_trans(skb, skge->netdev);
+ skb->protocol = eth_type_trans(skb, dev);
return skb;
error:
if (netif_msg_rx_err(skge))
printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
- skge->netdev->name, e - skge->rx_ring.start,
+ dev->name, e - skge->rx_ring.start,
control, status);
if (skge->hw->chip_id == CHIP_ID_GENESIS) {
@@ -2677,15 +2665,15 @@ resubmit:
}
/* Free all buffers in Tx ring which are no longer owned by device */
-static void skge_txirq(struct net_device *dev)
+static void skge_tx_done(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
- rmb();
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
- spin_lock(&skge->tx_lock);
+ netif_tx_lock(dev);
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
@@ -2696,11 +2684,10 @@ static void skge_txirq(struct net_device *dev)
}
skge->tx_ring.to_clean = e;
- if (netif_queue_stopped(skge->netdev)
- && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
- netif_wake_queue(skge->netdev);
+ if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+ netif_wake_queue(dev);
- spin_unlock(&skge->tx_lock);
+ netif_tx_unlock(dev);
}
static int skge_poll(struct net_device *dev, int *budget)
@@ -2712,6 +2699,10 @@ static int skge_poll(struct net_device *dev, int *budget)
int to_do = min(dev->quota, *budget);
int work_done = 0;
+ skge_tx_done(dev);
+
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
struct sk_buff *skb;
@@ -2722,7 +2713,7 @@ static int skge_poll(struct net_device *dev, int *budget)
if (control & BMU_OWN)
break;
- skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
+ skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
if (likely(skb)) {
dev->last_rx = jiffies;
netif_receive_skb(skb);
@@ -2742,12 +2733,11 @@ static int skge_poll(struct net_device *dev, int *budget)
if (work_done >= to_do)
return 1; /* not done */
- netif_rx_complete(dev);
-
spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= rxirqmask[skge->port];
+ __netif_rx_complete(dev);
+ hw->intr_mask |= irqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
- mmiowb();
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
return 0;
@@ -2881,6 +2871,7 @@ static void skge_extirq(void *arg)
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
}
@@ -2888,27 +2879,23 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct skge_hw *hw = dev_id;
u32 status;
+ int handled = 0;
+ spin_lock(&hw->hw_lock);
/* Reading this register masks IRQ */
status = skge_read32(hw, B0_SP_ISRC);
- if (status == 0)
- return IRQ_NONE;
+ if (status == 0 || status == ~0)
+ goto out;
- spin_lock(&hw->hw_lock);
+ handled = 1;
status &= hw->intr_mask;
if (status & IS_EXT_REG) {
hw->intr_mask &= ~IS_EXT_REG;
schedule_work(&hw->phy_work);
}
- if (status & IS_XA1_F) {
- skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
- skge_txirq(hw->dev[0]);
- }
-
- if (status & IS_R1_F) {
- skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R1_F;
+ if (status & (IS_XA1_F|IS_R1_F)) {
+ hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
netif_rx_schedule(hw->dev[0]);
}
@@ -2927,14 +2914,8 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
skge_mac_intr(hw, 0);
if (hw->dev[1]) {
- if (status & IS_XA2_F) {
- skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
- skge_txirq(hw->dev[1]);
- }
-
- if (status & IS_R2_F) {
- skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R2_F;
+ if (status & (IS_XA2_F|IS_R2_F)) {
+ hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
netif_rx_schedule(hw->dev[1]);
}
@@ -2955,9 +2936,11 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
skge_error_irq(hw);
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+out:
spin_unlock(&hw->hw_lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3106,7 +3089,6 @@ static int skge_reset(struct skge_hw *hw)
else
hw->ram_size = t8 * 4096;
- spin_lock_init(&hw->hw_lock);
hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
if (hw->ports > 1)
hw->intr_mask |= IS_PORT_2;
@@ -3222,7 +3204,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->poll_controller = skge_netpoll;
#endif
dev->irq = hw->pdev->irq;
- dev->features = NETIF_F_LLTX;
+
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
@@ -3244,8 +3226,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
skge->port = port;
- spin_lock_init(&skge->tx_lock);
-
if (hw->chip_id != CHIP_ID_GENESIS) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
skge->rx_csum = 1;
@@ -3332,6 +3312,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
hw->pdev = pdev;
mutex_init(&hw->phy_mutex);
INIT_WORK(&hw->phy_work, skge_extirq, hw);
+ spin_lock_init(&hw->hw_lock);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
@@ -3340,23 +3321,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
- err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw);
- if (err) {
- printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
- pci_name(pdev), pdev->irq);
- goto err_out_iounmap;
- }
- pci_set_drvdata(pdev, hw);
-
err = skge_reset(hw);
if (err)
- goto err_out_free_irq;
+ goto err_out_iounmap;
printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
skge_board_name(hw), hw->chip_rev);
- if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
+ dev = skge_devinit(hw, 0, using_dac);
+ if (!dev)
goto err_out_led_off;
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -3366,7 +3340,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
goto err_out_free_netdev;
}
-
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3374,6 +3347,12 @@ static int __devinit skge_probe(struct pci_dev *pdev,
goto err_out_free_netdev;
}
+ err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+ if (err) {
+ printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+ dev->name, pdev->irq);
+ goto err_out_unregister;
+ }
skge_show_addr(dev);
if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
@@ -3386,15 +3365,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
free_netdev(dev1);
}
}
+ pci_set_drvdata(pdev, hw);
return 0;
+err_out_unregister:
+ unregister_netdev(dev);
err_out_free_netdev:
free_netdev(dev);
err_out_led_off:
skge_write16(hw, B0_LED, LED_STAT_OFF);
-err_out_free_irq:
- free_irq(pdev->irq, hw);
err_out_iounmap:
iounmap(hw->regs);
err_out_free_hw:
@@ -3424,6 +3404,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
spin_lock_irq(&hw->hw_lock);
hw->intr_mask = 0;
skge_write32(hw, B0_IMSK, 0);
+ skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock);
skge_write16(hw, B0_LED, LED_STAT_OFF);
@@ -3449,26 +3430,25 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
struct skge_hw *hw = pci_get_drvdata(pdev);
int i, wol = 0;
- for (i = 0; i < 2; i++) {
+ pci_save_state(pdev);
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev) {
+ if (netif_running(dev)) {
struct skge_port *skge = netdev_priv(dev);
- if (netif_running(dev)) {
- netif_carrier_off(dev);
- if (skge->wol)
- netif_stop_queue(dev);
- else
- skge_down(dev);
- }
- netif_device_detach(dev);
+
+ netif_carrier_off(dev);
+ if (skge->wol)
+ netif_stop_queue(dev);
+ else
+ skge_down(dev);
wol |= skge->wol;
}
+ netif_device_detach(dev);
}
- pci_save_state(pdev);
+ skge_write32(hw, B0_IMSK, 0);
pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
- pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
@@ -3477,23 +3457,33 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
static int skge_resume(struct pci_dev *pdev)
{
struct skge_hw *hw = pci_get_drvdata(pdev);
- int i;
+ int i, err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- skge_reset(hw);
+ err = skge_reset(hw);
+ if (err)
+ goto out;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev) {
- netif_device_attach(dev);
- if (netif_running(dev) && skge_up(dev))
+
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ err = skge_up(dev);
+
+ if (err) {
+ printk(KERN_ERR PFX "%s: could not up: %d\n",
+ dev->name, err);
dev_close(dev);
+ goto out;
+ }
}
}
- return 0;
+out:
+ return err;
}
#endif
@@ -3510,7 +3500,7 @@ static struct pci_driver skge_driver = {
static int __init skge_init_module(void)
{
- return pci_module_init(&skge_driver);
+ return pci_register_driver(&skge_driver);
}
static void __exit skge_cleanup_module(void)
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 593387b3c0d..79e09271bcf 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2417,7 +2417,6 @@ struct skge_port {
struct net_device *netdev;
int port;
- spinlock_t tx_lock;
struct skge_ring tx_ring;
struct skge_ring rx_ring;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 933e87f1cc6..7ce0663baf4 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.5"
+#define DRV_VERSION "1.7"
#define PFX DRV_NAME " "
/*
@@ -106,6 +106,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -117,10 +118,17 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) },
{ 0 }
};
@@ -190,7 +198,6 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
{
u16 power_control;
- u32 reg1;
int vaux;
pr_debug("sky2_set_power_state %d\n", state);
@@ -223,20 +230,9 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
else
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
- /* Turn off phy power saving */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-
- /* looks like this XL is back asswards .. */
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
- reg1 |= PCI_Y2_PHY1_COMA;
- if (hw->ports > 1)
- reg1 |= PCI_Y2_PHY2_COMA;
- }
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- udelay(100);
-
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ u32 reg1;
+
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
reg1 &= P_ASPM_CONTROL_MSK;
@@ -248,15 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
case PCI_D3hot:
case PCI_D3cold:
- /* Turn on phy power saving */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
- reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
- else
- reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- udelay(100);
-
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
else
@@ -280,7 +267,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
-static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
+static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
{
u16 reg;
@@ -528,6 +515,29 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}
+static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
+{
+ u32 reg1;
+ static const u32 phy_power[]
+ = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
+
+ /* looks like this XL is back asswards .. */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ onoff = !onoff;
+
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+
+ if (onoff)
+ /* Turn off phy power saving */
+ reg1 &= ~phy_power[port];
+ else
+ reg1 |= phy_power[port];
+
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_pci_read32(hw, PCI_DEV_REG1);
+ udelay(100);
+}
+
/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
@@ -760,9 +770,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
/* Update chip's next pointer */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
+ q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
wmb();
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
- mmiowb();
+ sky2_write16(hw, q, idx);
+ sky2_read16(hw, q);
}
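Replacing mmiowb() with a read of the doorbell register is the standard posted-write flush: PCI bridges may buffer ("post") MMIO writes, and a read from the same device cannot complete until those writes have reached it. A sketch of the pattern, with illustrative names:

#include <asm/io.h>

/* wmb() orders the descriptor writes before the doorbell; the
 * read-back forces the posted doorbell write out to the device. */
static inline void ring_doorbell(void __iomem *doorbell, u16 idx)
{
	wmb();
	writew(idx, doorbell);
	readw(doorbell);
}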
@@ -949,14 +960,16 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
/*
* It appears the hardware has a bug in the FIFO logic that
 * causes it to hang if the FIFO gets overrun and the receive buffer
- * is not aligned. ALso alloc_skb() won't align properly if slab
- * debugging is enabled.
+ * is not 64 byte aligned. The buffer returned from netdev_alloc_skb is
+ * aligned except if slab debugging is enabled.
*/
-static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
+static inline struct sk_buff *sky2_alloc_skb(struct net_device *dev,
+ unsigned int length,
+ gfp_t gfp_mask)
{
struct sk_buff *skb;
- skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
+ skb = __netdev_alloc_skb(dev, length + RX_SKB_ALIGN, gfp_mask);
if (likely(skb)) {
unsigned long p = (unsigned long) skb->data;
skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
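The helper satisfies the alignment requirement by rounding skb->data up to the next RX_SKB_ALIGN boundary with skb_reserve(). The padding arithmetic is plain ALIGN() math, shown here as a self-contained userspace demo (the 64-byte value is used purely for illustration):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long p = 0x1003;		/* pretend skb->data address */
	unsigned long pad = ALIGN(p, 64UL) - p;	/* bytes skb_reserve() skips */

	printf("pad = %lu, data moves to %#lx\n", pad, p + pad);	/* pad = 61 */
	return 0;
}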
@@ -992,7 +1005,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
for (i = 0; i < sky2->rx_pending; i++) {
struct ring_info *re = sky2->rx_ring + i;
- re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
+ re->skb = sky2_alloc_skb(sky2->netdev, sky2->rx_bufsize,
+ GFP_KERNEL);
if (!re->skb)
goto nomem;
@@ -1080,6 +1094,8 @@ static int sky2_up(struct net_device *dev)
if (!sky2->rx_ring)
goto err_out;
+ sky2_phy_power(hw, port, 1);
+
sky2_mac_init(hw, port);
/* Determine available ram buffer space (in 4K blocks).
@@ -1184,7 +1200,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
struct sky2_tx_le *le = NULL;
struct tx_ring_info *re;
unsigned i, len;
- int avail;
dma_addr_t mapping;
u32 addr64;
u16 mss;
@@ -1234,25 +1249,18 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
/* Check for TCP Segmentation Offload */
mss = skb_shinfo(skb)->gso_size;
if (mss != 0) {
- /* just drop the packet if non-linear expansion fails */
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- dev_kfree_skb(skb);
- goto out_unlock;
- }
-
mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
mss += ETH_HLEN;
- }
- if (mss != sky2->tx_last_mss) {
- le = get_tx_le(sky2);
- le->tx.tso.size = cpu_to_le16(mss);
- le->tx.tso.rsvd = 0;
- le->opcode = OP_LRGLEN | HW_OWNER;
- le->ctrl = 0;
- sky2->tx_last_mss = mss;
+ if (mss != sky2->tx_last_mss) {
+ le = get_tx_le(sky2);
+ le->tx.tso.size = cpu_to_le16(mss);
+ le->tx.tso.rsvd = 0;
+ le->opcode = OP_LRGLEN | HW_OWNER;
+ le->ctrl = 0;
+ sky2->tx_last_mss = mss;
+ }
}
ctrl = 0;
@@ -1280,12 +1288,17 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
if (skb->nh.iph->protocol == IPPROTO_UDP)
ctrl |= UDPTCP;
- le = get_tx_le(sky2);
- le->tx.csum.start = cpu_to_le16(hdr);
- le->tx.csum.offset = cpu_to_le16(offset);
- le->length = 0; /* initial checksum value */
- le->ctrl = 1; /* one packet */
- le->opcode = OP_TCPLISW | HW_OWNER;
+ if (hdr != sky2->tx_csum_start || offset != sky2->tx_csum_offset) {
+ sky2->tx_csum_start = hdr;
+ sky2->tx_csum_offset = offset;
+
+ le = get_tx_le(sky2);
+ le->tx.csum.start = cpu_to_le16(hdr);
+ le->tx.csum.offset = cpu_to_le16(offset);
+ le->length = 0; /* initial checksum value */
+ le->ctrl = 1; /* one packet */
+ le->opcode = OP_TCPLISW | HW_OWNER;
+ }
}
le = get_tx_le(sky2);
@@ -1320,23 +1333,18 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
le->opcode = OP_BUFFER | HW_OWNER;
fre = sky2->tx_ring
- + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
+ + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
pci_unmap_addr_set(fre, mapaddr, mapping);
}
re->idx = sky2->tx_prod;
le->ctrl |= EOP;
- avail = tx_avail(sky2);
- if (mss != 0 || avail < TX_MIN_PENDING) {
- le->ctrl |= FRC_STAT;
- if (avail <= MAX_SKB_TX_LE)
- netif_stop_queue(dev);
- }
+ if (tx_avail(sky2) <= MAX_SKB_TX_LE)
+ netif_stop_queue(dev);
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
-out_unlock:
spin_unlock(&sky2->tx_lock);
dev->trans_start = jiffies;
@@ -1421,7 +1429,7 @@ static int sky2_down(struct net_device *dev)
/* Stop more packets from being queued */
netif_stop_queue(dev);
- sky2_phy_reset(hw, port);
+ sky2_gmac_reset(hw, port);
/* Stop transmitter */
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
@@ -1469,6 +1477,8 @@ static int sky2_down(struct net_device *dev)
imask &= ~portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ sky2_phy_power(hw, port, 0);
+
/* turn off LED's */
sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
@@ -1832,15 +1842,16 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
* For small packets or errors, just reuse existing skb.
* For larger packets, get new buffer.
*/
-static struct sk_buff *sky2_receive(struct sky2_port *sky2,
+static struct sk_buff *sky2_receive(struct net_device *dev,
u16 length, u32 status)
{
+ struct sky2_port *sky2 = netdev_priv(dev);
struct ring_info *re = sky2->rx_ring + sky2->rx_next;
struct sk_buff *skb = NULL;
if (unlikely(netif_msg_rx_status(sky2)))
printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
- sky2->netdev->name, sky2->rx_next, status, length);
+ dev->name, sky2->rx_next, status, length);
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
@@ -1851,11 +1862,11 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
if (!(status & GMR_FS_RX_OK))
goto resubmit;
- if (length > sky2->netdev->mtu + ETH_HLEN)
+ if (length > dev->mtu + ETH_HLEN)
goto oversize;
if (length < copybreak) {
- skb = alloc_skb(length + 2, GFP_ATOMIC);
+ skb = netdev_alloc_skb(dev, length + 2);
if (!skb)
goto resubmit;
@@ -1870,7 +1881,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
} else {
struct sk_buff *nskb;
- nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
+ nskb = sky2_alloc_skb(dev, sky2->rx_bufsize, GFP_ATOMIC);
if (!nskb)
goto resubmit;
@@ -1900,7 +1911,7 @@ error:
if (netif_msg_rx_err(sky2) && net_ratelimit())
printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
- sky2->netdev->name, status, length);
+ dev->name, status, length);
if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
sky2->net_stats.rx_length_errors++;
@@ -1926,12 +1937,6 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
}
}
-/* Is status ring empty or is there more to do? */
-static inline int sky2_more_work(const struct sky2_hw *hw)
-{
- return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
-}
-
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
@@ -1960,11 +1965,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
switch (le->opcode & ~HW_OWNER) {
case OP_RXSTAT:
- skb = sky2_receive(sky2, length, status);
+ skb = sky2_receive(dev, length, status);
if (!skb)
break;
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
dev->last_rx = jiffies;
@@ -2022,6 +2026,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
}
}
+ /* Fully processed status ring so clear irq */
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
exit_loop:
if (buf_write[0]) {
sky2 = netdev_priv(hw->dev[0]);
@@ -2231,19 +2238,16 @@ static int sky2_poll(struct net_device *dev0, int *budget)
sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
work_done = sky2_status_intr(hw, work_limit);
- *budget -= work_done;
- dev0->quota -= work_done;
+ if (work_done < work_limit) {
+ netif_rx_complete(dev0);
- if (status & Y2_IS_STAT_BMU)
- sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
- if (sky2_more_work(hw))
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ return 0;
+ } else {
+ *budget -= work_done;
+ dev0->quota -= work_done;
return 1;
-
- netif_rx_complete(dev0);
-
- sky2_read32(hw, B0_Y2_SP_LISR);
- return 0;
+ }
}
static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
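The rewritten poll loop above is the canonical shape of the 2.6.18-era NAPI contract: keep returning 1 while each pass exhausts its budget, and only when a pass completes under budget remove the device from the poll list and re-enable its interrupt (done in sky2 by reading B0_Y2_SP_LISR). A generic sketch, with hypothetical mydev_* helpers:

#include <linux/netdevice.h>

static int mydev_process_ring(struct net_device *dev, int limit);	/* hypothetical */
static void mydev_unmask_irq(struct net_device *dev);			/* hypothetical */

static int mydev_poll(struct net_device *dev, int *budget)
{
	int work_limit = min(dev->quota, *budget);
	int work_done = mydev_process_ring(dev, work_limit);

	if (work_done < work_limit) {
		netif_rx_complete(dev);	/* off the poll list... */
		mydev_unmask_irq(dev);	/* ...then re-arm the interrupt */
		return 0;
	}
	*budget -= work_done;
	dev->quota -= work_done;
	return 1;			/* still work pending: poll again */
}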
@@ -2409,7 +2413,7 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
for (i = 0; i < hw->ports; i++)
- sky2_phy_reset(hw, i);
+ sky2_gmac_reset(hw, i);
memset(hw->st_le, 0, STATUS_LE_BYTES);
hw->st_idx = 0;
@@ -3200,6 +3204,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
struct pci_dev *pdev = hw->pdev;
int err;
+ init_waitqueue_head (&hw->msi_wait);
+
sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
@@ -3209,10 +3215,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
return err;
}
- init_waitqueue_head (&hw->msi_wait);
-
sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
- wmb();
+ sky2_read8(hw, B0_CTST);
wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
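Two fixes land in sky2_test_msi(): init_waitqueue_head() moves above request_irq(), because the software-triggered test interrupt may fire and wake_up() the queue as soon as the handler is installed; and the wmb() after writing CS_ST_SW_IRQ becomes a read-back, the same posted-write flush used in sky2_put_idx(). The required ordering, condensed from the hunk above into sketch form:

init_waitqueue_head(&hw->msi_wait);	/* 1: queue valid before the IRQ can fire */
err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);	/* 2 */
sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);	/* 3: trigger the test interrupt */
sky2_read8(hw, B0_CTST);		/* flush the posted write */
wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);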
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 2db8d19b22d..fa8af9f503e 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1748,7 +1748,6 @@ enum {
INIT_SUM= 1<<3,
LOCK_SUM= 1<<4,
INS_VLAN= 1<<5,
- FRC_STAT= 1<<6,
EOP = 1<<7,
};
@@ -1844,6 +1843,8 @@ struct sky2_port {
u32 tx_addr64;
u16 tx_pending;
u16 tx_last_mss;
+ u16 tx_csum_start;
+ u16 tx_csum_offset;
struct ring_info *rx_ring ____cacheline_aligned_in_smp;
struct sky2_rx_le *rx_le;
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index 3a1b7131681..9a540e2092b 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -94,27 +94,23 @@ slhc_init(int rslots, int tslots)
register struct cstate *ts;
struct slcompress *comp;
- comp = (struct slcompress *)kmalloc(sizeof(struct slcompress),
- GFP_KERNEL);
+ comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
if (! comp)
goto out_fail;
- memset(comp, 0, sizeof(struct slcompress));
if ( rslots > 0 && rslots < 256 ) {
size_t rsize = rslots * sizeof(struct cstate);
- comp->rstate = (struct cstate *) kmalloc(rsize, GFP_KERNEL);
+ comp->rstate = kzalloc(rsize, GFP_KERNEL);
if (! comp->rstate)
goto out_free;
- memset(comp->rstate, 0, rsize);
comp->rslot_limit = rslots - 1;
}
if ( tslots > 0 && tslots < 256 ) {
size_t tsize = tslots * sizeof(struct cstate);
- comp->tstate = (struct cstate *) kmalloc(tsize, GFP_KERNEL);
+ comp->tstate = kzalloc(tsize, GFP_KERNEL);
if (! comp->tstate)
goto out_free2;
- memset(comp->tstate, 0, tsize);
comp->tslot_limit = tslots - 1;
}
@@ -141,9 +137,9 @@ slhc_init(int rslots, int tslots)
return comp;
out_free2:
- kfree((unsigned char *)comp->rstate);
+ kfree(comp->rstate);
out_free:
- kfree((unsigned char *)comp);
+ kfree(comp);
out_fail:
return NULL;
}
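The slhc_init() cleanups rely on kzalloc() being exactly the kmalloc()+memset() pair it replaces, and on kmalloc()/kfree() taking and returning void * so the casts are noise in C. In sketch form:

#include <linux/slab.h>
#include <linux/string.h>

/* What kzalloc(size, flags) guarantees, expressed as the code it replaces. */
static void *kzalloc_equiv(size_t size, gfp_t flags)
{
	void *p = kmalloc(size, flags);

	if (p)
		memset(p, 0, size);
	return p;
}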
@@ -700,20 +696,6 @@ EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
-#ifdef MODULE
-
-int init_module(void)
-{
- printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California\n");
- return 0;
-}
-
-void cleanup_module(void)
-{
- return;
-}
-
-#endif /* MODULE */
#else /* CONFIG_INET */
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 0b15290df27..4438fe8c949 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -55,8 +55,6 @@ static const char version[] =
)
#endif
-
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index c0a62b00ffc..8e1f6206b7d 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -2053,7 +2053,7 @@ static int __init starfire_init (void)
return -ENODEV;
}
- return pci_module_init (&starfire_driver);
+ return pci_register_driver(&starfire_driver);
}
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 698568e751d..c243a80f400 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -17,6 +17,8 @@
Support and updates available at
http://www.scyld.com/network/sundance.html
[link no longer provides useful info -jgarzik]
+ Archives of the mailing list are still available at
+ http://www.beowulf.org/pipermail/netdrivers/
*/
@@ -646,7 +648,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
/* Reset the chip to erase previous misconfiguration. */
if (netif_msg_hw(np))
printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
- iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
+ sundance_reset(dev, 0x00ff << 16);
if (netif_msg_hw(np))
printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
@@ -1075,13 +1077,8 @@ reset_tx (struct net_device *dev)
/* Reset tx logic, TxListPtr will be cleaned */
iowrite16 (TxDisable, ioaddr + MACCtrl1);
- iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
- ioaddr + ASICCtrl + 2);
- for (i=50; i > 0; i--) {
- if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
- break;
- mdelay(1);
- }
+ sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
+
/* free all tx skbuff */
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
@@ -1736,7 +1733,7 @@ static int __init sundance_init(void)
#ifdef MODULE
printk(version);
#endif
- return pci_module_init(&sundance_driver);
+ return pci_register_driver(&sundance_driver);
}
static void __exit sundance_exit(void)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b70bbd74897..1a441a8a2ad 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -3194,7 +3194,7 @@ static struct pci_driver gem_driver = {
static int __init gem_init(void)
{
- return pci_module_init(&gem_driver);
+ return pci_register_driver(&gem_driver);
}
static void __exit gem_cleanup(void)
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 8b53ded66d3..39460fa916f 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1725,7 +1725,7 @@ static struct pci_driver tc35815_driver = {
static int __init tc35815_init_module(void)
{
- return pci_module_init(&tc35815_driver);
+ return pci_register_driver(&tc35815_driver);
}
static void __exit tc35815_cleanup_module(void)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index eafabb253f0..d6e2a6869f2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -11819,7 +11819,7 @@ static struct pci_driver tg3_driver = {
static int __init tg3_init(void)
{
- return pci_module_init(&tg3_driver);
+ return pci_register_driver(&tg3_driver);
}
static void __exit tg3_cleanup(void)
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 465921e3874..412390ba142 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1815,7 +1815,7 @@ static struct pci_driver xl_3c359_driver = {
static int __init xl_pci_init (void)
{
- return pci_module_init (&xl_3c359_driver);
+ return pci_register_driver(&xl_3c359_driver);
}
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 28d968ffd5d..0d66700c6ce 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1998,7 +1998,7 @@ static struct pci_driver streamer_pci_driver = {
};
static int __init streamer_init_module(void) {
- return pci_module_init(&streamer_pci_driver);
+ return pci_register_driver(&streamer_pci_driver);
}
static void __exit streamer_cleanup_module(void) {
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 683f14b01c0..fa3a2bb105a 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -1,7 +1,7 @@
/*
drivers/net/tulip/21142.c
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
@@ -26,9 +26,9 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
of available transceivers. */
-void t21142_timer(unsigned long data)
+void t21142_media_task(void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int csr12 = ioread32(ioaddr + CSR12);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d05c5aa254e..17a2ebaef58 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2138,17 +2138,21 @@ static int de_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct de_private *de = dev->priv;
+ int retval = 0;
rtnl_lock();
if (netif_device_present(dev))
goto out;
- if (netif_running(dev)) {
- pci_enable_device(pdev);
- de_init_hw(de);
- netif_device_attach(dev);
- } else {
- netif_device_attach(dev);
+ if (!netif_running(dev))
+ goto out_attach;
+ if ((retval = pci_enable_device(pdev))) {
+ printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
+ dev->name);
+ goto out;
}
+ de_init_hw(de);
+out_attach:
+ netif_device_attach(dev);
out:
rtnl_unlock();
return retval;
@@ -2172,7 +2176,7 @@ static int __init de_init (void)
#ifdef MODULE
printk("%s", version);
#endif
- return pci_module_init (&de_driver);
+ return pci_register_driver(&de_driver);
}
static void __exit de_exit (void)
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 75ff14a5523..e661d0a9cc6 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -5754,7 +5754,7 @@ static int __init de4x5_module_init (void)
int err = 0;
#ifdef CONFIG_PCI
- err = pci_module_init (&de4x5_pci_driver);
+ err = pci_register_driver(&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
err |= eisa_driver_register (&de4x5_eisa_driver);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 4e5b0f2acc3..66dade55682 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -2039,7 +2039,7 @@ static int __init dmfe_init_module(void)
if (HPNA_NoiseFloor > 15)
HPNA_NoiseFloor = 0;
- rc = pci_module_init(&dmfe_driver);
+ rc = pci_register_driver(&dmfe_driver);
if (rc < 0)
return rc;
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 5ffbd5b300c..206918bad53 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -1,7 +1,7 @@
/*
drivers/net/tulip/eeprom.c
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 99ccf2ebb34..7f8f5d42a76 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -1,7 +1,7 @@
/*
drivers/net/tulip/interrupt.c
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index e9bc2a958c1..20bd52b8699 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -1,7 +1,7 @@
/*
drivers/net/tulip/media.c
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index ca7e53246ad..85a521e0d05 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -1,7 +1,7 @@
/*
drivers/net/tulip/pnic.c
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index ab985023fcc..c31be0e377a 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -1,7 +1,7 @@
/*
drivers/net/tulip/pnic2.c
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
Modified to help support PNIC_II by Kevin B. Hendricks
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index e058a9fbfe8..066e5d6bcbd 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -1,7 +1,7 @@
/*
drivers/net/tulip/timer.c
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
@@ -18,13 +18,14 @@
#include "tulip.h"
-void tulip_timer(unsigned long data)
+void tulip_media_task(void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
u32 csr12 = ioread32(ioaddr + CSR12);
int next_tick = 2*HZ;
+ unsigned long flags;
if (tulip_debug > 2) {
printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
@@ -126,6 +127,15 @@ void tulip_timer(unsigned long data)
}
break;
}
+
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (tp->timeout_recovery) {
+ tulip_tx_timeout_complete(tp, ioaddr);
+ tp->timeout_recovery = 0;
+ }
+ spin_unlock_irqrestore(&tp->lock, flags);
+
/* mod_timer synchronizes us with potential add_timer calls
* from interrupts.
*/
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 3bcfbf3d23e..25668ddb1f7 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -30,11 +30,10 @@
/* undefine, or define to various debugging levels (>4 == obscene levels) */
#define TULIP_DEBUG 1
-/* undefine USE_IO_OPS for MMIO, define for PIO */
#ifdef CONFIG_TULIP_MMIO
-# undef USE_IO_OPS
+#define TULIP_BAR 1 /* CBMA */
#else
-# define USE_IO_OPS 1
+#define TULIP_BAR 0 /* CBIO */
#endif
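On these chips BAR 0 is the I/O-space window (CBIO) and BAR 1 the memory-space window (CBMA) onto the same CSRs, so the MMIO/PIO choice collapses into a BAR index: pci_iomap() returns a cookie that the ioread/iowrite accessors handle correctly for either space. A sketch mirroring the tulip_init_one() hunk further down (inside a probe routine; err_out_free_res unwinds pci_request_regions()):

void __iomem *ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
if (!ioaddr)
	goto err_out_free_res;
iowrite32(0x00000001, ioaddr + CSR0);	/* same accessor works for PIO and MMIO */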
@@ -44,7 +43,8 @@ struct tulip_chip_table {
int io_size;
int valid_intrs; /* CSR7 interrupt enable settings */
int flags;
- void (*media_timer) (unsigned long data);
+ void (*media_timer) (unsigned long);
+ void (*media_task) (void *);
};
@@ -142,6 +142,7 @@ enum status_bits {
RxNoBuf = 0x80,
RxIntr = 0x40,
TxFIFOUnderflow = 0x20,
+ RxErrIntr = 0x10,
TxJabber = 0x08,
TxNoBuf = 0x04,
TxDied = 0x02,
@@ -192,9 +193,14 @@ struct tulip_tx_desc {
enum desc_status_bits {
- DescOwned = 0x80000000,
- RxDescFatalErr = 0x8000,
- RxWholePkt = 0x0300,
+ DescOwned = 0x80000000,
+ DescWholePkt = 0x60000000,
+ DescEndPkt = 0x40000000,
+ DescStartPkt = 0x20000000,
+ DescEndRing = 0x02000000,
+ DescUseLink = 0x01000000,
+ RxDescFatalErr = 0x008000,
+ RxWholePkt = 0x00000300,
};
@@ -366,6 +372,7 @@ struct tulip_private {
unsigned int medialock:1; /* Don't sense media type. */
unsigned int mediasense:1; /* Media sensing in progress. */
unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
+ unsigned int timeout_recovery:1;
unsigned int csr0; /* CSR0 setting. */
unsigned int csr6; /* Current CSR6 control settings. */
unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
@@ -384,6 +391,7 @@ struct tulip_private {
void __iomem *base_addr;
int csr12_shadow;
int pad0; /* Used for 8-byte alignment */
+ struct work_struct media_work;
};
@@ -398,7 +406,7 @@ struct eeprom_fixup {
/* 21142.c */
extern u16 t21142_csr14[];
-void t21142_timer(unsigned long data);
+void t21142_media_task(void *data);
void t21142_start_nway(struct net_device *dev);
void t21142_lnk_change(struct net_device *dev, int csr5);
@@ -436,7 +444,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5);
void pnic_timer(unsigned long data);
/* timer.c */
-void tulip_timer(unsigned long data);
+void tulip_media_task(void *data);
void mxic_timer(unsigned long data);
void comet_timer(unsigned long data);
@@ -485,4 +493,14 @@ static inline void tulip_restart_rxtx(struct tulip_private *tp)
tulip_start_rxtx(tp);
}
+static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr)
+{
+ /* Stop and restart the chip's Tx processes. */
+ tulip_restart_rxtx(tp);
+ /* Trigger an immediate transmit demand. */
+ iowrite32(0, ioaddr + CSR1);
+
+ tp->stats.tx_errors++;
+}
+
#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 7351831f57c..8092c4f8c03 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1,7 +1,7 @@
/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
/*
- Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Maintained by Valerie Henson <val_henson@linux.intel.com>
Copyright 2000,2001 The Linux Kernel Team
Written/copyright 1994-2001 by Donald Becker.
@@ -130,7 +130,14 @@ int tulip_debug = TULIP_DEBUG;
int tulip_debug = 1;
#endif
+static void tulip_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ if (netif_running(dev))
+ schedule_work(&tp->media_work);
+}
/*
 * This table is used during operation for capabilities and media timer.
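The tulip_timer() wrapper above is now only a trampoline: timers run in softirq context and must not sleep, while the media-selection code wants to be able to sleep, so the real work moves into a work item that runs in process context. A sketch of the deferral pattern with the 2.6.18-era three-argument INIT_WORK(), using hypothetical mydev_* names:

#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct mydev_priv {
	struct timer_list timer;
	struct work_struct media_work;
};

static void mydev_media_task(void *data)	/* process context, may sleep */
{
	struct net_device *dev = data;
	/* ... poll the PHY, switch media, mod_timer() for the next tick ... */
}

static void mydev_timer(unsigned long data)	/* softirq context, must not sleep */
{
	struct net_device *dev = (struct net_device *)data;
	struct mydev_priv *tp = netdev_priv(dev);

	if (netif_running(dev))
		schedule_work(&tp->media_work);
}

/* at init time: INIT_WORK(&tp->media_work, mydev_media_task, dev); */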
@@ -144,59 +151,60 @@ struct tulip_chip_table tulip_tbl[] = {
/* DC21140 */
{ "Digital DS21140 Tulip", 128, 0x0001ebef,
- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer },
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
+ tulip_media_task },
/* DC21142, DC21143 */
- { "Digital DS21143 Tulip", 128, 0x0801fbff,
+ { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
- | HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer },
+ | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
/* LC82C168 */
{ "Lite-On 82c168 PNIC", 256, 0x0001fbef,
- HAS_MII | HAS_PNICNWAY, pnic_timer },
+ HAS_MII | HAS_PNICNWAY, pnic_timer, },
/* MX98713 */
{ "Macronix 98713 PMAC", 128, 0x0001ebef,
- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
/* MX98715 */
{ "Macronix 98715 PMAC", 256, 0x0001ebef,
- HAS_MEDIA_TABLE, mxic_timer },
+ HAS_MEDIA_TABLE, mxic_timer, },
/* MX98725 */
{ "Macronix 98725 PMAC", 256, 0x0001ebef,
- HAS_MEDIA_TABLE, mxic_timer },
+ HAS_MEDIA_TABLE, mxic_timer, },
/* AX88140 */
{ "ASIX AX88140", 128, 0x0001fbff,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
- | IS_ASIX, tulip_timer },
+ | IS_ASIX, tulip_timer, tulip_media_task },
/* PNIC2 */
{ "Lite-On PNIC-II", 256, 0x0801fbff,
- HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer },
+ HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
/* COMET */
{ "ADMtek Comet", 256, 0x0001abef,
- HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+ HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
/* COMPEX9881 */
{ "Compex 9881 PMAC", 128, 0x0001ebef,
- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
/* I21145 */
{ "Intel DS21145 Tulip", 128, 0x0801fbff,
HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
- | HAS_NWAY | HAS_PCI_MWI, t21142_timer },
+ | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
/* DM910X */
{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
- tulip_timer },
+ tulip_timer, tulip_media_task },
/* RS7112 */
{ "Conexant LANfinity", 256, 0x0001ebef,
- HAS_MII | HAS_ACPI, tulip_timer },
+ HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
};
@@ -295,12 +303,14 @@ static void tulip_up(struct net_device *dev)
/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
iowrite32(0x00000001, ioaddr + CSR0);
+ pci_read_config_dword(tp->pdev, PCI_COMMAND, &i); /* flush write */
udelay(100);
/* Deassert reset.
Wait the specified 50 PCI cycles after a reset by initializing
Tx and Rx queues and the address filter list. */
iowrite32(tp->csr0, ioaddr + CSR0);
+ pci_read_config_dword(tp->pdev, PCI_COMMAND, &i); /* flush write */
udelay(100);
if (tulip_debug > 1)
@@ -522,20 +532,9 @@ static void tulip_tx_timeout(struct net_device *dev)
"SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
- if ( ! tp->medialock && tp->mtable) {
- do
- --tp->cur_index;
- while (tp->cur_index >= 0
- && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
- & MediaIsFD));
- if (--tp->cur_index < 0) {
- /* We start again, but should instead look for default. */
- tp->cur_index = tp->mtable->leafcount - 1;
- }
- tulip_select_media(dev, 0);
- printk(KERN_WARNING "%s: transmit timed out, switching to %s "
- "media.\n", dev->name, medianame[dev->if_port]);
- }
+ tp->timeout_recovery = 1;
+ schedule_work(&tp->media_work);
+ goto out_unlock;
} else if (tp->chip_id == PNIC2) {
printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
"CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
@@ -575,14 +574,9 @@ static void tulip_tx_timeout(struct net_device *dev)
}
#endif
- /* Stop and restart the chip's Tx processes . */
-
- tulip_restart_rxtx(tp);
- /* Trigger an immediate transmit demand. */
- iowrite32(0, ioaddr + CSR1);
-
- tp->stats.tx_errors++;
+ tulip_tx_timeout_complete(tp, ioaddr);
+out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
dev->trans_start = jiffies;
netif_wake_queue (dev);
@@ -732,6 +726,8 @@ static void tulip_down (struct net_device *dev)
void __iomem *ioaddr = tp->base_addr;
unsigned long flags;
+ flush_scheduled_work();
+
del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
del_timer_sync (&tp->oom_timer);
@@ -1361,11 +1357,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (pci_request_regions (pdev, "tulip"))
goto err_out_free_netdev;
-#ifndef USE_IO_OPS
- ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size);
-#else
- ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
-#endif
+ ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
+
if (!ioaddr)
goto err_out_free_res;
@@ -1398,6 +1391,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
tp->timer.data = (unsigned long)dev;
tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+ INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+
dev->base_addr = (unsigned long)ioaddr;
#ifdef CONFIG_TULIP_MWI
@@ -1644,8 +1639,14 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (register_netdev(dev))
goto err_out_free_ring;
- printk(KERN_INFO "%s: %s rev %d at %p,",
- dev->name, chip_name, chip_rev, ioaddr);
+ printk(KERN_INFO "%s: %s rev %d at "
+#ifdef CONFIG_TULIP_MMIO
+ "MMIO"
+#else
+ "Port"
+#endif
+ " %#llx,", dev->name, chip_name, chip_rev,
+ (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
pci_set_drvdata(pdev, dev);
if (eeprom_missing)
@@ -1768,7 +1769,10 @@ static int tulip_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+ if ((retval = pci_enable_device(pdev))) {
+ printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
+ return retval;
+ }
if ((retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
printk (KERN_ERR "tulip: request_irq failed in resume\n");
@@ -1849,7 +1853,7 @@ static int __init tulip_init (void)
tulip_max_interrupt_work = max_interrupt_work;
/* probe for and init boards */
- return pci_module_init (&tulip_driver);
+ return pci_register_driver(&tulip_driver);
}
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fd64b2b3e99..c4c720e2d4c 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1702,7 +1702,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
static int __init uli526x_init_module(void)
{
- int rc;
printk(version);
printed_version = 1;
@@ -1714,22 +1713,19 @@ static int __init uli526x_init_module(void)
if (cr6set)
uli526x_cr6_user_set = cr6set;
- switch(mode) {
+ switch (mode) {
case ULI526X_10MHF:
case ULI526X_100MHF:
case ULI526X_10MFD:
case ULI526X_100MFD:
uli526x_media_mode = mode;
break;
- default:uli526x_media_mode = ULI526X_AUTO;
+ default:
+ uli526x_media_mode = ULI526X_AUTO;
break;
}
- rc = pci_module_init(&uli526x_driver);
- if (rc < 0)
- return rc;
-
- return 0;
+ return pci_register_driver(&uli526x_driver);
}
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index eba9083da14..a64d6828d9a 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -90,10 +90,8 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
Making the Tx ring too large decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
#define TX_QUEUE_LEN_RESTART 5
-#define RX_RING_SIZE 32
#define TX_BUFLIMIT (1024-128)
@@ -137,6 +135,8 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <asm/io.h>
#include <asm/irq.h>
+#include "tulip.h"
+
/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
@@ -242,8 +242,8 @@ static const struct pci_id_info pci_id_tbl[] __devinitdata = {
};
/* This driver was written to use PCI memory space, however some x86 systems
- work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
- accesses instead of memory space. */
+ work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
+*/
/* Offsets to the Command and Status Registers, "CSRs".
While similar to the Tulip, these registers are longword aligned.
@@ -261,21 +261,11 @@ enum w840_offsets {
CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};
-/* Bits in the interrupt status/enable registers. */
-/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
-enum intr_status_bits {
- NormalIntr=0x10000, AbnormalIntr=0x8000,
- IntrPCIErr=0x2000, TimerInt=0x800,
- IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
- TxFIFOUnderflow=0x20, RxErrIntr=0x10,
- TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
-};
-
/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
- AcceptErr=0x80, AcceptRunt=0x40,
- AcceptBroadcast=0x20, AcceptMulticast=0x10,
- AcceptAllPhys=0x08, AcceptMyPhys=0x02,
+ AcceptErr=0x80,
+ RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
+ RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};
enum mii_reg_bits {
@@ -297,13 +287,6 @@ struct w840_tx_desc {
u32 buffer1, buffer2;
};
-/* Bits in network_desc.status */
-enum desc_status_bits {
- DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
- DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
- DescIntr=0x80000000,
-};
-
#define MII_CNT 1 /* winbond only supports one MII */
struct netdev_private {
struct w840_rx_desc *rx_ring;
@@ -371,7 +354,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
int irq;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
void __iomem *ioaddr;
- int bar = 1;
i = pci_enable_device(pdev);
if (i) return i;
@@ -393,10 +375,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (pci_request_regions(pdev, DRV_NAME))
goto err_out_netdev;
-#ifdef USE_IO_OPS
- bar = 0;
-#endif
- ioaddr = pci_iomap(pdev, bar, netdev_res_size);
+
+ ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
if (!ioaddr)
goto err_out_free_res;
@@ -838,7 +818,7 @@ static void init_rxtx_rings(struct net_device *dev)
np->rx_buf_sz,PCI_DMA_FROMDEVICE);
np->rx_ring[i].buffer1 = np->rx_addr[i];
- np->rx_ring[i].status = DescOwn;
+ np->rx_ring[i].status = DescOwned;
}
np->cur_rx = 0;
@@ -923,7 +903,7 @@ static void init_registers(struct net_device *dev)
}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
i |= 0xE000;
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined (CONFIG_PARISC)
i |= 0x4800;
#else
#warning Processor architecture undefined
@@ -1043,11 +1023,11 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
/* Now acquire the irq spinlock.
 * The difficult race is the ordering between
- * increasing np->cur_tx and setting DescOwn:
+ * increasing np->cur_tx and setting DescOwned:
* - if np->cur_tx is increased first the interrupt
* handler could consider the packet as transmitted
- * since DescOwn is cleared.
- * - If DescOwn is set first the NIC could report the
+ * since DescOwned is cleared.
+ * - If DescOwned is set first the NIC could report the
* packet as sent, but the interrupt handler would ignore it
* since the np->cur_tx was not yet increased.
*/
@@ -1055,7 +1035,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
np->cur_tx++;
wmb(); /* flush length, buffer1, buffer2 */
- np->tx_ring[entry].status = DescOwn;
+ np->tx_ring[entry].status = DescOwned;
wmb(); /* flush status and kick the hardware */
iowrite32(0, np->base_addr + TxStartDemand);
np->tx_q_bytes += skb->len;
@@ -1155,12 +1135,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
handled = 1;
- if (intr_status & (IntrRxDone | RxNoBuf))
+ if (intr_status & (RxIntr | RxNoBuf))
netdev_rx(dev);
if (intr_status & RxNoBuf)
iowrite32(0, ioaddr + RxStartDemand);
- if (intr_status & (TxIdle | IntrTxDone) &&
+ if (intr_status & (TxNoBuf | TxIntr) &&
np->cur_tx != np->dirty_tx) {
spin_lock(&np->lock);
netdev_tx_done(dev);
@@ -1168,8 +1148,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
}
/* Abnormal error summary/uncommon events handlers. */
- if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
- TimerInt | IntrTxStopped))
+ if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SytemError |
+ TimerInt | TxDied))
netdev_error(dev, intr_status);
if (--work_limit < 0) {
@@ -1305,7 +1285,7 @@ static int netdev_rx(struct net_device *dev)
np->rx_ring[entry].buffer1 = np->rx_addr[entry];
}
wmb();
- np->rx_ring[entry].status = DescOwn;
+ np->rx_ring[entry].status = DescOwned;
}
return 0;
@@ -1342,7 +1322,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
dev->name, new);
update_csr6(dev, new);
}
- if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
+ if (intr_status & RxDied) { /* Missed a Rx frame. */
np->stats.rx_errors++;
}
if (intr_status & TimerInt) {
@@ -1381,13 +1361,13 @@ static u32 __set_rx_mode(struct net_device *dev)
/* Unconditionally log net taps. */
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
| AcceptMyPhys;
} else if ((dev->mc_count > multicast_filter_limit)
|| (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
struct dev_mc_list *mclist;
int i;
@@ -1398,7 +1378,7 @@ static u32 __set_rx_mode(struct net_device *dev)
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
@@ -1646,14 +1626,18 @@ static int w840_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct netdev_private *np = netdev_priv(dev);
+ int retval = 0;
rtnl_lock();
if (netif_device_present(dev))
goto out; /* device not suspended */
if (netif_running(dev)) {
- pci_enable_device(pdev);
- /* pci_power_on(pdev); */
-
+ if ((retval = pci_enable_device(pdev))) {
+ printk (KERN_ERR
+ "%s: pci_enable_device failed in resume\n",
+ dev->name);
+ goto out;
+ }
spin_lock_irq(&np->lock);
iowrite32(1, np->base_addr+PCIBusCfg);
ioread32(np->base_addr+PCIBusCfg);
@@ -1671,7 +1655,7 @@ static int w840_resume (struct pci_dev *pdev)
}
out:
rtnl_unlock();
- return 0;
+ return retval;
}
#endif
@@ -1689,7 +1673,7 @@ static struct pci_driver w840_driver = {
static int __init w840_init(void)
{
printk(version);
- return pci_module_init(&w840_driver);
+ return pci_register_driver(&w840_driver);
}
static void __exit w840_exit(void)
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index 17ca7dc42e6..d797b7b2e35 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -1707,7 +1707,7 @@ static int __init xircom_init(void)
#ifdef MODULE
printk(version);
#endif
- return pci_module_init(&xircom_driver);
+ return pci_register_driver(&xircom_driver);
}
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 4103c37172f..1014461178c 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2660,7 +2660,7 @@ static struct pci_driver typhoon_driver = {
static int __init
typhoon_init(void)
{
- return pci_module_init(&typhoon_driver);
+ return pci_register_driver(&typhoon_driver);
}
static void __exit
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index ae971080e2e..efeb10b9c61 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -2005,7 +2005,7 @@ static int __init rhine_init(void)
#ifdef MODULE
printk(version);
#endif
- return pci_module_init(&rhine_driver);
+ return pci_register_driver(&rhine_driver);
}
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index aa9cd92f46b..e266db1518e 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2250,7 +2250,7 @@ static int __init velocity_init_module(void)
int ret;
velocity_register_notifier();
- ret = pci_module_init(&velocity_driver);
+ ret = pci_register_driver(&velocity_driver);
if (ret < 0)
velocity_unregister_notifier();
return ret;
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 496c3d59744..4665ef2c63d 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -262,25 +262,6 @@ struct velocity_rd_info {
dma_addr_t skb_dma;
};
-/**
- * alloc_rd_info - allocate an rd info block
- *
- * Alocate and initialize a receive info structure used for keeping
- * track of kernel side information related to each receive
- * descriptor we are using
- */
-
-static inline struct velocity_rd_info *alloc_rd_info(void)
-{
- struct velocity_rd_info *ptr;
- if ((ptr = kmalloc(sizeof(struct velocity_rd_info), GFP_ATOMIC)) == NULL)
- return NULL;
- else {
- memset(ptr, 0, sizeof(struct velocity_rd_info));
- return ptr;
- }
-}
-
/*
* Used to track transmit side buffers.
*/
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 430b1f630fb..a5e7ce1bd16 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -40,7 +40,6 @@
* 1998/08/08 acme Initial version.
*/
-#include <linux/config.h> /* OS configuration options */
#include <linux/stddef.h> /* offsetof(), etc. */
#include <linux/errno.h> /* return codes */
#include <linux/string.h> /* inline memset(), etc. */
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 6e1ec5bf22f..73698755943 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -28,7 +28,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h> /* for CONFIG_DLCI_COUNT */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 684af4316ff..af4d4155905 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2062,7 +2062,7 @@ static struct pci_driver dscc4_driver = {
static int __init dscc4_init_module(void)
{
- return pci_module_init(&dscc4_driver);
+ return pci_register_driver(&dscc4_driver);
}
static void __exit dscc4_cleanup_module(void)
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3705db04a34..564351aafa4 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2697,7 +2697,7 @@ fst_init(void)
for (i = 0; i < FST_MAX_CARDS; i++)
fst_card_array[i] = NULL;
spin_lock_init(&fst_work_q_lock);
- return pci_module_init(&fst_driver);
+ return pci_register_driver(&fst_driver);
}
static void __exit
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 39f44241a72..7b5d81deb02 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1790,7 +1790,7 @@ static struct pci_driver lmc_driver = {
static int __init init_lmc(void)
{
- return pci_module_init(&lmc_driver);
+ return pci_register_driver(&lmc_driver);
}
static void __exit exit_lmc(void)
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 567effff4a3..56e69403d17 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3677,7 +3677,7 @@ static struct pci_driver cpc_driver = {
static int __init cpc_init(void)
{
- return pci_module_init(&cpc_driver);
+ return pci_register_driver(&cpc_driver);
}
static void __exit cpc_cleanup_module(void)
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 4df61fa3214..a6b9c33b68e 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -476,7 +476,7 @@ static int __init pci200_init_module(void)
printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
return -EINVAL;
}
- return pci_module_init(&pci200_pci_driver);
+ return pci_register_driver(&pci200_pci_driver);
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 7628c2d81f4..0ba018f8382 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -32,7 +32,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h> /* for CONFIG_DLCI_MAX */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index b2031dfc4bb..ec68f7dfd93 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -837,7 +837,7 @@ static int __init wanxl_init_module(void)
#ifdef MODULE
printk(KERN_INFO "%s\n", version);
#endif
- return pci_module_init(&wanxl_pci_driver);
+ return pci_register_driver(&wanxl_pci_driver);
}
static void __exit wanxl_cleanup_module(void)
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index d425c3cefde..3bfa791c323 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -76,7 +76,7 @@ static void __devexit atmel_pci_remove(struct pci_dev *pdev)
static int __init atmel_init_module(void)
{
- return pci_module_init(&atmel_driver);
+ return pci_register_driver(&atmel_driver);
}
static void __exit atmel_cleanup_module(void)
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 5d5dab6a209..d2db8eb412c 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -6532,7 +6532,7 @@ static int __init ipw2100_init(void)
printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
- ret = pci_module_init(&ipw2100_pci_driver);
+ ret = pci_register_driver(&ipw2100_pci_driver);
#ifdef CONFIG_IPW2100_DEBUG
ipw2100_debug_level = debug;
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index fa245f126c8..f29ec0ebed2 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -11788,7 +11788,7 @@ static int __init ipw_init(void)
printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
- ret = pci_module_init(&ipw_driver);
+ ret = pci_register_driver(&ipw_driver);
if (ret) {
IPW_ERROR("Unable to initialize PCI module\n");
return ret;
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index bf05b907747..eaf3d13b851 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -304,7 +304,7 @@ MODULE_LICENSE("Dual MPL/GPL");
static int __init orinoco_nortel_init(void)
{
printk(KERN_DEBUG "%s\n", version);
- return pci_module_init(&orinoco_nortel_driver);
+ return pci_register_driver(&orinoco_nortel_driver);
}
static void __exit orinoco_nortel_exit(void)
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 1759c543fbe..97a8b4ff32b 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -244,7 +244,7 @@ MODULE_LICENSE("Dual MPL/GPL");
static int __init orinoco_pci_init(void)
{
printk(KERN_DEBUG "%s\n", version);
- return pci_module_init(&orinoco_pci_driver);
+ return pci_register_driver(&orinoco_pci_driver);
}
static void __exit orinoco_pci_exit(void)
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 7f006f62417..31162ac25a9 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -351,7 +351,7 @@ MODULE_LICENSE("Dual MPL/GPL");
static int __init orinoco_plx_init(void)
{
printk(KERN_DEBUG "%s\n", version);
- return pci_module_init(&orinoco_plx_driver);
+ return pci_register_driver(&orinoco_plx_driver);
}
static void __exit orinoco_plx_exit(void)
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index 0831721e4d6..7c7b960c91d 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -228,7 +228,7 @@ MODULE_LICENSE("Dual MPL/GPL");
static int __init orinoco_tmd_init(void)
{
printk(KERN_DEBUG "%s\n", version);
- return pci_module_init(&orinoco_tmd_driver);
+ return pci_register_driver(&orinoco_tmd_driver);
}
static void __exit orinoco_tmd_exit(void)
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 09fc17a0f02..f692dccf0d0 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -313,7 +313,7 @@ prism54_module_init(void)
__bug_on_wrong_struct_sizes ();
- return pci_module_init(&prism54_driver);
+ return pci_register_driver(&prism54_driver);
}
/* by the time prism54_module_exit() terminates, as a postcondition
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 8459a18254a..b6b24743370 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1434,7 +1434,7 @@ static int __init yellowfin_init (void)
#ifdef MODULE
printk(version);
#endif
- return pci_module_init (&yellowfin_driver);
+ return pci_register_driver(&yellowfin_driver);
}
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 2d20caf377f..a9bb3cb7e89 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -123,7 +123,8 @@ enum {
ich6_sata = 4,
ich6_sata_ahci = 5,
ich6m_sata_ahci = 6,
- ich8_sata_ahci = 7,
+ ich7m_sata_ahci = 7,
+ ich8_sata_ahci = 8,
/* constants for mapping table */
P0 = 0, /* port 0 */
@@ -188,7 +189,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* 82801GB/GR/GH (ICH7, identical to ICH6) */
{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
- { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+ { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci },
/* Enterprise Southbridge 2 (where's the datasheet?) */
{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
/* SATA Controller 1 IDE (ICH8, no datasheet yet) */
@@ -336,6 +337,24 @@ static const struct piix_map_db ich6m_map_db = {
},
};
+static const struct piix_map_db ich7m_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x5,
+ .present_shift = 4,
+
+ /* Map 01b isn't specified in the doc but some notebooks use
+ * it anyway. ATM, the only case spotted carries subsystem ID
+ * 1025:0107. This is the only difference from ich6m.
+ */
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, P2, RV, RV }, /* 00b */
+ { IDE, IDE, P1, P3 }, /* 01b */
+ { P0, P2, IDE, IDE }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+};
+
static const struct piix_map_db ich8_map_db = {
.mask = 0x3,
.port_enable = 0x3,
@@ -355,6 +374,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich6_sata] = &ich6_map_db,
[ich6_sata_ahci] = &ich6_map_db,
[ich6m_sata_ahci] = &ich6m_map_db,
+ [ich7m_sata_ahci] = &ich7m_map_db,
[ich8_sata_ahci] = &ich8_map_db,
};
@@ -444,6 +464,18 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
+ /* ich7m_sata_ahci */
+ {
+ .sht = &piix_sht,
+ .host_flags = ATA_FLAG_SATA |
+ PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+ PIIX_FLAG_AHCI,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x7f, /* udma0-6 */
+ .port_ops = &piix_sata_ops,
+ },
+
/* ich8_sata_ahci */
{
.sht = &piix_sht,
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 1053c7c76b7..fa38a413d16 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -1961,8 +1961,7 @@ comreset_retry:
timeout = jiffies + msecs_to_jiffies(200);
do {
sata_scr_read(ap, SCR_STATUS, &sstatus);
- sstatus &= 0x3;
- if ((sstatus == 3) || (sstatus == 0))
+ if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
break;
__msleep(1, can_sleep);
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 01d40369a8a..a3727af8b9c 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -77,6 +77,7 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void vt6420_error_handler(struct ata_port *ap);
static const struct pci_device_id svia_pci_tbl[] = {
+ { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
{ 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
{ 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index acb24c6219d..a2c56b2de58 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -1444,7 +1444,7 @@ void hid_init_reports(struct hid_device *hid)
#define USB_DEVICE_ID_GTCO_402 0x0402
#define USB_DEVICE_ID_GTCO_403 0x0403
#define USB_DEVICE_ID_GTCO_404 0x0404
-#define USB_DEVICE_ID_GTCO_404 0x0405
+#define USB_DEVICE_ID_GTCO_405 0x0405
#define USB_DEVICE_ID_GTCO_500 0x0500
#define USB_DEVICE_ID_GTCO_501 0x0501
#define USB_DEVICE_ID_GTCO_502 0x0502
@@ -1657,7 +1657,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE },
diff --git a/drivers/usb/input/usbtouchscreen.c b/drivers/usb/input/usbtouchscreen.c
index 3b175aa482c..a338bf4c2d7 100644
--- a/drivers/usb/input/usbtouchscreen.c
+++ b/drivers/usb/input/usbtouchscreen.c
@@ -286,7 +286,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
static int itm_read_data(unsigned char *pkt, int *x, int *y, int *touch, int *press)
{
*x = ((pkt[0] & 0x1F) << 7) | (pkt[3] & 0x7F);
- *x = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F);
+ *y = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F);
*press = ((pkt[2] & 0x1F) << 7) | (pkt[5] & 0x7F);
*touch = ~pkt[7] & 0x20;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 738bd7c7451..e16582f3733 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3435,6 +3435,8 @@ static void sisusb_disconnect(struct usb_interface *intf)
static struct usb_device_id sisusb_table [] = {
{ USB_DEVICE(0x0711, 0x0900) },
+ { USB_DEVICE(0x0711, 0x0901) },
+ { USB_DEVICE(0x0711, 0x0902) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 15945e806f0..c6115aa1b44 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -506,6 +506,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
{ USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 8888cd80a49..77299996f7e 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -467,6 +467,11 @@
#define TESTO_VID 0x128D
#define TESTO_USB_INTERFACE_PID 0x0001
+/*
+ * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
+ */
+#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
+
/* Commands */
#define FTDI_SIO_RESET 0 /* Reset the port */
#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */