From 0ca41c0413a4d9ca58767d53d23accea9aa1cdef Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Thu, 25 Sep 2008 14:05:28 +0000 Subject: [2.6.28,1/1] cxgb3 - fix race in EEH A SGE queue set timer might access registers while in EEH recovery, triggering an EEH error loop. Stop all timers early in EEH process. Signed-off-by: Divy Le Ray Signed-off-by: David S. Miller --- drivers/net/cxgb3/cxgb3_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/cxgb3/cxgb3_main.c') diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 5447f3e60f0..d355c826b9b 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -479,6 +479,7 @@ static int setup_sge_qsets(struct adapter *adap) irq_idx, &adap->params.sge.qset[qset_idx], ntxq, dev); if (err) { + t3_stop_sge_timers(adap); t3_free_sge_resources(adap); return err; } @@ -2449,6 +2450,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) offload_close(&adapter->tdev); + /* Stop SGE timers */ + t3_stop_sge_timers(adapter); + adapter->flags &= ~FULL_INIT_DONE; pci_disable_device(pdev); @@ -2801,6 +2805,7 @@ static void __devexit remove_one(struct pci_dev *pdev) if (test_bit(i, &adapter->registered_device_map)) unregister_netdev(adapter->port[i]); + t3_stop_sge_timers(adapter); t3_free_sge_resources(adapter); cxgb_disable_msi(adapter); -- cgit v1.2.3 From 20d3fc11505a2706a33b4c9a932af036d836727f Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Wed, 8 Oct 2008 17:36:03 -0700 Subject: cxgb3: reset the adapter on fatal error when a fatal error occurs, bring ports down, reset the chip, and bring ports back up. Factorize code used for both EEH and fatal error recovery. Fix timer usage when bringing up/resetting sge queue sets. Signed-off-by: Divy Le Ray Signed-off-by: David S. Miller --- drivers/net/cxgb3/cxgb3_main.c | 164 ++++++++++++++++++++++++++++------------- 1 file changed, 111 insertions(+), 53 deletions(-) (limited to 'drivers/net/cxgb3/cxgb3_main.c') diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index d355c826b9b..0e51d49842f 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -892,6 +892,13 @@ static int cxgb_up(struct adapter *adap) goto out; } + /* + * Clear interrupts now to catch errors if t3_init_hw fails. + * We clear them again later as initialization may trigger + * conditions that can interrupt. + */ + t3_intr_clear(adap); + err = t3_init_hw(adap, 0); if (err) goto out; @@ -1101,9 +1108,9 @@ static int cxgb_close(struct net_device *dev) netif_carrier_off(dev); t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); - spin_lock(&adapter->work_lock); /* sync with update task */ + spin_lock_irq(&adapter->work_lock); /* sync with update task */ clear_bit(pi->port_id, &adapter->open_device_map); - spin_unlock(&adapter->work_lock); + spin_unlock_irq(&adapter->work_lock); if (!(adapter->open_device_map & PORT_MASK)) cancel_rearming_delayed_workqueue(cxgb3_wq, @@ -2356,10 +2363,10 @@ static void t3_adap_check_task(struct work_struct *work) check_t3b2_mac(adapter); /* Schedule the next check update if any port is active. 
*/ - spin_lock(&adapter->work_lock); + spin_lock_irq(&adapter->work_lock); if (adapter->open_device_map & PORT_MASK) schedule_chk_task(adapter); - spin_unlock(&adapter->work_lock); + spin_unlock_irq(&adapter->work_lock); } /* @@ -2404,6 +2411,96 @@ void t3_os_ext_intr_handler(struct adapter *adapter) spin_unlock(&adapter->work_lock); } +static int t3_adapter_error(struct adapter *adapter, int reset) +{ + int i, ret = 0; + + /* Stop all ports */ + for_each_port(adapter, i) { + struct net_device *netdev = adapter->port[i]; + + if (netif_running(netdev)) + cxgb_close(netdev); + } + + if (is_offload(adapter) && + test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) + offload_close(&adapter->tdev); + + /* Stop SGE timers */ + t3_stop_sge_timers(adapter); + + adapter->flags &= ~FULL_INIT_DONE; + + if (reset) + ret = t3_reset_adapter(adapter); + + pci_disable_device(adapter->pdev); + + return ret; +} + +static int t3_reenable_adapter(struct adapter *adapter) +{ + if (pci_enable_device(adapter->pdev)) { + dev_err(&adapter->pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + goto err; + } + pci_set_master(adapter->pdev); + pci_restore_state(adapter->pdev); + + /* Free sge resources */ + t3_free_sge_resources(adapter); + + if (t3_replay_prep_adapter(adapter)) + goto err; + + return 0; +err: + return -1; +} + +static void t3_resume_ports(struct adapter *adapter) +{ + int i; + + /* Restart the ports */ + for_each_port(adapter, i) { + struct net_device *netdev = adapter->port[i]; + + if (netif_running(netdev)) { + if (cxgb_open(netdev)) { + dev_err(&adapter->pdev->dev, + "can't bring device back up" + " after reset\n"); + continue; + } + } + } +} + +/* + * processes a fatal error. + * Bring the ports down, reset the chip, bring the ports back up. + */ +static void fatal_error_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + fatal_error_handler_task); + int err = 0; + + rtnl_lock(); + err = t3_adapter_error(adapter, 1); + if (!err) + err = t3_reenable_adapter(adapter); + if (!err) + t3_resume_ports(adapter); + + CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded"); + rtnl_unlock(); +} + void t3_fatal_err(struct adapter *adapter) { unsigned int fw_status[4]; @@ -2414,7 +2511,11 @@ void t3_fatal_err(struct adapter *adapter) t3_write_reg(adapter, A_XGM_RX_CTRL, 0); t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0); t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0); + + spin_lock(&adapter->work_lock); t3_intr_disable(adapter); + queue_work(cxgb3_wq, &adapter->fatal_error_handler_task); + spin_unlock(&adapter->work_lock); } CH_ALERT(adapter, "encountered fatal error, operation suspended\n"); if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status)) @@ -2436,26 +2537,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct adapter *adapter = pci_get_drvdata(pdev); - int i; - - /* Stop all ports */ - for_each_port(adapter, i) { - struct net_device *netdev = adapter->port[i]; - - if (netif_running(netdev)) - cxgb_close(netdev); - } - - if (is_offload(adapter) && - test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) - offload_close(&adapter->tdev); - - /* Stop SGE timers */ - t3_stop_sge_timers(adapter); - - adapter->flags &= ~FULL_INIT_DONE; + int ret; - pci_disable_device(pdev); + ret = t3_adapter_error(adapter, 0); /* Request a slot reset. 
*/ return PCI_ERS_RESULT_NEED_RESET; @@ -2471,22 +2555,9 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); - if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, - "Cannot re-enable PCI device after reset.\n"); - goto err; - } - pci_set_master(pdev); - pci_restore_state(pdev); - - /* Free sge resources */ - t3_free_sge_resources(adapter); - - if (t3_replay_prep_adapter(adapter)) - goto err; + if (!t3_reenable_adapter(adapter)) + return PCI_ERS_RESULT_RECOVERED; - return PCI_ERS_RESULT_RECOVERED; -err: return PCI_ERS_RESULT_DISCONNECT; } @@ -2500,22 +2571,8 @@ err: static void t3_io_resume(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); - int i; - - /* Restart the ports */ - for_each_port(adapter, i) { - struct net_device *netdev = adapter->port[i]; - if (netif_running(netdev)) { - if (cxgb_open(netdev)) { - dev_err(&pdev->dev, - "can't bring device back up" - " after reset\n"); - continue; - } - netif_device_attach(netdev); - } - } + t3_resume_ports(adapter); } static struct pci_error_handlers t3_err_handler = { @@ -2664,6 +2721,7 @@ static int __devinit init_one(struct pci_dev *pdev, INIT_LIST_HEAD(&adapter->adapter_list); INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); + INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); for (i = 0; i < ai->nports; ++i) { -- cgit v1.2.3 From 8c26376112fb4b8dfea42069b602c03d53366052 Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Wed, 8 Oct 2008 17:37:33 -0700 Subject: cxgb3: Allocate multiqueues at init time Allocate a queue set per core, up to the maximum of available qsets. Share the queue sets on multi port adapters. Rename MSI-X interrupt vectors ethX-N, N being the queue set number. Signed-off-by: Divy Le Ray Signed-off-by: David S. 
Miller --- drivers/net/cxgb3/cxgb3_main.c | 179 ++++++++++++++++++++++++++++++++--------- 1 file changed, 142 insertions(+), 37 deletions(-) (limited to 'drivers/net/cxgb3/cxgb3_main.c') diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 0e51d49842f..fea2c719c8f 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -274,10 +274,10 @@ static void name_msix_vecs(struct adapter *adap) for (i = 0; i < pi->nqsets; i++, msi_idx++) { snprintf(adap->msix_info[msi_idx].desc, n, - "%s (queue %d)", d->name, i); + "%s-%d", d->name, pi->first_qset + i); adap->msix_info[msi_idx].desc[n] = 0; } - } + } } static int request_msix_data_irqs(struct adapter *adap) @@ -306,6 +306,22 @@ static int request_msix_data_irqs(struct adapter *adap) return 0; } +static void free_irq_resources(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + int i, n = 0; + + free_irq(adapter->msix_info[0].vec, adapter); + for_each_port(adapter, i) + n += adap2pinfo(adapter, i)->nqsets; + + for (i = 0; i < n; ++i) + free_irq(adapter->msix_info[i + 1].vec, + &adapter->sge.qs[i]); + } else + free_irq(adapter->pdev->irq, adapter); +} + static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, unsigned long n) { @@ -473,7 +489,10 @@ static int setup_sge_qsets(struct adapter *adap) struct port_info *pi = netdev_priv(dev); pi->qs = &adap->sge.qs[pi->first_qset]; - for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { + for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; + ++j, ++qset_idx) { + if (!pi->rx_csum_offload) + adap->params.sge.qset[qset_idx].lro = 0; err = t3_sge_alloc_qset(adap, qset_idx, 1, (adap->flags & USING_MSIX) ? qset_idx + 1 : irq_idx, @@ -740,11 +759,12 @@ static void init_port_mtus(struct adapter *adapter) t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus); } -static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, +static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, int hi, int port) { struct sk_buff *skb; struct mngt_pktsched_wr *req; + int ret; skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL); req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); @@ -755,20 +775,28 @@ static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, req->min = lo; req->max = hi; req->binding = port; - t3_mgmt_tx(adap, skb); + ret = t3_mgmt_tx(adap, skb); + + return ret; } -static void bind_qsets(struct adapter *adap) +static int bind_qsets(struct adapter *adap) { - int i, j; + int i, j, err = 0; for_each_port(adap, i) { const struct port_info *pi = adap2pinfo(adap, i); - for (j = 0; j < pi->nqsets; ++j) - send_pktsched_cmd(adap, 1, pi->first_qset + j, -1, - -1, i); + for (j = 0; j < pi->nqsets; ++j) { + int ret = send_pktsched_cmd(adap, 1, + pi->first_qset + j, -1, + -1, i); + if (ret) + err = ret; + } } + + return err; } #define FW_FNAME "t3fw-%d.%d.%d.bin" @@ -954,9 +982,16 @@ static int cxgb_up(struct adapter *adap) t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff); } - if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX) - bind_qsets(adap); - adap->flags |= QUEUES_BOUND; + if (!(adap->flags & QUEUES_BOUND)) { + err = bind_qsets(adap); + if (err) { + CH_ERR(adap, "failed to bind qsets, err %d\n", err); + t3_intr_disable(adap); + free_irq_resources(adap); + goto out; + } + adap->flags |= QUEUES_BOUND; + } out: return err; @@ -975,19 +1010,7 @@ static void cxgb_down(struct adapter *adapter) t3_intr_disable(adapter); 
spin_unlock_irq(&adapter->work_lock); - if (adapter->flags & USING_MSIX) { - int i, n = 0; - - free_irq(adapter->msix_info[0].vec, adapter); - for_each_port(adapter, i) - n += adap2pinfo(adapter, i)->nqsets; - - for (i = 0; i < n; ++i) - free_irq(adapter->msix_info[i + 1].vec, - &adapter->sge.qs[i]); - } else - free_irq(adapter->pdev->irq, adapter); - + free_irq_resources(adapter); flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */ quiesce_rx(adapter); } @@ -1292,8 +1315,8 @@ static unsigned long collect_sge_port_stats(struct adapter *adapter, int i; unsigned long tot = 0; - for (i = 0; i < p->nqsets; ++i) - tot += adapter->sge.qs[i + p->first_qset].port_stats[idx]; + for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i) + tot += adapter->sge.qs[i].port_stats[idx]; return tot; } @@ -1497,7 +1520,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct link_config *lc = &p->link_config; if (!(lc->supported & SUPPORTED_Autoneg)) - return -EOPNOTSUPP; /* can't change speed/duplex */ + return -EOPNOTSUPP; /* can't change speed/duplex */ if (cmd->autoneg == AUTONEG_DISABLE) { int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); @@ -1576,8 +1599,10 @@ static int set_rx_csum(struct net_device *dev, u32 data) struct adapter *adap = p->adapter; int i; - for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { + adap->params.sge.qset[i].lro = 0; adap->sge.qs[i].lro_enabled = 0; + } } return 0; } @@ -1783,6 +1808,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) int i; struct qset_params *q; struct ch_qset_params t; + int q1 = pi->first_qset; + int nqsets = pi->nqsets; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -1805,6 +1832,16 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES)) return -EINVAL; + + if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0) + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (t.qset_idx >= pi->first_qset && + t.qset_idx < pi->first_qset + pi->nqsets && + !pi->rx_csum_offload) + return -EINVAL; + } + if ((adapter->flags & FULL_INIT_DONE) && (t.rspq_size >= 0 || t.fl_size[0] >= 0 || t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || @@ -1812,6 +1849,20 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) t.polling >= 0 || t.cong_thres >= 0)) return -EBUSY; + /* Allow setting of any available qset when offload enabled */ + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + q1 = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + nqsets += pi->first_qset + pi->nqsets; + } + } + + if (t.qset_idx < q1) + return -EINVAL; + if (t.qset_idx > q1 + nqsets - 1) + return -EINVAL; + q = &adapter->params.sge.qset[t.qset_idx]; if (t.rspq_size >= 0) @@ -1861,13 +1912,26 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) case CHELSIO_GET_QSET_PARAMS:{ struct qset_params *q; struct ch_qset_params t; + int q1 = pi->first_qset; + int nqsets = pi->nqsets; + int i; if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; - if (t.qset_idx >= SGE_QSETS) + + /* Display qsets for all ports when offload enabled */ + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + q1 = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + nqsets = pi->first_qset + pi->nqsets; + } + } + + if (t.qset_idx >= nqsets) return -EINVAL; - q = 
&adapter->params.sge.qset[t.qset_idx]; + q = &adapter->params.sge.qset[q1 + t.qset_idx]; t.rspq_size = q->rspq_size; t.txq_size[0] = q->txq_size[0]; t.txq_size[1] = q->txq_size[1]; @@ -1878,6 +1942,12 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) t.lro = q->lro; t.intr_lat = q->coalesce_usecs; t.cong_thres = q->cong_thres; + t.qnum = q1; + + if (adapter->flags & USING_MSIX) + t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec; + else + t.vector = adapter->pdev->irq; if (copy_to_user(useraddr, &t, sizeof(t))) return -EFAULT; @@ -2223,8 +2293,8 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) { int i; - for (i = 0; i < p->nqsets; i++) { - struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq; + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { + struct sge_rspq *q = &adap->sge.qs[i].rspq; spin_lock_irq(&q->lock); spin_unlock_irq(&q->lock); @@ -2581,6 +2651,42 @@ static struct pci_error_handlers t3_err_handler = { .resume = t3_io_resume, }; +/* + * Set the number of qsets based on the number of CPUs and the number of ports, + * not to exceed the number of available qsets, assuming there are enough qsets + * per port in HW. + */ +static void set_nqsets(struct adapter *adap) +{ + int i, j = 0; + int num_cpus = num_online_cpus(); + int hwports = adap->params.nports; + int nqsets = SGE_QSETS; + + if (adap->params.rev > 0) { + if (hwports == 2 && + (hwports * nqsets > SGE_QSETS || + num_cpus >= nqsets / hwports)) + nqsets /= hwports; + if (nqsets > num_cpus) + nqsets = num_cpus; + if (nqsets < 1 || hwports == 4) + nqsets = 1; + } else + nqsets = 1; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = j; + pi->nqsets = nqsets; + j = pi->first_qset + nqsets; + + dev_info(&adap->pdev->dev, + "Port %d using %d queue sets.\n", i, nqsets); + } +} + static int __devinit cxgb_enable_msix(struct adapter *adap) { struct msix_entry entries[SGE_QSETS + 1]; @@ -2739,9 +2845,6 @@ static int __devinit init_one(struct pci_dev *pdev, pi = netdev_priv(netdev); pi->adapter = adapter; pi->rx_csum_offload = 1; - pi->nqsets = 1; - pi->first_qset = i; - pi->activity = 0; pi->port_id = i; netif_carrier_off(netdev); netdev->irq = pdev->irq; @@ -2818,6 +2921,8 @@ static int __devinit init_one(struct pci_dev *pdev, else if (msi > 0 && pci_enable_msi(pdev) == 0) adapter->flags |= USING_MSI; + set_nqsets(adapter); + err = sysfs_create_group(&adapter->port[0]->dev.kobj, &cxgb3_attr_group); -- cgit v1.2.3 From 044979827eda13675abab99879ebe3ea535d59fa Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Wed, 8 Oct 2008 17:38:29 -0700 Subject: cxgb3: simplify port type struct and usage Second step in overall phy layer reorganization. Clean up the port_type_info structure. Support coextistence of clause 22 and clause 45 MDIO devices. Select the type of MDIO transaction on a per transaction basis. Signed-off-by: Divy Le Ray Signed-off-by: David S. 
Miller --- drivers/net/cxgb3/cxgb3_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/cxgb3/cxgb3_main.c') diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index fea2c719c8f..5b8251decbd 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2368,7 +2368,7 @@ static void check_link_status(struct adapter *adapter) struct net_device *dev = adapter->port[i]; struct port_info *p = netdev_priv(dev); - if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev)) + if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) t3_link_changed(adapter, i); } } @@ -2731,7 +2731,7 @@ static void __devinit print_port_info(struct adapter *adap, if (!test_bit(i, &adap->registered_device_map)) continue; printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n", - dev->name, ai->desc, pi->port_type->desc, + dev->name, ai->desc, pi->phy.desc, is_offload(adap) ? "R" : "", adap->params.rev, buf, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? " MSI" : ""); -- cgit v1.2.3 From 9b1e36566c5fafbcc732c971acfcf8580332931a Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Wed, 8 Oct 2008 17:39:31 -0700 Subject: cxgb3: commnonize LASI phy code Add generic code to manage interrupt driven PHYs. Do not reset the phy after link parameters update, the new values might get lost. Return early from link change notification when the link parameters remain unchanged. Signed-off-by: Divy Le Ray Signed-off-by: David S. Miller --- drivers/net/cxgb3/cxgb3_main.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'drivers/net/cxgb3/cxgb3_main.c') diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 5b8251decbd..bddcf945d6b 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -1516,11 +1516,22 @@ static int speed_duplex_to_caps(int speed, int duplex) static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { + int cap; struct port_info *p = netdev_priv(dev); struct link_config *lc = &p->link_config; - if (!(lc->supported & SUPPORTED_Autoneg)) - return -EOPNOTSUPP; /* can't change speed/duplex */ + if (!(lc->supported & SUPPORTED_Autoneg)) { + /* + * PHY offers a single speed/duplex. See if that's what's + * being requested. + */ + if (cmd->autoneg == AUTONEG_DISABLE) { + cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); + if (lc->supported & cap) + return 0; + } + return -EINVAL; + } if (cmd->autoneg == AUTONEG_DISABLE) { int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); @@ -2195,7 +2206,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) mmd = data->phy_id >> 8; if (!mmd) mmd = MDIO_DEV_PCS; - else if (mmd > MDIO_DEV_XGXS) + else if (mmd > MDIO_DEV_VEND2) return -EINVAL; ret = @@ -2221,7 +2232,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) mmd = data->phy_id >> 8; if (!mmd) mmd = MDIO_DEV_PCS; - else if (mmd > MDIO_DEV_XGXS) + else if (mmd > MDIO_DEV_VEND2) return -EINVAL; ret = -- cgit v1.2.3 From 1e8820256f9921370cd7423396871e2d850e0323 Mon Sep 17 00:00:00 2001 From: Divy Le Ray Date: Wed, 8 Oct 2008 17:40:07 -0700 Subject: cxgb3: Support for Aeluros 2005 PHY Add support for SR PHY. Auto-detect phy module type, and report type changes. Signed-off-by: Divy Le Ray Signed-off-by: David S. 
Miller --- drivers/net/cxgb3/cxgb3_main.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers/net/cxgb3/cxgb3_main.c') diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index bddcf945d6b..f31985df0bb 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -208,6 +208,31 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat, } } +/** + * t3_os_phymod_changed - handle PHY module changes + * @phy: the PHY reporting the module change + * @mod_type: new module type + * + * This is the OS-dependent handler for PHY module changes. It is + * invoked when a PHY module is removed or inserted for any OS-specific + * processing. + */ +void t3_os_phymod_changed(struct adapter *adap, int port_id) +{ + static const char *mod_str[] = { + NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown" + }; + + const struct net_device *dev = adap->port[port_id]; + const struct port_info *pi = netdev_priv(dev); + + if (pi->phy.modtype == phy_modtype_none) + printk(KERN_INFO "%s: PHY module unplugged\n", dev->name); + else + printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name, + mod_str[pi->phy.modtype]); +} + static void cxgb_set_rxmode(struct net_device *dev) { struct t3_rx_mode rm; -- cgit v1.2.3
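For illustration, a minimal, self-contained user-space sketch of the compare-and-notify pattern the last commit message describes ("Auto-detect phy module type, and report type changes"). The detection side lives in the PHY-layer code and is not part of the cxgb3_main.c diff above; struct phy_state, probe_modtype() and check_modtype() below are hypothetical stand-ins, and only the mod_str table and the reporting logic mirror t3_os_phymod_changed() as shown in the patch.

/*
 * Illustrative sketch only, not part of the patches above.  The real
 * detection code reads the module EEPROM in the PHY driver; here a
 * canned probe_modtype() stands in for it.
 */
#include <stdio.h>

enum phy_modtype {
	phy_modtype_none,		/* no module plugged in */
	phy_modtype_sr,
	phy_modtype_lr,
	phy_modtype_lrm,
	phy_modtype_twinax,
	phy_modtype_twinax_long,
	phy_modtype_unknown
};

struct phy_state {
	enum phy_modtype modtype;	/* last module type seen */
};

/* Hypothetical hardware probe; the driver would read this over MDIO/I2C. */
static enum phy_modtype probe_modtype(void)
{
	return phy_modtype_sr;
}

/* OS-level report, mirroring the mod_str lookup in t3_os_phymod_changed(). */
static void report_modtype(enum phy_modtype t)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	if (t == phy_modtype_none)
		printf("PHY module unplugged\n");
	else
		printf("%s PHY module inserted\n", mod_str[t]);
}

/*
 * Compare the freshly probed type with the stored one and report only
 * when it actually changed, so repeated polls stay silent.
 */
static void check_modtype(struct phy_state *phy)
{
	enum phy_modtype now = probe_modtype();

	if (now != phy->modtype) {
		phy->modtype = now;
		report_modtype(now);
	}
}

int main(void)
{
	struct phy_state phy = { .modtype = phy_modtype_none };

	check_modtype(&phy);	/* prints "SR PHY module inserted" */
	check_modtype(&phy);	/* no change, prints nothing */
	return 0;
}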