312 files changed, 5152 insertions, 4335 deletions
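Most of the driver-side churn in this diff follows a single pattern: the old per-field callbacks on struct net_device (dev->open, dev->stop, dev->hard_start_xmit, dev->tx_timeout, ...) are gathered into a const struct net_device_ops and hooked up through dev->netdev_ops. A minimal sketch of that conversion, using a made-up "foo" driver rather than any driver actually touched below:

	/* Before: callbacks assigned one by one at probe time */
	dev->open               = foo_open;
	dev->stop               = foo_close;
	dev->hard_start_xmit    = foo_start_xmit;
	dev->set_multicast_list = foo_set_rx_mode;
	dev->tx_timeout         = foo_tx_timeout;

	/* After: one shared, read-only ops table per driver */
	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open               = foo_open,
		.ndo_stop               = foo_close,
		.ndo_start_xmit         = foo_start_xmit,
		.ndo_set_multicast_list = foo_set_rx_mode,
		.ndo_tx_timeout         = foo_tx_timeout,
		/* generic ethernet helpers, as most converted drivers below use */
		.ndo_change_mtu         = eth_change_mtu,
		.ndo_set_mac_address    = eth_mac_addr,
		.ndo_validate_addr      = eth_validate_addr,
	};

	/* at probe time */
	dev->netdev_ops = &foo_netdev_ops;

Because the ops table is const and shared by every instance of the driver, the per-device structure no longer carries writable function pointers, which is the main point of the conversion.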
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt index 7a3bb1abb83..b132e4a3cf0 100644 --- a/Documentation/networking/dccp.txt +++ b/Documentation/networking/dccp.txt @@ -141,7 +141,8 @@ rx_ccid = 2 Default CCID for the receiver-sender half-connection; see tx_ccid. seq_window = 100 - The initial sequence window (sec. 7.5.2). + The initial sequence window (sec. 7.5.2) of the sender. This influences + the local ackno validity and the remote seqno validity windows (7.5.1). tx_qlen = 5 The size of the transmit buffer in packets. A value of 0 corresponds diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index c7712787933..ff3f219ee4d 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -782,6 +782,12 @@ arp_ignore - INTEGER The max value from conf/{all,interface}/arp_ignore is used when ARP request is received on the {interface} +arp_notify - BOOLEAN + Define mode for notification of address and device changes. + 0 - (default): do nothing + 1 - Generate gratuitous arp replies when device is brought up + or hardware address changes. + arp_accept - BOOLEAN Define behavior when gratuitous arp replies are received: 0 - drop gratuitous arp frames diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index dc073e167ab..5608a1e5a3b 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -4311,10 +4311,17 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size) dev->stats.rx_bytes += size; netif_rx(skb); - - dev->last_rx = jiffies; } +static const struct net_device_ops hdlcdev_ops = { + .ndo_open = hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + /** * called by device driver when adding device instance * do generic HDLC initialization @@ -4341,11 +4348,8 @@ static int hdlcdev_init(MGSLPC_INFO *info) dev->irq = info->irq_level; /* network layer callbacks and settings */ - dev->do_ioctl = hdlcdev_ioctl; - dev->open = hdlcdev_open; - dev->stop = hdlcdev_close; - dev->tx_timeout = hdlcdev_tx_timeout; - dev->watchdog_timeo = 10*HZ; + dev->netdev_ops = &hdlcdev_ops; + dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings */ diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index b8063d4cad3..0057a8f58cb 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -8007,10 +8007,17 @@ static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size) dev->stats.rx_bytes += size; netif_rx(skb); - - dev->last_rx = jiffies; } +static const struct net_device_ops hdlcdev_ops = { + .ndo_open = hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + /** * called by device driver when adding device instance * do generic HDLC initialization @@ -8038,11 +8045,8 @@ static int hdlcdev_init(struct mgsl_struct *info) dev->dma = info->dma_level; /* network layer callbacks and settings */ - dev->do_ioctl = hdlcdev_ioctl; - dev->open = hdlcdev_open; - dev->stop = hdlcdev_close; - dev->tx_timeout = hdlcdev_tx_timeout; - dev->watchdog_timeo = 10*HZ; + dev->netdev_ops = &hdlcdev_ops; + dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings 
*/ diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index f329f459817..efb3dc928a4 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -1763,10 +1763,17 @@ static void hdlcdev_rx(struct slgt_info *info, char *buf, int size) dev->stats.rx_bytes += size; netif_rx(skb); - - dev->last_rx = jiffies; } +static const struct net_device_ops hdlcdev_ops = { + .ndo_open = hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + /** * called by device driver when adding device instance * do generic HDLC initialization @@ -1794,11 +1801,8 @@ static int hdlcdev_init(struct slgt_info *info) dev->irq = info->irq_level; /* network layer callbacks and settings */ - dev->do_ioctl = hdlcdev_ioctl; - dev->open = hdlcdev_open; - dev->stop = hdlcdev_close; - dev->tx_timeout = hdlcdev_tx_timeout; - dev->watchdog_timeo = 10*HZ; + dev->netdev_ops = &hdlcdev_ops; + dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings */ diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 7b0c5b2dd26..8eb6c89a980 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c @@ -1907,10 +1907,17 @@ static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size) dev->stats.rx_bytes += size; netif_rx(skb); - - dev->last_rx = jiffies; } +static const struct net_device_ops hdlcdev_ops = { + .ndo_open = hdlcdev_open, + .ndo_stop = hdlcdev_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hdlcdev_ioctl, + .ndo_tx_timeout = hdlcdev_tx_timeout, +}; + /** * called by device driver when adding device instance * do generic HDLC initialization @@ -1938,11 +1945,8 @@ static int hdlcdev_init(SLMP_INFO *info) dev->irq = info->irq_level; /* network layer callbacks and settings */ - dev->do_ioctl = hdlcdev_ioctl; - dev->open = hdlcdev_open; - dev->stop = hdlcdev_close; - dev->tx_timeout = hdlcdev_tx_timeout; - dev->watchdog_timeo = 10*HZ; + dev->netdev_ops = &hdlcdev_ops; + dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings */ diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index b6fe7e7a2c2..c769ef269fb 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c @@ -1,9 +1,9 @@ /* * cn_queue.c - * + * * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> * All rights reserved. - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -31,6 +31,48 @@ #include <linux/connector.h> #include <linux/delay.h> + +/* + * This job is sent to the kevent workqueue. + * While no event is once sent to any callback, the connector workqueue + * is not created to avoid a useless waiting kernel task. + * Once the first event is received, we create this dedicated workqueue which + * is necessary because the flow of data can be high and we don't want + * to encumber keventd with that. 
+ */ +static void cn_queue_create(struct work_struct *work) +{ + struct cn_queue_dev *dev; + + dev = container_of(work, struct cn_queue_dev, wq_creation); + + dev->cn_queue = create_singlethread_workqueue(dev->name); + /* If we fail, we will use keventd for all following connector jobs */ + WARN_ON(!dev->cn_queue); +} + +/* + * Queue a data sent to a callback. + * If the connector workqueue is already created, we queue the job on it. + * Otherwise, we queue the job to kevent and queue the connector workqueue + * creation too. + */ +int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work) +{ + struct cn_queue_dev *pdev = cbq->pdev; + + if (likely(pdev->cn_queue)) + return queue_work(pdev->cn_queue, work); + + /* Don't create the connector workqueue twice */ + if (atomic_inc_return(&pdev->wq_requested) == 1) + schedule_work(&pdev->wq_creation); + else + atomic_dec(&pdev->wq_requested); + + return schedule_work(work); +} + void cn_queue_wrapper(struct work_struct *work) { struct cn_callback_entry *cbq = @@ -58,14 +100,17 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); memcpy(&cbq->id.id, id, sizeof(struct cb_id)); cbq->data.callback = callback; - + INIT_WORK(&cbq->work, &cn_queue_wrapper); return cbq; } static void cn_queue_free_callback(struct cn_callback_entry *cbq) { - flush_workqueue(cbq->pdev->cn_queue); + /* The first jobs have been sent to kevent, flush them too */ + flush_scheduled_work(); + if (cbq->pdev->cn_queue) + flush_workqueue(cbq->pdev->cn_queue); kfree(cbq); } @@ -143,14 +188,11 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls) atomic_set(&dev->refcnt, 0); INIT_LIST_HEAD(&dev->queue_list); spin_lock_init(&dev->queue_lock); + init_waitqueue_head(&dev->wq_created); dev->nls = nls; - dev->cn_queue = create_singlethread_workqueue(dev->name); - if (!dev->cn_queue) { - kfree(dev); - return NULL; - } + INIT_WORK(&dev->wq_creation, cn_queue_create); return dev; } @@ -158,9 +200,25 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls) void cn_queue_free_dev(struct cn_queue_dev *dev) { struct cn_callback_entry *cbq, *n; + long timeout; + DEFINE_WAIT(wait); + + /* Flush the first pending jobs queued on kevent */ + flush_scheduled_work(); + + /* If the connector workqueue creation is still pending, wait for it */ + prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE); + if (atomic_read(&dev->wq_requested) && !dev->cn_queue) { + timeout = schedule_timeout(HZ * 2); + if (!timeout && !dev->cn_queue) + WARN_ON(1); + } + finish_wait(&dev->wq_created, &wait); - flush_workqueue(dev->cn_queue); - destroy_workqueue(dev->cn_queue); + if (dev->cn_queue) { + flush_workqueue(dev->cn_queue); + destroy_workqueue(dev->cn_queue); + } spin_lock_bh(&dev->queue_lock); list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index bf4830082a1..fd336c5a905 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -1,9 +1,9 @@ /* * connector.c - * + * * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> * All rights reserved. 
- * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -145,14 +145,13 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v __cbq->data.ddata = data; __cbq->data.destruct_data = destruct_data; - if (queue_work(dev->cbdev->cn_queue, - &__cbq->work)) + if (queue_cn_work(__cbq, &__cbq->work)) err = 0; else err = -EINVAL; } else { struct cn_callback_data *d; - + err = -ENOMEM; __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); if (__new_cbq) { @@ -163,10 +162,12 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v d->destruct_data = destruct_data; d->free = __new_cbq; + __new_cbq->pdev = __cbq->pdev; + INIT_WORK(&__new_cbq->work, &cn_queue_wrapper); - if (queue_work(dev->cbdev->cn_queue, + if (queue_cn_work(__new_cbq, &__new_cbq->work)) err = 0; else { @@ -237,7 +238,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) req = (struct cn_notify_req *)ctl->data; for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { - if (id->idx >= req->first && + if (id->idx >= req->first && id->idx < req->first + req->range) { idx_found = 1; break; @@ -245,7 +246,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) } for (i = 0; i < ctl->val_notify_num; ++i, ++req) { - if (id->val >= req->first && + if (id->val >= req->first && id->val < req->first + req->range) { val_found = 1; break; @@ -459,7 +460,7 @@ static int __devinit cn_init(void) netlink_kernel_release(dev->nls); return -EINVAL; } - + cn_already_initialized = 1; err = cn_add_callback(&dev->id, "connector", &cn_callback); diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 4dcf08b3fd8..11efd3528ce 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -701,6 +701,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, u32 stag_idx; u32 wptr; + if (rdev_p->flags) + return -EIO; + stag_state = stag_state > 0; stag_idx = (*stag) >> 8; diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 656fe47bc84..9ed65b05517 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h @@ -108,6 +108,8 @@ struct cxio_rdev { struct gen_pool *pbl_pool; struct gen_pool *rqt_pool; struct list_head entry; + u32 flags; +#define CXIO_ERROR_FATAL 1 }; static inline int cxio_num_stags(struct cxio_rdev *rdev_p) diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c index 4489c89d671..37a4fc264a0 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.c +++ b/drivers/infiniband/hw/cxgb3/iwch.c @@ -51,13 +51,15 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS]; static void open_rnic_dev(struct t3cdev *); static void close_rnic_dev(struct t3cdev *); +static void iwch_err_handler(struct t3cdev *, u32, u32); struct cxgb3_client t3c_client = { .name = "iw_cxgb3", .add = open_rnic_dev, .remove = close_rnic_dev, .handlers = t3c_handlers, - .redirect = iwch_ep_redirect + .redirect = iwch_ep_redirect, + .err_handler = iwch_err_handler }; static LIST_HEAD(dev_list); @@ -160,6 +162,17 @@ static void close_rnic_dev(struct t3cdev *tdev) mutex_unlock(&dev_mutex); } +static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) +{ + struct cxio_rdev *rdev = tdev->ulp; + + if (status == OFFLOAD_STATUS_DOWN) + 
rdev->flags = CXIO_ERROR_FATAL; + + return; + +} + static int __init iwch_init_module(void) { int err; diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 5d139db1b77..53df9de2342 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2541,7 +2541,7 @@ static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic { struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); - netif_rx_schedule(&nesvnic->napi); + napi_schedule(&nesvnic->napi); } diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 57a47cf7e51..f5484ad1279 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -111,7 +111,7 @@ static int nes_netdev_poll(struct napi_struct *napi, int budget) nes_nic_ce_handler(nesdev, nescq); if (nescq->cqes_pending == 0) { - netif_rx_complete(napi); + napi_complete(napi); /* clear out completed cqes and arm */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a1925810be3..da608273983 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -446,11 +446,11 @@ poll_more: if (dev->features & NETIF_F_LRO) lro_flush_all(&priv->lro.lro_mgr); - netif_rx_complete(napi); + napi_complete(napi); if (unlikely(ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)) && - netif_rx_reschedule(napi)) + napi_reschedule(napi)) goto poll_more; } @@ -462,7 +462,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) struct net_device *dev = dev_ptr; struct ipoib_dev_priv *priv = netdev_priv(dev); - netif_rx_schedule(&priv->napi); + napi_schedule(&priv->napi); } static void drain_tx_cq(struct net_device *dev) diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index 3d1318a3e68..1c5344aa57c 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c @@ -197,6 +197,17 @@ out: return ERR_PTR(err); } +static const struct net_device_ops el_netdev_ops = { + .ndo_open = el_open, + .ndo_stop = el1_close, + .ndo_start_xmit = el_start_xmit, + .ndo_tx_timeout = el_timeout, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /** * el1_probe1: * @dev: The device structure to use @@ -305,12 +316,8 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr) * The EL1-specific entries in the device structure. 
*/ - dev->open = &el_open; - dev->hard_start_xmit = &el_start_xmit; - dev->tx_timeout = &el_timeout; + dev->netdev_ops = &el_netdev_ops; dev->watchdog_timeo = HZ; - dev->stop = &el1_close; - dev->set_multicast_list = &set_multicast_list; dev->ethtool_ops = &netdev_ethtool_ops; return 0; } diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index 6124605bef0..ea1ad8ce883 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c @@ -1348,6 +1348,17 @@ static int __init elp_autodetect(struct net_device *dev) return 0; /* Because of this, the layer above will return -ENODEV */ } +static const struct net_device_ops elp_netdev_ops = { + .ndo_open = elp_open, + .ndo_stop = elp_close, + .ndo_get_stats = elp_get_stats, + .ndo_start_xmit = elp_start_xmit, + .ndo_tx_timeout = elp_timeout, + .ndo_set_multicast_list = elp_set_mc_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; /****************************************************** * @@ -1552,13 +1563,8 @@ static int __init elplus_setup(struct net_device *dev) printk(KERN_ERR "%s: adapter configuration failed\n", dev->name); } - dev->open = elp_open; /* local */ - dev->stop = elp_close; /* local */ - dev->get_stats = elp_get_stats; /* local */ - dev->hard_start_xmit = elp_start_xmit; /* local */ - dev->tx_timeout = elp_timeout; /* local */ + dev->netdev_ops = &elp_netdev_ops; dev->watchdog_timeo = 10*HZ; - dev->set_multicast_list = elp_set_mc_list; /* local */ dev->ethtool_ops = &netdev_ethtool_ops; /* local */ dev->mem_start = dev->mem_end = 0; diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 423e65d0ba7..fbbaf826def 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c @@ -352,6 +352,16 @@ out: return ERR_PTR(err); } +static const struct net_device_ops netdev_ops = { + .ndo_open = el16_open, + .ndo_stop = el16_close, + .ndo_start_xmit = el16_send_packet, + .ndo_tx_timeout = el16_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + static int __init el16_probe1(struct net_device *dev, int ioaddr) { static unsigned char init_ID_done, version_printed; @@ -449,10 +459,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) goto out1; } - dev->open = el16_open; - dev->stop = el16_close; - dev->hard_start_xmit = el16_send_packet; - dev->tx_timeout = el16_tx_timeout; + dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &netdev_ethtool_ops; dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 535c234286e..d58919c7032 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -537,6 +537,21 @@ static struct mca_driver el3_mca_driver = { static int mca_registered; #endif /* CONFIG_MCA */ +static const struct net_device_ops netdev_ops = { + .ndo_open = el3_open, + .ndo_stop = el3_close, + .ndo_start_xmit = el3_start_xmit, + .ndo_get_stats = el3_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = el3_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = el3_poll_controller, +#endif +}; + static int __devinit el3_common_init(struct net_device *dev) { struct el3_private *lp = netdev_priv(dev); @@ -553,16 +568,8 @@ static int __devinit el3_common_init(struct net_device *dev) } /* The EL3-specific entries in the 
device structure. */ - dev->open = &el3_open; - dev->hard_start_xmit = &el3_start_xmit; - dev->stop = &el3_close; - dev->get_stats = &el3_get_stats; - dev->set_multicast_list = &set_multicast_list; - dev->tx_timeout = el3_tx_timeout; + dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = el3_poll_controller; -#endif SET_ETHTOOL_OPS(dev, &ethtool_ops); err = register_netdev(dev); diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 39ac12233aa..167bf23066e 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c @@ -563,6 +563,20 @@ no_pnp: return NULL; } + +static const struct net_device_ops netdev_ops = { + .ndo_open = corkscrew_open, + .ndo_stop = corkscrew_close, + .ndo_start_xmit = corkscrew_start_xmit, + .ndo_tx_timeout = corkscrew_timeout, + .ndo_get_stats = corkscrew_get_stats, + .ndo_set_multicast_list = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + + static int corkscrew_setup(struct net_device *dev, int ioaddr, struct pnp_dev *idev, int card_number) { @@ -681,13 +695,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; /* The 3c51x-specific entries in the device structure. */ - dev->open = &corkscrew_open; - dev->hard_start_xmit = &corkscrew_start_xmit; - dev->tx_timeout = &corkscrew_timeout; + dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = (400 * HZ) / 1000; - dev->stop = &corkscrew_close; - dev->get_stats = &corkscrew_get_stats; - dev->set_multicast_list = &set_rx_mode; dev->ethtool_ops = &netdev_ethtool_ops; return register_netdev(dev); diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index ff41e1ff560..8f734d74b51 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -403,6 +403,20 @@ static int elmc_getinfo(char *buf, int slot, void *d) return len; } /* elmc_getinfo() */ +static const struct net_device_ops netdev_ops = { + .ndo_open = elmc_open, + .ndo_stop = elmc_close, + .ndo_get_stats = elmc_get_stats, + .ndo_start_xmit = elmc_send_packet, + .ndo_tx_timeout = elmc_timeout, +#ifdef ELMC_MULTICAST + .ndo_set_multicast_list = set_multicast_list, +#endif + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /*****************************************************************/ static int __init do_elmc_probe(struct net_device *dev) @@ -544,17 +558,8 @@ static int __init do_elmc_probe(struct net_device *dev) printk(KERN_INFO "%s: hardware address %pM\n", dev->name, dev->dev_addr); - dev->open = &elmc_open; - dev->stop = &elmc_close; - dev->get_stats = &elmc_get_stats; - dev->hard_start_xmit = &elmc_send_packet; - dev->tx_timeout = &elmc_timeout; + dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = HZ; -#ifdef ELMC_MULTICAST - dev->set_multicast_list = &set_multicast_list; -#else - dev->set_multicast_list = NULL; -#endif dev->ethtool_ops = &netdev_ethtool_ops; /* note that we haven't actually requested the IRQ from the kernel. 
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 2df3af3b9b2..b61073c42bf 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -288,6 +288,18 @@ struct net_device *__init mc32_probe(int unit) return ERR_PTR(-ENODEV); } +static const struct net_device_ops netdev_ops = { + .ndo_open = mc32_open, + .ndo_stop = mc32_close, + .ndo_start_xmit = mc32_send_packet, + .ndo_get_stats = mc32_get_stats, + .ndo_set_multicast_list = mc32_set_multicast_list, + .ndo_tx_timeout = mc32_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /** * mc32_probe1 - Check a given slot for a board and test the card * @dev: Device structure to fill in @@ -518,12 +530,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot) printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); - dev->open = mc32_open; - dev->stop = mc32_close; - dev->hard_start_xmit = mc32_send_packet; - dev->get_stats = mc32_get_stats; - dev->set_multicast_list = mc32_set_multicast_list; - dev->tx_timeout = mc32_timeout; + dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = HZ*5; /* Board does all the work */ dev->ethtool_ops = &netdev_ethtool_ops; diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index cdbbb6226fc..b2563d384cf 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -992,6 +992,42 @@ out: return rc; } +static const struct net_device_ops boomrang_netdev_ops = { + .ndo_open = vortex_open, + .ndo_stop = vortex_close, + .ndo_start_xmit = boomerang_start_xmit, + .ndo_tx_timeout = vortex_tx_timeout, + .ndo_get_stats = vortex_get_stats, +#ifdef CONFIG_PCI + .ndo_do_ioctl = vortex_ioctl, +#endif + .ndo_set_multicast_list = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_vortex, +#endif +}; + +static const struct net_device_ops vortex_netdev_ops = { + .ndo_open = vortex_open, + .ndo_stop = vortex_close, + .ndo_start_xmit = vortex_start_xmit, + .ndo_tx_timeout = vortex_tx_timeout, + .ndo_get_stats = vortex_get_stats, +#ifdef CONFIG_PCI + .ndo_do_ioctl = vortex_ioctl, +#endif + .ndo_set_multicast_list = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_vortex, +#endif +}; + /* * Start up the PCI/EISA device which is described by *gendev. * Return 0 on success. @@ -1366,18 +1402,16 @@ static int __devinit vortex_probe1(struct device *gendev, } /* The 3c59x-specific entries in the device structure. */ - dev->open = vortex_open; if (vp->full_bus_master_tx) { - dev->hard_start_xmit = boomerang_start_xmit; + dev->netdev_ops = &boomrang_netdev_ops; /* Actually, it still should work with iommu. */ if (card_idx < MAX_UNITS && ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || hw_checksums[card_idx] == 1)) { dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; } - } else { - dev->hard_start_xmit = vortex_start_xmit; - } + } else + dev->netdev_ops = &vortex_netdev_ops; if (print_info) { printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n", @@ -1386,18 +1420,9 @@ static int __devinit vortex_probe1(struct device *gendev, (dev->features & NETIF_F_IP_CSUM) ? 
"en":"dis"); } - dev->stop = vortex_close; - dev->get_stats = vortex_get_stats; -#ifdef CONFIG_PCI - dev->do_ioctl = vortex_ioctl; -#endif dev->ethtool_ops = &vortex_ethtool_ops; - dev->set_multicast_list = set_rx_mode; - dev->tx_timeout = vortex_tx_timeout; dev->watchdog_timeo = (watchdog * HZ) / 1000; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = poll_vortex; -#endif + if (pdev) { vp->pm_state_valid = 1; pci_save_state(VORTEX_PCI(vp)); diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 4e19ae3ce6b..35517b06ec3 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -604,7 +604,7 @@ rx_next: spin_lock_irqsave(&cp->lock, flags); cpw16_f(IntrMask, cp_intr_mask); - __netif_rx_complete(napi); + __napi_complete(napi); spin_unlock_irqrestore(&cp->lock, flags); } @@ -641,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) } if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) - if (netif_rx_schedule_prep(&cp->napi)) { + if (napi_schedule_prep(&cp->napi)) { cpw16_f(IntrMask, cp_norx_intr_mask); - __netif_rx_schedule(&cp->napi); + __napi_schedule(&cp->napi); } if (status & (TxOK | TxErr | TxEmpty | SWInt)) diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index a5b24202d56..5341da604e8 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -2128,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) */ spin_lock_irqsave(&tp->lock, flags); RTL_W16_F(IntrMask, rtl8139_intr_mask); - __netif_rx_complete(napi); + __napi_complete(napi); spin_unlock_irqrestore(&tp->lock, flags); } spin_unlock(&tp->rx_lock); @@ -2178,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) /* Receive packets are processed by poll routine. If not running start it now. */ if (status & RxAckBits){ - if (netif_rx_schedule_prep(&tp->napi)) { + if (napi_schedule_prep(&tp->napi)) { RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); - __netif_rx_schedule(&tp->napi); + __napi_schedule(&tp->napi); } } diff --git a/drivers/net/82596.c b/drivers/net/82596.c index b273596368e..cca94b9c08a 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -1122,6 +1122,17 @@ static void print_eth(unsigned char *add, char *str) static int io = 0x300; static int irq = 10; +static const struct net_device_ops i596_netdev_ops = { + .ndo_open = i596_open, + .ndo_stop = i596_close, + .ndo_start_xmit = i596_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = i596_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + struct net_device * __init i82596_probe(int unit) { struct net_device *dev; @@ -1232,11 +1243,7 @@ found: DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); /* The 82596-specific entries in the device structure. 
*/ - dev->open = i596_open; - dev->stop = i596_close; - dev->hard_start_xmit = i596_start_xmit; - dev->set_multicast_list = set_multicast_list; - dev->tx_timeout = i596_tx_timeout; + dev->netdev_ops = &i596_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ml_priv = (void *)(dev->mem_start); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6bdfd47d679..49f4d50abc5 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1829,10 +1829,10 @@ config 68360_ENET config FEC bool "FEC ethernet controller (of ColdFire CPUs)" - depends on M523x || M527x || M5272 || M528x || M520x || M532x + depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 help Say Y here if you want to use the built-in 10/100 Fast ethernet - controller on some Motorola ColdFire processors. + controller on some Motorola ColdFire and Freescale i.MX processors. config FEC2 bool "Second FEC ethernet controller (on some ColdFire CPUs)" @@ -2022,7 +2022,6 @@ config IGB config IGB_LRO bool "Use software LRO" depends on IGB && INET - select INET_LRO ---help--- Say Y here if you want to use large receive offload. @@ -2408,7 +2407,6 @@ config CHELSIO_T3 tristate "Chelsio Communications T3 10Gb Ethernet support" depends on CHELSIO_T3_DEPENDS select FW_LOADER - select INET_LRO help This driver supports Chelsio T3-based gigabit and 10Gb Ethernet adapters. @@ -2444,7 +2442,6 @@ config ENIC config IXGBE tristate "Intel(R) 10GbE PCI Express adapters support" depends on PCI && INET - select INET_LRO ---help--- This driver supports Intel(R) 10GbE PCI Express family of adapters. For more information on how to identify your adapter, go diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index 7709992bb6b..cb9c95d3ed0 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -831,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) if (rx_pkt_limit > 0) { /* Receive descriptor is empty now */ spin_lock_irqsave(&lp->lock, flags); - __netif_rx_complete(napi); + __napi_complete(napi); writel(VAL0|RINTEN0, mmio + INTEN0); writel(VAL2 | RDMD0, mmio + CMD0); spin_unlock_irqrestore(&lp->lock, flags); @@ -1170,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) /* Check if Receive Interrupt has occurred. */ if (intr0 & RINT0) { - if (netif_rx_schedule_prep(&lp->napi)) { + if (napi_schedule_prep(&lp->napi)) { /* Disable receive interupts */ writel(RINTEN0, mmio + INTEN0); /* Schedule a polling routine */ - __netif_rx_schedule(&lp->napi); + __napi_schedule(&lp->napi); } else if (intren0 & RINTEN0) { printk("************Driver bug! 
\ interrupt while in poll\n"); diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 3ff9affb1a9..646dfc5f50c 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c @@ -102,7 +102,7 @@ static void rx(struct net_device *dev, int bufnum, skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE); @@ -122,7 +122,7 @@ static void rx(struct net_device *dev, int bufnum, BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); - skb->protocol = __constant_htons(ETH_P_ARCNET); + skb->protocol = cpu_to_be16(ETH_P_ARCNET); ; netif_rx(skb); } diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 6b53e5ed125..a80d4a30a46 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -95,17 +95,16 @@ EXPORT_SYMBOL(arcnet_unregister_proto); EXPORT_SYMBOL(arcnet_debug); EXPORT_SYMBOL(alloc_arcdev); EXPORT_SYMBOL(arcnet_interrupt); +EXPORT_SYMBOL(arcnet_open); +EXPORT_SYMBOL(arcnet_close); +EXPORT_SYMBOL(arcnet_send_packet); +EXPORT_SYMBOL(arcnet_timeout); /* Internal function prototypes */ -static int arcnet_open(struct net_device *dev); -static int arcnet_close(struct net_device *dev); -static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); -static void arcnet_timeout(struct net_device *dev); static int arcnet_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); static int arcnet_rebuild_header(struct sk_buff *skb); -static struct net_device_stats *arcnet_get_stats(struct net_device *dev); static int go_tx(struct net_device *dev); static int debug = ARCNET_DEBUG; @@ -322,11 +321,18 @@ static const struct header_ops arcnet_header_ops = { .rebuild = arcnet_rebuild_header, }; +static const struct net_device_ops arcnet_netdev_ops = { + .ndo_open = arcnet_open, + .ndo_stop = arcnet_close, + .ndo_start_xmit = arcnet_send_packet, + .ndo_tx_timeout = arcnet_timeout, +}; /* Setup a struct device for ARCnet. */ static void arcdev_setup(struct net_device *dev) { dev->type = ARPHRD_ARCNET; + dev->netdev_ops = &arcnet_netdev_ops; dev->header_ops = &arcnet_header_ops; dev->hard_header_len = sizeof(struct archdr); dev->mtu = choose_mtu(); @@ -339,18 +345,9 @@ static void arcdev_setup(struct net_device *dev) /* New-style flags. */ dev->flags = IFF_BROADCAST; - /* - * Put in this stuff here, so we don't have to export the symbols to - * the chipset drivers. - */ - dev->open = arcnet_open; - dev->stop = arcnet_close; - dev->hard_start_xmit = arcnet_send_packet; - dev->tx_timeout = arcnet_timeout; - dev->get_stats = arcnet_get_stats; } -struct net_device *alloc_arcdev(char *name) +struct net_device *alloc_arcdev(const char *name) { struct net_device *dev; @@ -372,7 +369,7 @@ struct net_device *alloc_arcdev(char *name) * that "should" only need to be set once at boot, so that there is * non-reboot way to recover if something goes wrong. */ -static int arcnet_open(struct net_device *dev) +int arcnet_open(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); int count, newmtu, error; @@ -472,7 +469,7 @@ static int arcnet_open(struct net_device *dev) /* The inverse routine to arcnet_open - shuts down the card. 
*/ -static int arcnet_close(struct net_device *dev) +int arcnet_close(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); @@ -583,8 +580,8 @@ static int arcnet_rebuild_header(struct sk_buff *skb) } else { BUGMSG(D_NORMAL, "I don't understand ethernet protocol %Xh addresses!\n", type); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; } /* if we couldn't resolve the address... give up. */ @@ -601,7 +598,7 @@ static int arcnet_rebuild_header(struct sk_buff *skb) /* Called by the kernel in order to transmit a packet. */ -static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) +int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); struct archdr *pkt; @@ -645,7 +642,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) !proto->ack_tx) { /* done right away and we don't want to acknowledge the package later - forget about it now */ - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; freeskb = 1; } else { /* do it the 'split' way */ @@ -709,7 +706,7 @@ static int go_tx(struct net_device *dev) /* start sending */ ACOMMAND(TXcmd | (lp->cur_tx << 3)); - lp->stats.tx_packets++; + dev->stats.tx_packets++; lp->lasttrans_dest = lp->lastload_dest; lp->lastload_dest = 0; lp->excnak_pending = 0; @@ -720,7 +717,7 @@ static int go_tx(struct net_device *dev) /* Called by the kernel when transmit times out */ -static void arcnet_timeout(struct net_device *dev) +void arcnet_timeout(struct net_device *dev) { unsigned long flags; struct arcnet_local *lp = netdev_priv(dev); @@ -732,11 +729,11 @@ static void arcnet_timeout(struct net_device *dev) msg = " - missed IRQ?"; } else { msg = ""; - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; lp->timed_out = 1; ACOMMAND(NOTXcmd | (lp->cur_tx << 3)); } - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* make sure we didn't miss a TX or a EXC NAK IRQ */ AINTMASK(0); @@ -865,8 +862,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) "transmit was not acknowledged! " "(status=%Xh, dest=%02Xh)\n", status, lp->lasttrans_dest); - lp->stats.tx_errors++; - lp->stats.tx_carrier_errors++; + dev->stats.tx_errors++; + dev->stats.tx_carrier_errors++; } else { BUGMSG(D_DURING, "broadcast was not acknowledged; that's normal " @@ -905,7 +902,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) if (txbuf != -1) { if (lp->outgoing.proto->continue_tx(dev, txbuf)) { /* that was the last segment */ - lp->stats.tx_bytes += lp->outgoing.skb->len; + dev->stats.tx_bytes += lp->outgoing.skb->len; if(!lp->outgoing.proto->ack_tx) { dev_kfree_skb_irq(lp->outgoing.skb); @@ -930,7 +927,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) } if (status & lp->intmask & RECONflag) { ACOMMAND(CFLAGScmd | CONFIGclear); - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", status); @@ -1038,8 +1035,8 @@ static void arcnet_rx(struct net_device *dev, int bufnum) "(%d+4 bytes)\n", bufnum, pkt.hard.source, pkt.hard.dest, length); - lp->stats.rx_packets++; - lp->stats.rx_bytes += length + ARC_HDR_SIZE; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length + ARC_HDR_SIZE; /* call the right receiver for the protocol */ if (arc_proto_map[soft->proto]->is_ip) { @@ -1067,18 +1064,6 @@ static void arcnet_rx(struct net_device *dev, int bufnum) } - -/* - * Get the current statistics. 
This may be called with the card open or - * closed. - */ -static struct net_device_stats *arcnet_get_stats(struct net_device *dev) -{ - struct arcnet_local *lp = netdev_priv(dev); - return &lp->stats; -} - - static void null_rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) { diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 30580bbe252..083e21094b2 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -119,7 +119,7 @@ static void rx(struct net_device *dev, int bufnum, skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); @@ -148,7 +148,7 @@ static void rx(struct net_device *dev, int bufnum, BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); - skb->protocol = __constant_htons(ETH_P_ARCNET); + skb->protocol = cpu_to_be16(ETH_P_ARCNET); ; netif_rx(skb); } @@ -282,7 +282,7 @@ static int ack_tx(struct net_device *dev, int acked) BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", *((int*)&ackpkt->soft.cap.cookie[0])); - ackskb->protocol = __constant_htons(ETH_P_ARCNET); + ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); netif_rx(ackskb); diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index ea53a940272..db08fc24047 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -151,6 +151,8 @@ static int __init com20020_init(void) if (node && node != 0xff) dev->dev_addr[0] = node; + dev->netdev_ops = &com20020_netdev_ops; + lp = netdev_priv(dev); lp->backplane = backplane; lp->clockp = clockp & 7; diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 8b51f632581..dbf4de39754 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -72,6 +72,9 @@ static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_de dev = alloc_arcdev(device); if (!dev) return -ENOMEM; + + dev->netdev_ops = &com20020_netdev_ops; + lp = netdev_priv(dev); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 103688358fb..651275a5f3d 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -149,6 +149,14 @@ int com20020_check(struct net_device *dev) return 0; } +const struct net_device_ops com20020_netdev_ops = { + .ndo_open = arcnet_open, + .ndo_stop = arcnet_close, + .ndo_start_xmit = arcnet_send_packet, + .ndo_tx_timeout = arcnet_timeout, + .ndo_set_multicast_list = com20020_set_mc_list, +}; + /* Set up the struct net_device associated with this card. Called after * probing succeeds. */ @@ -170,8 +178,6 @@ int com20020_found(struct net_device *dev, int shared) lp->hw.copy_from_card = com20020_copy_from_card; lp->hw.close = com20020_close; - dev->set_multicast_list = com20020_set_mc_list; - if (!dev->dev_addr[0]) dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! 
*/ @@ -342,6 +348,7 @@ static void com20020_set_mc_list(struct net_device *dev) defined(CONFIG_ARCNET_COM20020_CS_MODULE) EXPORT_SYMBOL(com20020_check); EXPORT_SYMBOL(com20020_found); +EXPORT_SYMBOL(com20020_netdev_ops); #endif MODULE_LICENSE("GPL"); diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 49d39a9cb69..06f8fa2f8f2 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c @@ -88,7 +88,6 @@ MODULE_LICENSE("GPL"); */ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) { - struct arcnet_local *lp = netdev_priv(dev); struct archdr *pkt = (struct archdr *) skb->data; struct arc_rfc1051 *soft = &pkt->soft.rfc1051; int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; @@ -112,8 +111,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) return htons(ETH_P_ARP); default: - lp->stats.rx_errors++; - lp->stats.rx_crc_errors++; + dev->stats.rx_errors++; + dev->stats.rx_crc_errors++; return 0; } @@ -140,7 +139,7 @@ static void rx(struct net_device *dev, int bufnum, skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE); @@ -168,7 +167,6 @@ static void rx(struct net_device *dev, int bufnum, static int build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr) { - struct arcnet_local *lp = netdev_priv(dev); int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); struct arc_rfc1051 *soft = &pkt->soft.rfc1051; @@ -184,8 +182,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev, default: BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n", type, type); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; return 0; } diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index 2303d3a1f4b..745530651c4 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c @@ -92,7 +92,6 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) { struct archdr *pkt = (struct archdr *) skb->data; struct arc_rfc1201 *soft = &pkt->soft.rfc1201; - struct arcnet_local *lp = netdev_priv(dev); int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; /* Pull off the arcnet header. 
*/ @@ -121,8 +120,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) case ARC_P_NOVELL_EC: return htons(ETH_P_802_3); default: - lp->stats.rx_errors++; - lp->stats.rx_crc_errors++; + dev->stats.rx_errors++; + dev->stats.rx_crc_errors++; return 0; } @@ -172,8 +171,8 @@ static void rx(struct net_device *dev, int bufnum, in->sequence, soft->split_flag, soft->sequence); lp->rfc1201.aborted_seq = soft->sequence; dev_kfree_skb_irq(in->skb); - lp->stats.rx_errors++; - lp->stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; in->skb = NULL; } in->sequence = soft->sequence; @@ -181,7 +180,7 @@ static void rx(struct net_device *dev, int bufnum, skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE); @@ -213,7 +212,7 @@ static void rx(struct net_device *dev, int bufnum, BUGMSG(D_EXTRA, "ARP source address was 00h, set to %02Xh.\n", saddr); - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; *cptr = saddr; } else { BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n", @@ -222,8 +221,8 @@ static void rx(struct net_device *dev, int bufnum, } else { BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n", arp->ar_hln, arp->ar_pln); - lp->stats.rx_errors++; - lp->stats.rx_crc_errors++; + dev->stats.rx_errors++; + dev->stats.rx_crc_errors++; } } BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); @@ -257,8 +256,8 @@ static void rx(struct net_device *dev, int bufnum, soft->split_flag); dev_kfree_skb_irq(in->skb); in->skb = NULL; - lp->stats.rx_errors++; - lp->stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; in->lastpacket = in->numpackets = 0; } if (soft->split_flag & 1) { /* first packet in split */ @@ -269,8 +268,8 @@ static void rx(struct net_device *dev, int bufnum, "(splitflag=%d, seq=%d)\n", in->sequence, soft->split_flag, soft->sequence); - lp->stats.rx_errors++; - lp->stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; dev_kfree_skb_irq(in->skb); } in->sequence = soft->sequence; @@ -281,8 +280,8 @@ static void rx(struct net_device *dev, int bufnum, BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n", soft->split_flag); lp->rfc1201.aborted_seq = soft->sequence; - lp->stats.rx_errors++; - lp->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; return; } in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE, @@ -290,7 +289,7 @@ static void rx(struct net_device *dev, int bufnum, if (skb == NULL) { BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n"); lp->rfc1201.aborted_seq = soft->sequence; - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb->dev = dev; @@ -314,8 +313,8 @@ static void rx(struct net_device *dev, int bufnum, "first! (splitflag=%d, seq=%d, aborted=%d)\n", soft->split_flag, soft->sequence, lp->rfc1201.aborted_seq); - lp->stats.rx_errors++; - lp->stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; } return; } @@ -325,8 +324,8 @@ static void rx(struct net_device *dev, int bufnum, if (packetnum <= in->lastpacket - 1) { BUGMSG(D_EXTRA, "duplicate splitpacket ignored! 
(splitflag=%d)\n", soft->split_flag); - lp->stats.rx_errors++; - lp->stats.rx_frame_errors++; + dev->stats.rx_errors++; + dev->stats.rx_frame_errors++; return; } /* "bad" duplicate, kill reassembly */ @@ -336,8 +335,8 @@ static void rx(struct net_device *dev, int bufnum, lp->rfc1201.aborted_seq = soft->sequence; dev_kfree_skb_irq(in->skb); in->skb = NULL; - lp->stats.rx_errors++; - lp->stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; in->lastpacket = in->numpackets = 0; return; } @@ -404,8 +403,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev, default: BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n", type, type); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; return 0; } diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index 3ec20cc18b0..cc7708775da 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -298,7 +298,7 @@ poll_some_more: int more = 0; spin_lock_irq(&ep->rx_lock); - __netif_rx_complete(napi); + __napi_complete(napi); wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); if (ep93xx_have_more_rx(ep)) { wrl(ep, REG_INTEN, REG_INTEN_TX); @@ -307,7 +307,7 @@ poll_some_more: } spin_unlock_irq(&ep->rx_lock); - if (more && netif_rx_reschedule(napi)) + if (more && napi_reschedule(napi)) goto poll_some_more; } @@ -415,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id) if (status & REG_INTSTS_RX) { spin_lock(&ep->rx_lock); - if (likely(netif_rx_schedule_prep(&ep->napi))) { + if (likely(napi_schedule_prep(&ep->napi))) { wrl(ep, REG_INTEN, REG_INTEN_TX); - __netif_rx_schedule(&ep->napi); + __napi_schedule(&ep->napi); } spin_unlock(&ep->rx_lock); } diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 5fce1d5c1a1..5fe17d5eaa5 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -473,7 +473,7 @@ static void eth_rx_irq(void *pdev) printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); #endif qmgr_disable_irq(port->plat->rxq); - netif_rx_schedule(&port->napi); + napi_schedule(&port->napi); } static int eth_poll(struct napi_struct *napi, int budget) @@ -498,16 +498,16 @@ static int eth_poll(struct napi_struct *napi, int budget) if ((n = queue_get_desc(rxq, port, 0)) < 0) { #if DEBUG_RX - printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n", + printk(KERN_DEBUG "%s: eth_poll napi_complete\n", dev->name); #endif - netif_rx_complete(napi); + napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_empty(rxq) && - netif_rx_reschedule(napi)) { + napi_reschedule(napi)) { #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll" - " netif_rx_reschedule successed\n", + " napi_reschedule successed\n", dev->name); #endif qmgr_disable_irq(rxq); @@ -1036,7 +1036,7 @@ static int eth_open(struct net_device *dev) } ports_open++; /* we may already have RX data, enables IRQ */ - netif_rx_schedule(&port->napi); + napi_schedule(&port->napi); return 0; } diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index 1cf2f949c0b..b39210cf4fb 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c @@ -1059,7 +1059,7 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { strlcpy(info->driver, MODULENAME, sizeof(info->driver)); strlcpy(info->version, MODULEVERSION, sizeof(info->version)); - strlcpy(info->bus_info, ndev->dev.parent->bus_id, + strlcpy(info->bus_info, dev_name(ndev->dev.parent), sizeof(info->bus_info)); } 
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index bb9094d4cbc..c758884728a 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data) AT_WRITE_REG(hw, REG_IMR, IMR_NORMAL_MASK & ~ISR_RX_EVENT); AT_WRITE_FLUSH(hw); - if (likely(netif_rx_schedule_prep( + if (likely(napi_schedule_prep( &adapter->napi))) - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } } while (--max_ints > 0); /* re-enable Interrupt*/ @@ -1514,7 +1514,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget) /* If no Tx and not enough Rx work done, exit the polling mode */ if (work_done < budget) { quit_polling: - netif_rx_complete(napi); + napi_complete(napi); imr_data = AT_READ_REG(&adapter->hw, REG_IMR); AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); /* test debug */ diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 9c875bb3f76..4274e4ac963 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -81,24 +81,6 @@ MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); -// prototypes -static void hard_stop(struct net_device *); -static void enable_rx_tx(struct net_device *dev); -static struct net_device * au1000_probe(int port_num); -static int au1000_init(struct net_device *); -static int au1000_open(struct net_device *); -static int au1000_close(struct net_device *); -static int au1000_tx(struct sk_buff *, struct net_device *); -static int au1000_rx(struct net_device *); -static irqreturn_t au1000_interrupt(int, void *); -static void au1000_tx_timeout(struct net_device *); -static void set_rx_mode(struct net_device *); -static int au1000_ioctl(struct net_device *, struct ifreq *, int); -static int au1000_mdio_read(struct net_device *, int, int); -static void au1000_mdio_write(struct net_device *, int, int, u16); -static void au1000_adjust_link(struct net_device *); -static void enable_mac(struct net_device *, int); - /* * Theory of operation * @@ -188,6 +170,26 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES]; # error MAC0-associated PHY attached 2nd MACs MII bus not supported yet #endif +static void enable_mac(struct net_device *dev, int force_reset) +{ + unsigned long flags; + struct au1000_private *aup = netdev_priv(dev); + + spin_lock_irqsave(&aup->lock, flags); + + if(force_reset || (!aup->mac_enabled)) { + *aup->enable = MAC_EN_CLOCK_ENABLE; + au_sync_delay(2); + *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 + | MAC_EN_CLOCK_ENABLE); + au_sync_delay(2); + + aup->mac_enabled = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); +} + /* * MII operations */ @@ -281,6 +283,107 @@ static int au1000_mdiobus_reset(struct mii_bus *bus) return 0; } +static void hard_stop(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (au1000_debug > 4) + printk(KERN_INFO "%s: hard stop\n", dev->name); + + aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); + au_sync_delay(10); +} + +static void enable_rx_tx(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (au1000_debug > 4) + printk(KERN_INFO "%s: enable_rx_tx\n", dev->name); + + aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE); + au_sync_delay(10); +} + +static void +au1000_adjust_link(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct phy_device *phydev = aup->phy_dev; + unsigned long flags; + + int status_change = 0; + + 
BUG_ON(!aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (phydev->link && (aup->old_speed != phydev->speed)) { + // speed changed + + switch(phydev->speed) { + case SPEED_10: + case SPEED_100: + break; + default: + printk(KERN_WARNING + "%s: Speed (%d) is not 10/100 ???\n", + dev->name, phydev->speed); + break; + } + + aup->old_speed = phydev->speed; + + status_change = 1; + } + + if (phydev->link && (aup->old_duplex != phydev->duplex)) { + // duplex mode changed + + /* switching duplex mode requires to disable rx and tx! */ + hard_stop(dev); + + if (DUPLEX_FULL == phydev->duplex) + aup->mac->control = ((aup->mac->control + | MAC_FULL_DUPLEX) + & ~MAC_DISABLE_RX_OWN); + else + aup->mac->control = ((aup->mac->control + & ~MAC_FULL_DUPLEX) + | MAC_DISABLE_RX_OWN); + au_sync_delay(1); + + enable_rx_tx(dev); + aup->old_duplex = phydev->duplex; + + status_change = 1; + } + + if(phydev->link != aup->old_link) { + // link state changed + + if (!phydev->link) { + /* link went down */ + aup->old_speed = 0; + aup->old_duplex = -1; + } + + aup->old_link = phydev->link; + status_change = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); + + if (status_change) { + if (phydev->link) + printk(KERN_INFO "%s: link up (%d/%s)\n", + dev->name, phydev->speed, + DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); + else + printk(KERN_INFO "%s: link down\n", dev->name); + } +} + static int mii_probe (struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); @@ -355,8 +458,8 @@ static int mii_probe (struct net_device *dev) /* now we are supposed to have a proper phydev, to attach to... */ BUG_ON(phydev->attached_dev); - phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, + 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); @@ -381,8 +484,8 @@ static int mii_probe (struct net_device *dev) aup->phy_dev = phydev; printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); + "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); return 0; } @@ -412,48 +515,6 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB) aup->pDBfree = pDB; } -static void enable_rx_tx(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - - if (au1000_debug > 4) - printk(KERN_INFO "%s: enable_rx_tx\n", dev->name); - - aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE); - au_sync_delay(10); -} - -static void hard_stop(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - - if (au1000_debug > 4) - printk(KERN_INFO "%s: hard stop\n", dev->name); - - aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); - au_sync_delay(10); -} - -static void enable_mac(struct net_device *dev, int force_reset) -{ - unsigned long flags; - struct au1000_private *aup = netdev_priv(dev); - - spin_lock_irqsave(&aup->lock, flags); - - if(force_reset || (!aup->mac_enabled)) { - *aup->enable = MAC_EN_CLOCK_ENABLE; - au_sync_delay(2); - *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 - | MAC_EN_CLOCK_ENABLE); - au_sync_delay(2); - - aup->mac_enabled = 1; - } - - spin_unlock_irqrestore(&aup->lock, flags); -} - static void reset_mac_unlocked(struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); @@ -542,30 +603,6 @@ static struct 
{ static int num_ifs; /* - * Setup the base address and interrupt of the Au1xxx ethernet macs - * based on cpu type and whether the interface is enabled in sys_pinfunc - * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0. - */ -static int __init au1000_init_module(void) -{ - int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4); - struct net_device *dev; - int i, found_one = 0; - - num_ifs = NUM_ETH_INTERFACES - ni; - - for(i = 0; i < num_ifs; i++) { - dev = au1000_probe(i); - iflist[i].dev = dev; - if (dev) - found_one++; - } - if (!found_one) - return -ENODEV; - return 0; -} - -/* * ethtool operations */ @@ -611,199 +648,6 @@ static const struct ethtool_ops au1000_ethtool_ops = { .get_link = ethtool_op_get_link, }; -static struct net_device * au1000_probe(int port_num) -{ - static unsigned version_printed = 0; - struct au1000_private *aup = NULL; - struct net_device *dev = NULL; - db_dest_t *pDB, *pDBfree; - char ethaddr[6]; - int irq, i, err; - u32 base, macen; - - if (port_num >= NUM_ETH_INTERFACES) - return NULL; - - base = CPHYSADDR(iflist[port_num].base_addr ); - macen = CPHYSADDR(iflist[port_num].macen_addr); - irq = iflist[port_num].irq; - - if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") || - !request_mem_region(macen, 4, "Au1x00 ENET")) - return NULL; - - if (version_printed++ == 0) - printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); - - dev = alloc_etherdev(sizeof(struct au1000_private)); - if (!dev) { - printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); - return NULL; - } - - if ((err = register_netdev(dev)) != 0) { - printk(KERN_ERR "%s: Cannot register net device, error %d\n", - DRV_NAME, err); - free_netdev(dev); - return NULL; - } - - printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n", - dev->name, base, irq); - - aup = netdev_priv(dev); - - spin_lock_init(&aup->lock); - - /* Allocate the data buffers */ - /* Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - &aup->dma_addr, 0); - if (!aup->vaddr) { - free_netdev(dev); - release_mem_region( base, MAC_IOSIZE); - release_mem_region(macen, 4); - return NULL; - } - - /* aup->mac is the base address of the MAC's registers */ - aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr; - - /* Setup some variables for quick register address access */ - aup->enable = (volatile u32 *)iflist[port_num].macen_addr; - aup->mac_id = port_num; - au_macs[port_num] = aup; - - if (port_num == 0) { - if (prom_get_ethernet_addr(ethaddr) == 0) - memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); - else { - printk(KERN_INFO "%s: No MAC address found\n", - dev->name); - /* Use the hard coded MAC addresses */ - } - - setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); - } else if (port_num == 1) - setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); - - /* - * Assign to the Ethernet ports two consecutive MAC addresses - * to match those that are printed on their stickers - */ - memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr)); - dev->dev_addr[5] += port_num; - - *aup->enable = 0; - aup->mac_enabled = 0; - - aup->mii_bus = mdiobus_alloc(); - if (aup->mii_bus == NULL) - goto err_out; - - aup->mii_bus->priv = dev; - aup->mii_bus->read = au1000_mdiobus_read; - aup->mii_bus->write = au1000_mdiobus_write; - aup->mii_bus->reset = au1000_mdiobus_reset; - aup->mii_bus->name = "au1000_eth_mii"; - snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); - 
aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - for(i = 0; i < PHY_MAX_ADDR; ++i) - aup->mii_bus->irq[i] = PHY_POLL; - - /* if known, set corresponding PHY IRQs */ -#if defined(AU1XXX_PHY_STATIC_CONFIG) -# if defined(AU1XXX_PHY0_IRQ) - if (AU1XXX_PHY0_BUSID == aup->mac_id) - aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ; -# endif -# if defined(AU1XXX_PHY1_IRQ) - if (AU1XXX_PHY1_BUSID == aup->mac_id) - aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ; -# endif -#endif - mdiobus_register(aup->mii_bus); - - if (mii_probe(dev) != 0) { - goto err_out; - } - - pDBfree = NULL; - /* setup the data buffer descriptors and attach a buffer to each one */ - pDB = aup->db; - for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { - pDB->pnext = pDBfree; - pDBfree = pDB; - pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); - pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); - pDB++; - } - aup->pDBfree = pDBfree; - - for (i = 0; i < NUM_RX_DMA; i++) { - pDB = GetFreeDB(aup); - if (!pDB) { - goto err_out; - } - aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; - aup->rx_db_inuse[i] = pDB; - } - for (i = 0; i < NUM_TX_DMA; i++) { - pDB = GetFreeDB(aup); - if (!pDB) { - goto err_out; - } - aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; - aup->tx_dma_ring[i]->len = 0; - aup->tx_db_inuse[i] = pDB; - } - - dev->base_addr = base; - dev->irq = irq; - dev->open = au1000_open; - dev->hard_start_xmit = au1000_tx; - dev->stop = au1000_close; - dev->set_multicast_list = &set_rx_mode; - dev->do_ioctl = &au1000_ioctl; - SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); - dev->tx_timeout = au1000_tx_timeout; - dev->watchdog_timeo = ETH_TX_TIMEOUT; - - /* - * The boot code uses the ethernet controller, so reset it to start - * fresh. au1000_init() expects that the device is in reset state. - */ - reset_mac(dev); - - return dev; - -err_out: - if (aup->mii_bus != NULL) { - mdiobus_unregister(aup->mii_bus); - mdiobus_free(aup->mii_bus); - } - - /* here we should have a valid dev plus aup-> register addresses - * so we can reset the mac properly.*/ - reset_mac(dev); - - for (i = 0; i < NUM_RX_DMA; i++) { - if (aup->rx_db_inuse[i]) - ReleaseDB(aup, aup->rx_db_inuse[i]); - } - for (i = 0; i < NUM_TX_DMA; i++) { - if (aup->tx_db_inuse[i]) - ReleaseDB(aup, aup->tx_db_inuse[i]); - } - dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); - unregister_netdev(dev); - free_netdev(dev); - release_mem_region( base, MAC_IOSIZE); - release_mem_region(macen, 4); - return NULL; -} /* * Initialize the interface. 
@@ -864,83 +708,170 @@ static int au1000_init(struct net_device *dev) return 0; } -static void -au1000_adjust_link(struct net_device *dev) +static inline void update_rx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = netdev_priv(dev); - struct phy_device *phydev = aup->phy_dev; - unsigned long flags; + struct net_device_stats *ps = &dev->stats; - int status_change = 0; + ps->rx_packets++; + if (status & RX_MCAST_FRAME) + ps->multicast++; - BUG_ON(!aup->phy_dev); + if (status & RX_ERROR) { + ps->rx_errors++; + if (status & RX_MISSED_FRAME) + ps->rx_missed_errors++; + if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) + ps->rx_length_errors++; + if (status & RX_CRC_ERROR) + ps->rx_crc_errors++; + if (status & RX_COLL) + ps->collisions++; + } + else + ps->rx_bytes += status & RX_FRAME_LEN_MASK; - spin_lock_irqsave(&aup->lock, flags); +} - if (phydev->link && (aup->old_speed != phydev->speed)) { - // speed changed +/* + * Au1000 receive routine. + */ +static int au1000_rx(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct sk_buff *skb; + volatile rx_dma_t *prxd; + u32 buff_stat, status; + db_dest_t *pDB; + u32 frmlen; - switch(phydev->speed) { - case SPEED_10: - case SPEED_100: - break; - default: - printk(KERN_WARNING - "%s: Speed (%d) is not 10/100 ???\n", - dev->name, phydev->speed); - break; - } + if (au1000_debug > 5) + printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); - aup->old_speed = phydev->speed; + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + while (buff_stat & RX_T_DONE) { + status = prxd->status; + pDB = aup->rx_db_inuse[aup->rx_head]; + update_rx_stats(dev, status); + if (!(status & RX_ERROR)) { - status_change = 1; + /* good frame */ + frmlen = (status & RX_FRAME_LEN_MASK); + frmlen -= 4; /* Remove FCS */ + skb = dev_alloc_skb(frmlen + 2); + if (skb == NULL) { + printk(KERN_ERR + "%s: Memory squeeze, dropping packet.\n", + dev->name); + dev->stats.rx_dropped++; + continue; + } + skb_reserve(skb, 2); /* 16 byte IP header align */ + skb_copy_to_linear_data(skb, + (unsigned char *)pDB->vaddr, frmlen); + skb_put(skb, frmlen); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); /* pass the packet to upper layers */ + } + else { + if (au1000_debug > 4) { + if (status & RX_MISSED_FRAME) + printk("rx miss\n"); + if (status & RX_WDOG_TIMER) + printk("rx wdog\n"); + if (status & RX_RUNT) + printk("rx runt\n"); + if (status & RX_OVERLEN) + printk("rx overlen\n"); + if (status & RX_COLL) + printk("rx coll\n"); + if (status & RX_MII_ERROR) + printk("rx mii error\n"); + if (status & RX_CRC_ERROR) + printk("rx crc error\n"); + if (status & RX_LEN_ERROR) + printk("rx len error\n"); + if (status & RX_U_CNTRL_FRAME) + printk("rx u control frame\n"); + if (status & RX_MISSED_FRAME) + printk("rx miss\n"); + } + } + prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); + au_sync(); + + /* next descriptor */ + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; } + return 0; +} - if (phydev->link && (aup->old_duplex != phydev->duplex)) { - // duplex mode changed +static void update_tx_stats(struct net_device *dev, u32 status) +{ + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; - /* switching duplex mode requires to disable rx and tx! 
*/ - hard_stop(dev); + if (status & TX_FRAME_ABORTED) { + if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { + if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { + /* any other tx errors are only valid + * in half duplex mode */ + ps->tx_errors++; + ps->tx_aborted_errors++; + } + } + else { + ps->tx_errors++; + ps->tx_aborted_errors++; + if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) + ps->tx_carrier_errors++; + } + } +} - if (DUPLEX_FULL == phydev->duplex) - aup->mac->control = ((aup->mac->control - | MAC_FULL_DUPLEX) - & ~MAC_DISABLE_RX_OWN); - else - aup->mac->control = ((aup->mac->control - & ~MAC_FULL_DUPLEX) - | MAC_DISABLE_RX_OWN); - au_sync_delay(1); +/* + * Called from the interrupt service routine to acknowledge + * the TX DONE bits. This is a must if the irq is setup as + * edge triggered. + */ +static void au1000_tx_ack(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + volatile tx_dma_t *ptxd; - enable_rx_tx(dev); - aup->old_duplex = phydev->duplex; + ptxd = aup->tx_dma_ring[aup->tx_tail]; - status_change = 1; - } + while (ptxd->buff_stat & TX_T_DONE) { + update_tx_stats(dev, ptxd->status); + ptxd->buff_stat &= ~TX_T_DONE; + ptxd->len = 0; + au_sync(); - if(phydev->link != aup->old_link) { - // link state changed + aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); + ptxd = aup->tx_dma_ring[aup->tx_tail]; - if (!phydev->link) { - /* link went down */ - aup->old_speed = 0; - aup->old_duplex = -1; + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); } - - aup->old_link = phydev->link; - status_change = 1; } +} - spin_unlock_irqrestore(&aup->lock, flags); +/* + * Au1000 interrupt service routine. + */ +static irqreturn_t au1000_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; - if (status_change) { - if (phydev->link) - printk(KERN_INFO "%s: link up (%d/%s)\n", - dev->name, phydev->speed, - DUPLEX_FULL == phydev->duplex ? 
"Full" : "Half"); - else - printk(KERN_INFO "%s: link down\n", dev->name); - } + /* Handle RX interrupts first to minimize chance of overrun */ + + au1000_rx(dev); + au1000_tx_ack(dev); + return IRQ_RETVAL(1); } static int au1000_open(struct net_device *dev) @@ -1003,88 +934,6 @@ static int au1000_close(struct net_device *dev) return 0; } -static void __exit au1000_cleanup_module(void) -{ - int i, j; - struct net_device *dev; - struct au1000_private *aup; - - for (i = 0; i < num_ifs; i++) { - dev = iflist[i].dev; - if (dev) { - aup = netdev_priv(dev); - unregister_netdev(dev); - mdiobus_unregister(aup->mii_bus); - mdiobus_free(aup->mii_bus); - for (j = 0; j < NUM_RX_DMA; j++) - if (aup->rx_db_inuse[j]) - ReleaseDB(aup, aup->rx_db_inuse[j]); - for (j = 0; j < NUM_TX_DMA; j++) - if (aup->tx_db_inuse[j]) - ReleaseDB(aup, aup->tx_db_inuse[j]); - dma_free_noncoherent(NULL, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); - release_mem_region(dev->base_addr, MAC_IOSIZE); - release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4); - free_netdev(dev); - } - } -} - -static void update_tx_stats(struct net_device *dev, u32 status) -{ - struct au1000_private *aup = netdev_priv(dev); - struct net_device_stats *ps = &dev->stats; - - if (status & TX_FRAME_ABORTED) { - if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { - if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { - /* any other tx errors are only valid - * in half duplex mode */ - ps->tx_errors++; - ps->tx_aborted_errors++; - } - } - else { - ps->tx_errors++; - ps->tx_aborted_errors++; - if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) - ps->tx_carrier_errors++; - } - } -} - - -/* - * Called from the interrupt service routine to acknowledge - * the TX DONE bits. This is a must if the irq is setup as - * edge triggered. - */ -static void au1000_tx_ack(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - volatile tx_dma_t *ptxd; - - ptxd = aup->tx_dma_ring[aup->tx_tail]; - - while (ptxd->buff_stat & TX_T_DONE) { - update_tx_stats(dev, ptxd->status); - ptxd->buff_stat &= ~TX_T_DONE; - ptxd->len = 0; - au_sync(); - - aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); - ptxd = aup->tx_dma_ring[aup->tx_tail]; - - if (aup->tx_full) { - aup->tx_full = 0; - netif_wake_queue(dev); - } - } -} - - /* * Au1000 transmit routine. */ @@ -1142,123 +991,6 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) return 0; } -static inline void update_rx_stats(struct net_device *dev, u32 status) -{ - struct au1000_private *aup = netdev_priv(dev); - struct net_device_stats *ps = &dev->stats; - - ps->rx_packets++; - if (status & RX_MCAST_FRAME) - ps->multicast++; - - if (status & RX_ERROR) { - ps->rx_errors++; - if (status & RX_MISSED_FRAME) - ps->rx_missed_errors++; - if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) - ps->rx_length_errors++; - if (status & RX_CRC_ERROR) - ps->rx_crc_errors++; - if (status & RX_COLL) - ps->collisions++; - } - else - ps->rx_bytes += status & RX_FRAME_LEN_MASK; - -} - -/* - * Au1000 receive routine. 
- */ -static int au1000_rx(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - struct sk_buff *skb; - volatile rx_dma_t *prxd; - u32 buff_stat, status; - db_dest_t *pDB; - u32 frmlen; - - if (au1000_debug > 5) - printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); - - prxd = aup->rx_dma_ring[aup->rx_head]; - buff_stat = prxd->buff_stat; - while (buff_stat & RX_T_DONE) { - status = prxd->status; - pDB = aup->rx_db_inuse[aup->rx_head]; - update_rx_stats(dev, status); - if (!(status & RX_ERROR)) { - - /* good frame */ - frmlen = (status & RX_FRAME_LEN_MASK); - frmlen -= 4; /* Remove FCS */ - skb = dev_alloc_skb(frmlen + 2); - if (skb == NULL) { - printk(KERN_ERR - "%s: Memory squeeze, dropping packet.\n", - dev->name); - dev->stats.rx_dropped++; - continue; - } - skb_reserve(skb, 2); /* 16 byte IP header align */ - skb_copy_to_linear_data(skb, - (unsigned char *)pDB->vaddr, frmlen); - skb_put(skb, frmlen); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); /* pass the packet to upper layers */ - } - else { - if (au1000_debug > 4) { - if (status & RX_MISSED_FRAME) - printk("rx miss\n"); - if (status & RX_WDOG_TIMER) - printk("rx wdog\n"); - if (status & RX_RUNT) - printk("rx runt\n"); - if (status & RX_OVERLEN) - printk("rx overlen\n"); - if (status & RX_COLL) - printk("rx coll\n"); - if (status & RX_MII_ERROR) - printk("rx mii error\n"); - if (status & RX_CRC_ERROR) - printk("rx crc error\n"); - if (status & RX_LEN_ERROR) - printk("rx len error\n"); - if (status & RX_U_CNTRL_FRAME) - printk("rx u control frame\n"); - if (status & RX_MISSED_FRAME) - printk("rx miss\n"); - } - } - prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); - aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); - au_sync(); - - /* next descriptor */ - prxd = aup->rx_dma_ring[aup->rx_head]; - buff_stat = prxd->buff_stat; - } - return 0; -} - - -/* - * Au1000 interrupt service routine. - */ -static irqreturn_t au1000_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - - /* Handle RX interrupts first to minimize chance of overrun */ - - au1000_rx(dev); - au1000_tx_ack(dev); - return IRQ_RETVAL(1); -} - - /* * The Tx ring has been full longer than the watchdog timeout * value. The transmitter must be hung? 
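Note: the au1000_rx() loop relocated by the hunks above uses the classic copy-and-netif_rx receive path. Condensed into a sketch (demo_rx_one() and its buf/frmlen arguments are hypothetical; descriptor-ring handling and error decoding are omitted), the per-frame work is:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* demo_rx_one() is hypothetical; buf/frmlen come from the RX descriptor */
static void demo_rx_one(struct net_device *dev, const void *buf, u32 frmlen)
{
	struct sk_buff *skb = dev_alloc_skb(frmlen + 2);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);		/* 2-byte pad so the IP header lands aligned */
	skb_copy_to_linear_data(skb, buf, frmlen);
	skb_put(skb, frmlen);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);			/* hand the frame to the stack */
}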
@@ -1315,5 +1047,252 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); } +static struct net_device * au1000_probe(int port_num) +{ + static unsigned version_printed = 0; + struct au1000_private *aup = NULL; + struct net_device *dev = NULL; + db_dest_t *pDB, *pDBfree; + char ethaddr[6]; + int irq, i, err; + u32 base, macen; + + if (port_num >= NUM_ETH_INTERFACES) + return NULL; + + base = CPHYSADDR(iflist[port_num].base_addr ); + macen = CPHYSADDR(iflist[port_num].macen_addr); + irq = iflist[port_num].irq; + + if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") || + !request_mem_region(macen, 4, "Au1x00 ENET")) + return NULL; + + if (version_printed++ == 0) + printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); + + dev = alloc_etherdev(sizeof(struct au1000_private)); + if (!dev) { + printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); + return NULL; + } + + if ((err = register_netdev(dev)) != 0) { + printk(KERN_ERR "%s: Cannot register net device, error %d\n", + DRV_NAME, err); + free_netdev(dev); + return NULL; + } + + printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n", + dev->name, base, irq); + + aup = netdev_priv(dev); + + spin_lock_init(&aup->lock); + + /* Allocate the data buffers */ + /* Snooping works fine with eth on all au1xxx */ + aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); + if (!aup->vaddr) { + free_netdev(dev); + release_mem_region( base, MAC_IOSIZE); + release_mem_region(macen, 4); + return NULL; + } + + /* aup->mac is the base address of the MAC's registers */ + aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr; + + /* Setup some variables for quick register address access */ + aup->enable = (volatile u32 *)iflist[port_num].macen_addr; + aup->mac_id = port_num; + au_macs[port_num] = aup; + + if (port_num == 0) { + if (prom_get_ethernet_addr(ethaddr) == 0) + memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); + else { + printk(KERN_INFO "%s: No MAC address found\n", + dev->name); + /* Use the hard coded MAC addresses */ + } + + setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); + } else if (port_num == 1) + setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); + + /* + * Assign to the Ethernet ports two consecutive MAC addresses + * to match those that are printed on their stickers + */ + memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr)); + dev->dev_addr[5] += port_num; + + *aup->enable = 0; + aup->mac_enabled = 0; + + aup->mii_bus = mdiobus_alloc(); + if (aup->mii_bus == NULL) + goto err_out; + + aup->mii_bus->priv = dev; + aup->mii_bus->read = au1000_mdiobus_read; + aup->mii_bus->write = au1000_mdiobus_write; + aup->mii_bus->reset = au1000_mdiobus_reset; + aup->mii_bus->name = "au1000_eth_mii"; + snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); + aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + for(i = 0; i < PHY_MAX_ADDR; ++i) + aup->mii_bus->irq[i] = PHY_POLL; + + /* if known, set corresponding PHY IRQs */ +#if defined(AU1XXX_PHY_STATIC_CONFIG) +# if defined(AU1XXX_PHY0_IRQ) + if (AU1XXX_PHY0_BUSID == aup->mac_id) + aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ; +# endif +# if defined(AU1XXX_PHY1_IRQ) + if (AU1XXX_PHY1_BUSID == aup->mac_id) + aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ; +# endif +#endif + mdiobus_register(aup->mii_bus); + + if (mii_probe(dev) != 0) { + goto err_out; + } + + pDBfree = NULL; 
+ /* setup the data buffer descriptors and attach a buffer to each one */ + pDB = aup->db; + for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { + pDB->pnext = pDBfree; + pDBfree = pDB; + pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); + pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB++; + } + aup->pDBfree = pDBfree; + + for (i = 0; i < NUM_RX_DMA; i++) { + pDB = GetFreeDB(aup); + if (!pDB) { + goto err_out; + } + aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_db_inuse[i] = pDB; + } + for (i = 0; i < NUM_TX_DMA; i++) { + pDB = GetFreeDB(aup); + if (!pDB) { + goto err_out; + } + aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->len = 0; + aup->tx_db_inuse[i] = pDB; + } + + dev->base_addr = base; + dev->irq = irq; + dev->open = au1000_open; + dev->hard_start_xmit = au1000_tx; + dev->stop = au1000_close; + dev->set_multicast_list = &set_rx_mode; + dev->do_ioctl = &au1000_ioctl; + SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); + dev->tx_timeout = au1000_tx_timeout; + dev->watchdog_timeo = ETH_TX_TIMEOUT; + + /* + * The boot code uses the ethernet controller, so reset it to start + * fresh. au1000_init() expects that the device is in reset state. + */ + reset_mac(dev); + + return dev; + +err_out: + if (aup->mii_bus != NULL) { + mdiobus_unregister(aup->mii_bus); + mdiobus_free(aup->mii_bus); + } + + /* here we should have a valid dev plus aup-> register addresses + * so we can reset the mac properly.*/ + reset_mac(dev); + + for (i = 0; i < NUM_RX_DMA; i++) { + if (aup->rx_db_inuse[i]) + ReleaseDB(aup, aup->rx_db_inuse[i]); + } + for (i = 0; i < NUM_TX_DMA; i++) { + if (aup->tx_db_inuse[i]) + ReleaseDB(aup, aup->tx_db_inuse[i]); + } + dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + unregister_netdev(dev); + free_netdev(dev); + release_mem_region( base, MAC_IOSIZE); + release_mem_region(macen, 4); + return NULL; +} + +/* + * Setup the base address and interrupt of the Au1xxx ethernet macs + * based on cpu type and whether the interface is enabled in sys_pinfunc + * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0. 
+ */ +static int __init au1000_init_module(void) +{ + int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4); + struct net_device *dev; + int i, found_one = 0; + + num_ifs = NUM_ETH_INTERFACES - ni; + + for(i = 0; i < num_ifs; i++) { + dev = au1000_probe(i); + iflist[i].dev = dev; + if (dev) + found_one++; + } + if (!found_one) + return -ENODEV; + return 0; +} + +static void __exit au1000_cleanup_module(void) +{ + int i, j; + struct net_device *dev; + struct au1000_private *aup; + + for (i = 0; i < num_ifs; i++) { + dev = iflist[i].dev; + if (dev) { + aup = netdev_priv(dev); + unregister_netdev(dev); + mdiobus_unregister(aup->mii_bus); + mdiobus_free(aup->mii_bus); + for (j = 0; j < NUM_RX_DMA; j++) + if (aup->rx_db_inuse[j]) + ReleaseDB(aup, aup->rx_db_inuse[j]); + for (j = 0; j < NUM_TX_DMA; j++) + if (aup->tx_db_inuse[j]) + ReleaseDB(aup, aup->tx_db_inuse[j]); + dma_free_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + release_mem_region(dev->base_addr, MAC_IOSIZE); + release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4); + free_netdev(dev); + } + } +} + module_init(au1000_init_module); module_exit(au1000_cleanup_module); diff --git a/drivers/net/b44.c b/drivers/net/b44.c index c38512ebcea..92aaaa1ee9f 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -874,7 +874,7 @@ static int b44_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); b44_enable_ints(bp); } @@ -906,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id) goto irq_ack; } - if (netif_rx_schedule_prep(&bp->napi)) { + if (napi_schedule_prep(&bp->napi)) { /* NOTE: These writes are posted by the readback of * the ISTAT register below. 
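Note: most of the drivers touched below (b44, bnx2, bnx2x, cassini, chelsio, cpmac, e100, e1000, e1000e, ...) make the same mechanical change: the netif_rx_schedule_prep()/__netif_rx_schedule()/netif_rx_complete() helpers are replaced by their napi_* equivalents, which take only the napi_struct. A minimal sketch of the resulting interrupt/poll pair, with hypothetical mydrv_* names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* hypothetical driver state; only the napi member matters here */
struct mydrv_priv {
	struct napi_struct napi;
};

static void mydrv_disable_irqs(struct mydrv_priv *priv);	/* assumed helper */
static void mydrv_enable_irqs(struct mydrv_priv *priv);	/* assumed helper */
static int mydrv_rx(struct mydrv_priv *priv, int budget);	/* assumed helper */

static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {	/* was netif_rx_schedule_prep() */
		mydrv_disable_irqs(priv);
		__napi_schedule(&priv->napi);	/* was __netif_rx_schedule() */
	}
	return IRQ_HANDLED;
}

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done = mydrv_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);		/* was netif_rx_complete() */
		mydrv_enable_irqs(priv);
	}
	return work_done;
}

The semantics are unchanged; only the naming moves from the old netif_rx_* prefix to napi_*.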
*/ bp->istat = istat; __b44_disable_ints(bp); - __netif_rx_schedule(&bp->napi); + __napi_schedule(&bp->napi); } else { printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", dev->name); diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 78e31aa861e..9afe8092dfc 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -415,11 +415,11 @@ static int mii_probe(struct net_device *dev) } #if defined(CONFIG_BFIN_MAC_RMII) - phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, - PHY_INTERFACE_MODE_RMII); + phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link, + 0, PHY_INTERFACE_MODE_RMII); #else - phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link, + 0, PHY_INTERFACE_MODE_MII); #endif if (IS_ERR(phydev)) { @@ -447,7 +447,7 @@ static int mii_probe(struct net_device *dev) printk(KERN_INFO "%s: attached PHY driver [%s] " "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" "@sclk=%dMHz)\n", - DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq, + DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, MDC_CLK, mdc_div, sclk/1000000); return 0; @@ -488,7 +488,7 @@ static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, dev->dev.bus_id); + strcpy(info->bus_info, dev_name(&dev->dev)); } static struct ethtool_ops bfin_mac_ethtool_ops = { diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 8a546a33d58..1ab58375d06 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -1240,7 +1240,7 @@ static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf { struct bmac_data *bp = netdev_priv(dev); strcpy(info->driver, "bmac"); - strcpy(info->bus_info, bp->mdev->ofdev.dev.bus_id); + strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev)); } static const struct ethtool_ops bmac_ethtool_ops = { diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d4a3dac21dc..49e0e51a9df 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -1497,6 +1497,8 @@ static int bnx2_fw_sync(struct bnx2 *, u32, int, int); static int bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) { u32 speed_arg = 0, pause_adv; @@ -1554,6 +1556,8 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) static int bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) { u32 adv, bmcr; u32 new_adv = 0; @@ -1866,6 +1870,8 @@ bnx2_set_remote_link(struct bnx2 *bp) static int bnx2_setup_copper_phy(struct bnx2 *bp) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) { u32 bmcr; u32 new_bmcr; @@ -1963,6 +1969,8 @@ bnx2_setup_copper_phy(struct bnx2 *bp) static int bnx2_setup_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) { if (bp->loopback == MAC_LOOPBACK) return 0; @@ -2176,6 +2184,8 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) static int bnx2_init_phy(struct bnx2 *bp, int reset_phy) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) { u32 val; int rc = 0; @@ -2997,6 +3007,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) skb->ip_summed = CHECKSUM_UNNECESSARY; } + skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); + #ifdef BCM_VLAN if (hw_vlan) vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); @@ -3053,7 +3065,7 @@ 
bnx2_msi(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - netif_rx_schedule(&bnapi->napi); + napi_schedule(&bnapi->napi); return IRQ_HANDLED; } @@ -3070,7 +3082,7 @@ bnx2_msi_1shot(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - netif_rx_schedule(&bnapi->napi); + napi_schedule(&bnapi->napi); return IRQ_HANDLED; } @@ -3106,9 +3118,9 @@ bnx2_interrupt(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - if (netif_rx_schedule_prep(&bnapi->napi)) { + if (napi_schedule_prep(&bnapi->napi)) { bnapi->last_status_idx = sblk->status_idx; - __netif_rx_schedule(&bnapi->napi); + __napi_schedule(&bnapi->napi); } return IRQ_HANDLED; @@ -3218,7 +3230,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_fast_work(bnapi))) { - netif_rx_complete(napi); + napi_complete(napi); REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx); @@ -3251,7 +3263,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_work(bnapi))) { - netif_rx_complete(napi); + napi_complete(napi); if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index d3e7775a9cc..88da14c141f 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c @@ -1325,6 +1325,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, skb->protocol = eth_type_trans(skb, bp->dev); skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, queue); { struct iphdr *iph; @@ -1654,7 +1655,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) prefetch(&fp->status_blk->c_status_block.status_block_index); prefetch(&fp->status_blk->u_status_block.status_block_index); - netif_rx_schedule(&bnx2x_fp(bp, index, napi)); + napi_schedule(&bnx2x_fp(bp, index, napi)); return IRQ_HANDLED; } @@ -1693,7 +1694,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) prefetch(&fp->status_blk->c_status_block.status_block_index); prefetch(&fp->status_blk->u_status_block.status_block_index); - netif_rx_schedule(&bnx2x_fp(bp, 0, napi)); + napi_schedule(&bnx2x_fp(bp, 0, napi)); status &= ~mask; } @@ -9374,7 +9375,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) #ifdef BNX2X_STOP_ON_ERROR poll_panic: #endif - netif_rx_complete(napi); + napi_complete(napi); bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 8a83eb283c2..a306230381c 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -29,7 +29,7 @@ // General definitions #define BOND_ETH_P_LACPDU 0x8809 -#define PKT_TYPE_LACPDU __constant_htons(BOND_ETH_P_LACPDU) +#define PKT_TYPE_LACPDU cpu_to_be16(BOND_ETH_P_LACPDU) #define AD_TIMER_INTERVAL 100 /*msec*/ #define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 27fb7f5c21c..409b1407427 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -822,7 +822,7 @@ static int rlb_initialize(struct bonding *bond) _unlock_rx_hashtbl(bond); /*initialize packet type*/ - pk_type->type = __constant_htons(ETH_P_ARP); + pk_type->type = 
cpu_to_be16(ETH_P_ARP); pk_type->dev = NULL; pk_type->func = rlb_arp_recv; @@ -892,7 +892,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) memset(&pkt, 0, size); memcpy(pkt.mac_dst, mac_addr, ETH_ALEN); memcpy(pkt.mac_src, mac_addr, ETH_ALEN); - pkt.type = __constant_htons(ETH_P_LOOP); + pkt.type = cpu_to_be16(ETH_P_LOOP); for (i = 0; i < MAX_LP_BURST; i++) { struct sk_buff *skb; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9fb388388fb..21bce2c0fde 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3369,7 +3369,7 @@ static int bond_info_seq_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations bond_info_seq_ops = { +static const struct seq_operations bond_info_seq_ops = { .start = bond_info_seq_start, .next = bond_info_seq_next, .stop = bond_info_seq_stop, diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index bbbc3bb08aa..0effefa1b88 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2506,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(&cp->napi); + napi_schedule(&cp->napi); #else cas_rx_ringN(cp, ring, 0); #endif @@ -2557,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(&cp->napi); + napi_schedule(&cp->napi); #else cas_rx_ringN(cp, 1, 0); #endif @@ -2613,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) if (status & INTR_RX_DONE) { #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(&cp->napi); + napi_schedule(&cp->napi); #else cas_rx_ringN(cp, 0, 0); #endif @@ -2691,7 +2691,7 @@ rx_comp: #endif spin_unlock_irqrestore(&cp->lock, flags); if (enable_intr) { - netif_rx_complete(napi); + napi_complete(napi); cas_unmask_intr(cp); } return credits; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index d984b799576..840da83fb3c 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1612,7 +1612,7 @@ int t1_poll(struct napi_struct *napi, int budget) int work_done = process_responses(adapter, budget); if (likely(work_done < budget)) { - netif_rx_complete(napi); + napi_complete(napi); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); } @@ -1630,7 +1630,7 @@ irqreturn_t t1_interrupt(int irq, void *data) if (napi_schedule_prep(&adapter->napi)) { if (process_pure_responses(adapter)) - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); else { /* no data, no NAPI needed */ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index f66548751c3..3f476c7c073 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget) printk(KERN_WARNING "%s: rx: polling, but no queue\n", priv->dev->name); spin_unlock(&priv->rx_lock); - netif_rx_complete(napi); + napi_complete(napi); return 0; } @@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget) if (processed == 0) { /* we ran out of packets to read, * revert to interrupt-driven mode */ - netif_rx_complete(napi); + napi_complete(napi); cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); return 0; } @@ -536,7 +536,7 @@ fatal_error: } spin_unlock(&priv->rx_lock); - netif_rx_complete(napi); + 
napi_complete(napi); netif_tx_stop_all_queues(priv->dev); napi_disable(&priv->napi); @@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) if (status & MAC_INT_RX) { queue = (status >> 8) & 7; - if (netif_rx_schedule_prep(&priv->napi)) { + if (napi_schedule_prep(&priv->napi)) { cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); - __netif_rx_schedule(&priv->napi); + __napi_schedule(&priv->napi); } } @@ -1161,7 +1161,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(debug_level, 0xff); memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); - priv->phy = phy_connect(dev, cpmac_mii->phy_map[phy_id]->dev.bus_id, + priv->phy = phy_connect(dev, dev_name(&cpmac_mii->phy_map[phy_id]->dev), &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phy)) { if (netif_msg_drv(priv)) diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index a89d8cc5120..fbe15699584 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -42,7 +42,6 @@ #include <linux/cache.h> #include <linux/mutex.h> #include <linux/bitops.h> -#include <linux/inet_lro.h> #include "t3cdev.h" #include <asm/io.h> @@ -178,15 +177,11 @@ enum { /* per port SGE statistics */ SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ - SGE_PSTAT_LRO_AGGR, /* # of page chunks added to LRO sessions */ - SGE_PSTAT_LRO_FLUSHED, /* # of flushed LRO sessions */ - SGE_PSTAT_LRO_NO_DESC, /* # of overflown LRO sessions */ SGE_PSTAT_MAX /* must be last */ }; -#define T3_MAX_LRO_SES 8 -#define T3_MAX_LRO_MAX_PKTS 64 +struct napi_gro_fraginfo; struct sge_qset { /* an SGE queue set */ struct adapter *adap; @@ -194,12 +189,8 @@ struct sge_qset { /* an SGE queue set */ struct sge_rspq rspq; struct sge_fl fl[SGE_RXQ_PER_SET]; struct sge_txq txq[SGE_TXQ_PER_SET]; - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[T3_MAX_LRO_SES]; - struct skb_frag_struct *lro_frag_tbl; - int lro_nfrags; + struct napi_gro_fraginfo lro_frag_tbl; int lro_enabled; - int lro_frag_len; void *lro_va; struct net_device *netdev; struct netdev_queue *tx_q; /* associated netdev TX queue */ @@ -230,6 +221,7 @@ struct adapter { unsigned int slow_intr_mask; unsigned long irq_stats[IRQ_NUM_STATS]; + int msix_nvectors; struct { unsigned short vec; char desc[22]; diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 0089746b8d0..f2c7cc3e263 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -338,7 +338,7 @@ static void free_irq_resources(struct adapter *adapter) free_irq(adapter->msix_info[0].vec, adapter); for_each_port(adapter, i) - n += adap2pinfo(adapter, i)->nqsets; + n += adap2pinfo(adapter, i)->nqsets; for (i = 0; i < n; ++i) free_irq(adapter->msix_info[i + 1].vec, @@ -508,19 +508,9 @@ static void set_qset_lro(struct net_device *dev, int qset_idx, int val) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - int i, lro_on = 1; adapter->params.sge.qset[qset_idx].lro = !!val; adapter->sge.qs[qset_idx].lro_enabled = !!val; - - /* let ethtool report LRO on only if all queues are LRO enabled */ - for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i) - lro_on &= adapter->params.sge.qset[i].lro; - - if (lro_on) - dev->features |= NETIF_F_LRO; - else - dev->features &= ~NETIF_F_LRO; } /** @@ -1433,9 +1423,9 @@ static void get_stats(struct net_device *dev, struct 
ethtool_stats *stats, *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR); - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED); - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC); + *data++ = 0; + *data++ = 0; + *data++ = 0; *data++ = s->rx_cong_drops; *data++ = s->num_toggled; @@ -1826,28 +1816,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) memset(&wol->sopass, 0, sizeof(wol->sopass)); } -static int cxgb3_set_flags(struct net_device *dev, u32 data) -{ - struct port_info *pi = netdev_priv(dev); - int i; - - if (data & ETH_FLAG_LRO) { - if (!(pi->rx_offload & T3_RX_CSUM)) - return -EINVAL; - - pi->rx_offload |= T3_LRO; - for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) - set_qset_lro(dev, i, 1); - - } else { - pi->rx_offload &= ~T3_LRO; - for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) - set_qset_lro(dev, i, 0); - } - - return 0; -} - static const struct ethtool_ops cxgb_ethtool_ops = { .get_settings = get_settings, .set_settings = set_settings, @@ -1877,8 +1845,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_regs = get_regs, .get_wol = get_wol, .set_tso = ethtool_op_set_tso, - .get_flags = ethtool_op_get_flags, - .set_flags = cxgb3_set_flags, }; static int in_range(int val, int lo, int hi) @@ -2576,6 +2542,12 @@ static int t3_adapter_error(struct adapter *adapter, int reset) { int i, ret = 0; + if (is_offload(adapter) && + test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); + offload_close(&adapter->tdev); + } + /* Stop all ports */ for_each_port(adapter, i) { struct net_device *netdev = adapter->port[i]; @@ -2584,10 +2556,6 @@ static int t3_adapter_error(struct adapter *adapter, int reset) cxgb_close(netdev); } - if (is_offload(adapter) && - test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) - offload_close(&adapter->tdev); - /* Stop SGE timers */ t3_stop_sge_timers(adapter); @@ -2639,6 +2607,9 @@ static void t3_resume_ports(struct adapter *adapter) } } } + + if (is_offload(adapter) && !ofld_disable) + cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); } /* @@ -2752,7 +2723,7 @@ static void set_nqsets(struct adapter *adap) int i, j = 0; int num_cpus = num_online_cpus(); int hwports = adap->params.nports; - int nqsets = SGE_QSETS; + int nqsets = adap->msix_nvectors - 1; if (adap->params.rev > 0 && adap->flags & USING_MSIX) { if (hwports == 2 && @@ -2781,18 +2752,25 @@ static void set_nqsets(struct adapter *adap) static int __devinit cxgb_enable_msix(struct adapter *adap) { struct msix_entry entries[SGE_QSETS + 1]; + int vectors; int i, err; - for (i = 0; i < ARRAY_SIZE(entries); ++i) + vectors = ARRAY_SIZE(entries); + for (i = 0; i < vectors; ++i) entries[i].entry = i; - err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries)); + while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) + vectors = err; + + if (!err && vectors < (adap->params.nports + 1)) + err = -1; + if (!err) { - for (i = 0; i < ARRAY_SIZE(entries); ++i) + for (i = 0; i < vectors; ++i) adap->msix_info[i].vec = entries[i].vector; - } else if (err > 0) - dev_info(&adap->pdev->dev, - "only %d MSI-X vectors left, not using MSI-X\n", err); + adap->msix_nvectors = vectors; + } + return err; } @@ -2960,7 
+2938,7 @@ static int __devinit init_one(struct pci_dev *pdev, netdev->mem_end = mmio_start + mmio_len - 1; netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; netdev->features |= NETIF_F_LLTX; - netdev->features |= NETIF_F_LRO; + netdev->features |= NETIF_F_GRO; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 2d7f69aff1d..620d80be6aa 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -153,6 +153,18 @@ void cxgb3_remove_clients(struct t3cdev *tdev) mutex_unlock(&cxgb3_db_lock); } +void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error) +{ + struct cxgb3_client *client; + + mutex_lock(&cxgb3_db_lock); + list_for_each_entry(client, &client_list, client_list) { + if (client->err_handler) + client->err_handler(tdev, status, error); + } + mutex_unlock(&cxgb3_db_lock); +} + static struct net_device *get_iff_from_mac(struct adapter *adapter, const unsigned char *mac, unsigned int vlan) diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h index d514e5019df..a8e8e5fcdf8 100644 --- a/drivers/net/cxgb3/cxgb3_offload.h +++ b/drivers/net/cxgb3/cxgb3_offload.h @@ -64,10 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client); void cxgb3_unregister_client(struct cxgb3_client *client); void cxgb3_add_clients(struct t3cdev *tdev); void cxgb3_remove_clients(struct t3cdev *tdev); +void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error); typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb, void *ctx); +enum { + OFFLOAD_STATUS_UP, + OFFLOAD_STATUS_DOWN +}; + struct cxgb3_client { char *name; void (*add) (struct t3cdev *); @@ -76,6 +82,7 @@ struct cxgb3_client { int (*redirect)(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t); struct list_head client_list; + void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error); }; /* diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 379a1324db4..272a0168f3e 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -585,8 +585,7 @@ static void t3_reset_qset(struct sge_qset *q) memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); q->txq_stopped = 0; q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ - kfree(q->lro_frag_tbl); - q->lro_nfrags = q->lro_frag_len = 0; + q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0; } @@ -1938,6 +1937,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, skb->ip_summed = CHECKSUM_UNNECESSARY; } else skb->ip_summed = CHECKSUM_NONE; + skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); if (unlikely(p->vlan_valid)) { struct vlan_group *grp = pi->vlan_grp; @@ -1945,10 +1945,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, qs->port_stats[SGE_PSTAT_VLANEX]++; if (likely(grp)) if (lro) - lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb, - grp, - ntohs(p->vlan), - p); + vlan_gro_receive(&qs->napi, grp, + ntohs(p->vlan), skb); else { if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) { @@ -1965,7 +1963,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, dev_kfree_skb_any(skb); } else if (rq->polling) { if (lro) - lro_receive_skb(&qs->lro_mgr, skb, p); + napi_gro_receive(&qs->napi, skb); else { if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) cxgb3_arp_process(adap, skb); @@ -1981,59 +1979,6 @@ static inline int is_eth_tcp(u32 rss) } /** - * lro_frame_ok - check if an ingress packet is 
eligible for LRO - * @p: the CPL header of the packet - * - * Returns true if a received packet is eligible for LRO. - * The following conditions must be true: - * - packet is TCP/IP Ethernet II (checked elsewhere) - * - not an IP fragment - * - no IP options - * - TCP/IP checksums are correct - * - the packet is for this host - */ -static inline int lro_frame_ok(const struct cpl_rx_pkt *p) -{ - const struct ethhdr *eh = (struct ethhdr *)(p + 1); - const struct iphdr *ih = (struct iphdr *)(eh + 1); - - return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) && - eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2); -} - -static int t3_get_lro_header(void **eh, void **iph, void **tcph, - u64 *hdr_flags, void *priv) -{ - const struct cpl_rx_pkt *cpl = priv; - - if (!lro_frame_ok(cpl)) - return -1; - - *eh = (struct ethhdr *)(cpl + 1); - *iph = (struct iphdr *)((struct ethhdr *)*eh + 1); - *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1); - - *hdr_flags = LRO_IPV4 | LRO_TCP; - return 0; -} - -static int t3_get_skb_header(struct sk_buff *skb, - void **iph, void **tcph, u64 *hdr_flags, - void *priv) -{ - void *eh; - - return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv); -} - -static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh, - void **iph, void **tcph, u64 *hdr_flags, - void *priv) -{ - return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv); -} - -/** * lro_add_page - add a page chunk to an LRO session * @adap: the adapter * @qs: the associated queue set @@ -2049,8 +1994,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, { struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; struct cpl_rx_pkt *cpl; - struct skb_frag_struct *rx_frag = qs->lro_frag_tbl; - int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len; + struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags; + int nr_frags = qs->lro_frag_tbl.nr_frags; + int frag_len = qs->lro_frag_tbl.len; int offset = 0; if (!nr_frags) { @@ -2069,13 +2015,13 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, rx_frag->page_offset = sd->pg_chunk.offset + offset; rx_frag->size = len; frag_len += len; - qs->lro_nfrags++; - qs->lro_frag_len = frag_len; + qs->lro_frag_tbl.nr_frags++; + qs->lro_frag_tbl.len = frag_len; if (!complete) return; - qs->lro_nfrags = qs->lro_frag_len = 0; + qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY; cpl = qs->lro_va; if (unlikely(cpl->vlan_valid)) { @@ -2084,36 +2030,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, struct vlan_group *grp = pi->vlan_grp; if (likely(grp != NULL)) { - lro_vlan_hwaccel_receive_frags(&qs->lro_mgr, - qs->lro_frag_tbl, - frag_len, frag_len, - grp, ntohs(cpl->vlan), - cpl, 0); - return; + vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan), + &qs->lro_frag_tbl); + goto out; } } - lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl, - frag_len, frag_len, cpl, 0); -} + napi_gro_frags(&qs->napi, &qs->lro_frag_tbl); -/** - * init_lro_mgr - initialize a LRO manager object - * @lro_mgr: the LRO manager object - */ -static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr) -{ - lro_mgr->dev = qs->netdev; - lro_mgr->features = LRO_F_NAPI; - lro_mgr->frag_align_pad = NET_IP_ALIGN; - lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; - lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; - lro_mgr->max_desc = T3_MAX_LRO_SES; - lro_mgr->lro_arr = qs->lro_desc; - lro_mgr->get_frag_header = t3_get_frag_header; - lro_mgr->get_skb_header = t3_get_skb_header; - lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS; 
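Note: the cxgb3 changes in this region drop the driver-private inet_lro state (net_lro_mgr, lro_desc and the t3_get_*_header callbacks) and hand completed frames to the generic receive offload layer instead. A sketch of the delivery step, assuming the napi/vlan_group pointers shown in the rx_eth() hunk above (demo_gro_deliver() itself is hypothetical):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

static void demo_gro_deliver(struct napi_struct *napi, struct vlan_group *grp,
			     bool vlan_valid, u16 vlan_tag, struct sk_buff *skb)
{
	if (vlan_valid && grp)
		vlan_gro_receive(napi, grp, vlan_tag, skb);	/* was lro_vlan_hwaccel_receive_skb() */
	else
		napi_gro_receive(napi, skb);			/* was lro_receive_skb() */
}

With GRO the aggregation state lives in the napi_struct, so the per-qset LRO session tables and the lro_flush_all() call at the end of the poll loop go away.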
- if (lro_mgr->max_aggr > MAX_SKB_FRAGS) - lro_mgr->max_aggr = MAX_SKB_FRAGS; +out: + qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0; } /** @@ -2357,10 +2282,6 @@ next_fl: } deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); - lro_flush_all(&qs->lro_mgr); - qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated; - qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed; - qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc; if (sleeping) check_ring_db(adap, qs, sleeping); @@ -2907,7 +2828,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, { int i, avail, ret = -ENOMEM; struct sge_qset *q = &adapter->sge.qs[id]; - struct net_lro_mgr *lro_mgr = &q->lro_mgr; init_qset_cntxt(q, id); setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q); @@ -2987,10 +2907,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, q->fl[0].order = FL0_PG_ORDER; q->fl[1].order = FL1_PG_ORDER; - q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1, - sizeof(struct skb_frag_struct), - GFP_KERNEL); - q->lro_nfrags = q->lro_frag_len = 0; spin_lock_irq(&adapter->sge.reg_lock); /* FL threshold comparison uses < */ @@ -3042,8 +2958,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, q->tx_q = netdevq; t3_update_qset_coalesce(q, p); - init_lro_mgr(q, lro_mgr); - avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL | __GFP_COMP); if (!avail) { diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 7ce3053530f..861c867fca8 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -1027,7 +1027,7 @@ static int __init dec_lance_probe(struct device *bdev, const int type) printk(version); if (bdev) - snprintf(name, sizeof(name), "%s", bdev->bus_id); + snprintf(name, sizeof(name), "%s", dev_name(bdev)); else { i = 0; dev = root_lance_dev; @@ -1105,10 +1105,10 @@ static int __init dec_lance_probe(struct device *bdev, const int type) start = to_tc_dev(bdev)->resource.start; len = to_tc_dev(bdev)->resource.end - start + 1; - if (!request_mem_region(start, len, bdev->bus_id)) { + if (!request_mem_region(start, len, dev_name(bdev))) { printk(KERN_ERR "%s: Unable to reserve MMIO resource\n", - bdev->bus_id); + dev_name(bdev)); ret = -EBUSY; goto err_out_dev; } diff --git a/drivers/net/depca.c b/drivers/net/depca.c index e4cef491dc7..55625dbbae5 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -606,8 +606,8 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) return -ENXIO; - printk ("%s: %s at 0x%04lx", - device->bus_id, depca_signature[lp->adapter], ioaddr); + printk("%s: %s at 0x%04lx", + dev_name(device), depca_signature[lp->adapter], ioaddr); switch (lp->depca_bus) { #ifdef CONFIG_MCA @@ -669,7 +669,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) spin_lock_init(&lp->lock); sprintf(lp->adapter_name, "%s (%s)", - depca_signature[lp->adapter], device->bus_id); + depca_signature[lp->adapter], dev_name(device)); status = -EBUSY; /* Initialisation Block */ diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 86bb876fb12..861d2eeaa43 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1944,9 +1944,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id) if (stat_ack & stat_ack_rnr) nic->ru_running = RU_SUSPENDED; - if (likely(netif_rx_schedule_prep(&nic->napi))) { + if 
(likely(napi_schedule_prep(&nic->napi))) { e100_disable_irq(nic); - __netif_rx_schedule(&nic->napi); + __napi_schedule(&nic->napi); } return IRQ_HANDLED; @@ -1962,7 +1962,7 @@ static int e100_poll(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); e100_enable_irq(nic); } diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index f5581de0475..e9a416f4016 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -182,7 +182,6 @@ struct e1000_tx_ring { /* array of buffer information structs */ struct e1000_buffer *buffer_info; - spinlock_t tx_lock; u16 tdh; u16 tdt; bool last_tx_tso; @@ -238,7 +237,6 @@ struct e1000_adapter { u16 link_speed; u16 link_duplex; spinlock_t stats_lock; - spinlock_t tx_queue_lock; unsigned int total_tx_bytes; unsigned int total_tx_packets; unsigned int total_rx_bytes; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index c986978ce76..40db34deebd 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -1048,8 +1048,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->features |= NETIF_F_LLTX; - netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_HW_CSUM; @@ -1368,8 +1366,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) return -ENOMEM; } - spin_lock_init(&adapter->tx_queue_lock); - /* Explicitly disable IRQ since the NIC can be in any state. */ e1000_irq_disable(adapter); @@ -1624,7 +1620,6 @@ setup_tx_desc_die: txdr->next_to_use = 0; txdr->next_to_clean = 0; - spin_lock_init(&txdr->tx_lock); return 0; } @@ -2865,11 +2860,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, return false; switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): if (ip_hdr(skb)->protocol == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; break; - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IPV6): /* XXX not handling all IPV6 headers */ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; @@ -3185,7 +3180,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; unsigned int len = skb->len - skb->data_len; - unsigned long flags; unsigned int nr_frags; unsigned int mss; int count = 0; @@ -3290,22 +3284,15 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) (hw->mac_type == e1000_82573)) e1000_transfer_dhcp_info(adapter, skb); - if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) - /* Collision - tell upper layer to requeue */ - return NETDEV_TX_LOCKED; - /* need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ - if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) { - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) return NETDEV_TX_BUSY; - } if (unlikely(hw->mac_type == e1000_82547)) { if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { netif_stop_queue(netdev); mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_BUSY; } } @@ -3320,7 +3307,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tso = e1000_tso(adapter, tx_ring, 
skb); if (tso < 0) { dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_OK; } @@ -3345,7 +3331,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_OK; } @@ -3687,12 +3672,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - if (likely(netif_rx_schedule_prep(&adapter->napi))) { + if (likely(napi_schedule_prep(&adapter->napi))) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } else e1000_irq_enable(adapter); @@ -3747,12 +3732,12 @@ static irqreturn_t e1000_intr(int irq, void *data) ew32(IMC, ~0); E1000_WRITE_FLUSH(); } - if (likely(netif_rx_schedule_prep(&adapter->napi))) { + if (likely(napi_schedule_prep(&adapter->napi))) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } else /* this really should not happen! if it does it is basically a * bug, but not a hard error, so enable ints and continue */ @@ -3773,15 +3758,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) adapter = netdev_priv(poll_dev); - /* e1000_clean is called per-cpu. This lock protects - * tx_ring[0] from being cleaned by multiple cpus - * simultaneously. A failure obtaining the lock means - * tx_ring[0] is currently being cleaned anyway. */ - if (spin_trylock(&adapter->tx_queue_lock)) { - tx_cleaned = e1000_clean_tx_irq(adapter, - &adapter->tx_ring[0]); - spin_unlock(&adapter->tx_queue_lock); - } + tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); @@ -3793,7 +3770,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) if (work_done < budget) { if (likely(adapter->itr_setting & 3)) e1000_set_itr(adapter); - netif_rx_complete(napi); + napi_complete(napi); e1000_irq_enable(adapter); } diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 37bcb190eef..28bf9a51346 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -195,8 +195,6 @@ struct e1000_adapter { u16 link_duplex; u16 eeprom_vers; - spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ - /* track device up/down/testing state */ unsigned long state; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 91817d0afca..c425b19e336 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -47,7 +47,7 @@ #include "e1000.h" -#define DRV_VERSION "0.3.3.3-k6" +#define DRV_VERSION "0.3.3.4-k2" char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -99,8 +99,8 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) - vlan_hwaccel_receive_skb(skb, adapter->vlgrp, - le16_to_cpu(vlan)); + vlan_gro_receive(&adapter->napi, adapter->vlgrp, + le16_to_cpu(vlan), skb); else napi_gro_receive(&adapter->napi, skb); } @@ -1179,12 +1179,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - 
if (netif_rx_schedule_prep(&adapter->napi)) { + if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } return IRQ_HANDLED; @@ -1246,12 +1246,12 @@ static irqreturn_t e1000_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - if (netif_rx_schedule_prep(&adapter->napi)) { + if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } return IRQ_HANDLED; @@ -1320,10 +1320,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data) adapter->rx_ring->set_itr = 0; } - if (netif_rx_schedule_prep(&adapter->napi)) { + if (napi_schedule_prep(&adapter->napi)) { adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } return IRQ_HANDLED; } @@ -1698,7 +1698,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - spin_lock_init(&adapter->tx_queue_lock); return 0; err: @@ -2007,16 +2006,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) goto clean_rx; - /* - * e1000_clean is called per-cpu. This lock protects - * tx_ring from being cleaned by multiple cpus - * simultaneously. A failure obtaining the lock means - * tx_ring is currently being cleaned anyway. - */ - if (spin_trylock(&adapter->tx_queue_lock)) { - tx_cleaned = e1000_clean_tx_irq(adapter); - spin_unlock(&adapter->tx_queue_lock); - } + tx_cleaned = e1000_clean_tx_irq(adapter); clean_rx: adapter->clean_rx(adapter, &work_done, budget); @@ -2028,7 +2018,7 @@ clean_rx: if (work_done < budget) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); - netif_rx_complete(napi); + napi_complete(napi); if (adapter->msix_entries) ew32(IMS, adapter->rx_ring->ims_val); else @@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) if (e1000_alloc_queues(adapter)) return -ENOMEM; - spin_lock_init(&adapter->tx_queue_lock); - /* Explicitly disable IRQ since the NIC can be in any state. 
*/ e1000_irq_disable(adapter); @@ -3782,11 +3770,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) return 0; switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): if (ip_hdr(skb)->protocol == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; break; - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IPV6): /* XXX not handling all IPV6 headers */ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; @@ -4069,7 +4057,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; unsigned int len = skb->len - skb->data_len; - unsigned long irq_flags; unsigned int nr_frags; unsigned int mss; int count = 0; @@ -4138,18 +4125,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (adapter->hw.mac.tx_pkt_filtering) e1000_transfer_dhcp_info(adapter, skb); - if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags)) - /* Collision - tell upper layer to requeue */ - return NETDEV_TX_LOCKED; - /* * need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ - if (e1000_maybe_stop_tx(netdev, count + 2)) { - spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + if (e1000_maybe_stop_tx(netdev, count + 2)) return NETDEV_TX_BUSY; - } if (adapter->vlgrp && vlan_tx_tag_present(skb)) { tx_flags |= E1000_TX_FLAGS_VLAN; @@ -4161,7 +4142,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tso = e1000_tso(adapter, skb); if (tso < 0) { dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); return NETDEV_TX_OK; } @@ -4182,7 +4162,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (count < 0) { /* handle pci_map_single() error in e1000_tx_map */ dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); return NETDEV_TX_OK; } @@ -4193,7 +4172,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); - spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); return NETDEV_TX_OK; } @@ -4922,12 +4900,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - /* - * We should not be using LLTX anymore, but we are still Tx faster with - * it. 
- */ - netdev->features |= NETIF_F_LLTX; - if (e1000e_enable_mng_pass_thru(&adapter->hw)) adapter->flags |= FLAG_MNG_PT_ENABLED; diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 6271b9411cc..f7e2ccfd3e8 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0096" +#define DRV_VERSION "EHEA_0097" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index dfe92264e82..489fdb90f76 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -308,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) memset(stats, 0, sizeof(*stats)); - cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC); + cb2 = (void *)get_zeroed_page(GFP_ATOMIC); if (!cb2) { ehea_error("no mem for cb2"); goto out; @@ -341,7 +341,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) stats->rx_packets = rx_packets; out_herr: - kfree(cb2); + free_page((unsigned long)cb2); out: return stats; } @@ -370,8 +370,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) EHEA_L_PKT_SIZE); if (!skb_arr_rq1[index]) { pr->rq1_skba.os_skbs = fill_wqes - i; - ehea_error("%s: no mem for skb/%d wqes filled", - dev->name, i); break; } } @@ -387,26 +385,19 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) ehea_update_rq1a(pr->qp, adder); } -static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) +static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) { - int ret = 0; struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; struct net_device *dev = pr->port->netdev; int i; for (i = 0; i < pr->rq1_skba.len; i++) { skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb_arr_rq1[i]) { - ehea_error("%s: no mem for skb/%d wqes filled", - dev->name, i); - ret = -ENOMEM; - goto out; - } + if (!skb_arr_rq1[i]) + break; } /* Ring doorbell */ ehea_update_rq1a(pr->qp, nr_rq1a); -out: - return ret; } static int ehea_refill_rq_def(struct ehea_port_res *pr, @@ -435,10 +426,12 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, u64 tmp_addr; struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); if (!skb) { - ehea_error("%s: no mem for skb/%d wqes filled", - pr->port->netdev->name, i); q_skba->os_skbs = fill_wqes - i; - ret = -ENOMEM; + if (q_skba->os_skbs == q_skba->len - 2) { + ehea_info("%s: rq%i ran dry - no mem for skb", + pr->port->netdev->name, rq_nr); + ret = -ENOMEM; + } break; } skb_reserve(skb, NET_IP_ALIGN); @@ -830,7 +823,7 @@ static int ehea_poll(struct napi_struct *napi, int budget) while ((rx != budget) || force_irq) { pr->poll_counter = 0; force_irq = 0; - netif_rx_complete(napi); + napi_complete(napi); ehea_reset_cq_ep(pr->recv_cq); ehea_reset_cq_ep(pr->send_cq); ehea_reset_cq_n1(pr->recv_cq); @@ -841,7 +834,7 @@ static int ehea_poll(struct napi_struct *napi, int budget) if (!cqe && !cqe_skb) return rx; - if (!netif_rx_reschedule(napi)) + if (!napi_reschedule(napi)) return rx; cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); @@ -859,7 +852,7 @@ static void ehea_netpoll(struct net_device *dev) int i; for (i = 0; i < port->num_def_qps; i++) - netif_rx_schedule(&port->port_res[i].napi); + napi_schedule(&port->port_res[i].napi); } #endif @@ -867,7 +860,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param) { struct ehea_port_res *pr = param; - netif_rx_schedule(&pr->napi); + 
napi_schedule(&pr->napi); return IRQ_HANDLED; } @@ -915,7 +908,7 @@ int ehea_sense_port_attr(struct ehea_port *port) struct hcp_ehea_port_cb0 *cb0; /* may be called via ehea_neq_tasklet() */ - cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); + cb0 = (void *)get_zeroed_page(GFP_ATOMIC); if (!cb0) { ehea_error("no mem for cb0"); ret = -ENOMEM; @@ -996,7 +989,7 @@ int ehea_sense_port_attr(struct ehea_port *port) out_free: if (ret || netif_msg_probe(port)) ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); - kfree(cb0); + free_page((unsigned long)cb0); out: return ret; } @@ -1007,7 +1000,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) u64 hret; int ret = 0; - cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb4 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb4) { ehea_error("no mem for cb4"); ret = -ENOMEM; @@ -1075,7 +1068,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) netif_carrier_on(port->netdev); - kfree(cb4); + free_page((unsigned long)cb4); out: return ret; } @@ -1201,11 +1194,11 @@ static int ehea_fill_port_res(struct ehea_port_res *pr) int ret; struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; - ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 - - init_attr->act_nr_rwqes_rq2 - - init_attr->act_nr_rwqes_rq3 - 1); + ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 + - init_attr->act_nr_rwqes_rq2 + - init_attr->act_nr_rwqes_rq3 - 1); - ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); + ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); @@ -1302,7 +1295,7 @@ static int ehea_configure_port(struct ehea_port *port) struct hcp_ehea_port_cb0 *cb0; ret = -ENOMEM; - cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb0 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb0) goto out; @@ -1338,7 +1331,7 @@ static int ehea_configure_port(struct ehea_port *port) ret = 0; out_free: - kfree(cb0); + free_page((unsigned long)cb0); out: return ret; } @@ -1748,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) goto out; } - cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb0 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb0) { ehea_error("no mem for cb0"); ret = -ENOMEM; @@ -1793,7 +1786,7 @@ out_upregs: ehea_update_bcmc_registrations(); spin_unlock(&ehea_bcmc_regs.lock); out_free: - kfree(cb0); + free_page((unsigned long)cb0); out: return ret; } @@ -1817,7 +1810,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable) if ((enable && port->promisc) || (!enable && !port->promisc)) return; - cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC); + cb7 = (void *)get_zeroed_page(GFP_ATOMIC); if (!cb7) { ehea_error("no mem for cb7"); goto out; @@ -1836,7 +1829,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable) port->promisc = enable; out: - kfree(cb7); + free_page((unsigned long)cb7); return; } @@ -2217,7 +2210,7 @@ static void ehea_vlan_rx_register(struct net_device *dev, port->vgrp = grp; - cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb1 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb1) { ehea_error("no mem for cb1"); goto out; @@ -2228,7 +2221,7 @@ static void ehea_vlan_rx_register(struct net_device *dev, if (hret != H_SUCCESS) ehea_error("modify_ehea_port failed"); - kfree(cb1); + free_page((unsigned long)cb1); out: return; } @@ -2241,7 +2234,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) int index; u64 hret; - cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb1 = (void 
*)get_zeroed_page(GFP_KERNEL); if (!cb1) { ehea_error("no mem for cb1"); goto out; @@ -2262,7 +2255,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) if (hret != H_SUCCESS) ehea_error("modify_ehea_port failed"); out: - kfree(cb1); + free_page((unsigned long)cb1); return; } @@ -2276,7 +2269,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) vlan_group_set_device(port->vgrp, vid, NULL); - cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb1 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb1) { ehea_error("no mem for cb1"); goto out; @@ -2297,7 +2290,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) if (hret != H_SUCCESS) ehea_error("modify_ehea_port failed"); out: - kfree(cb1); + free_page((unsigned long)cb1); return; } @@ -2309,7 +2302,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) u64 dummy64 = 0; struct hcp_modify_qp_cb0 *cb0; - cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb0 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb0) { ret = -ENOMEM; goto out; @@ -2372,7 +2365,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) ret = 0; out: - kfree(cb0); + free_page((unsigned long)cb0); return ret; } @@ -2664,7 +2657,7 @@ int ehea_stop_qps(struct net_device *dev) u64 dummy64 = 0; u16 dummy16 = 0; - cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb0 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb0) { ret = -ENOMEM; goto out; @@ -2716,7 +2709,7 @@ int ehea_stop_qps(struct net_device *dev) ret = 0; out: - kfree(cb0); + free_page((unsigned long)cb0); return ret; } @@ -2766,7 +2759,7 @@ int ehea_restart_qps(struct net_device *dev) u64 dummy64 = 0; u16 dummy16 = 0; - cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb0 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb0) { ret = -ENOMEM; goto out; @@ -2819,7 +2812,7 @@ int ehea_restart_qps(struct net_device *dev) ehea_refill_rq3(pr, 0); } out: - kfree(cb0); + free_page((unsigned long)cb0); return ret; } @@ -2950,7 +2943,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter) u64 hret; int ret; - cb = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb = (void *)get_zeroed_page(GFP_KERNEL); if (!cb) { ret = -ENOMEM; goto out; @@ -2967,7 +2960,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter) ret = 0; out_herr: - kfree(cb); + free_page((unsigned long)cb); out: return ret; } @@ -2981,7 +2974,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) *jumbo = 0; /* (Try to) enable *jumbo frames */ - cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); + cb4 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb4) { ehea_error("no mem for cb4"); ret = -ENOMEM; @@ -3009,7 +3002,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) } else ret = -EINVAL; - kfree(cb4); + free_page((unsigned long)cb4); } out: return ret; @@ -3040,7 +3033,7 @@ static struct device *ehea_register_port(struct ehea_port *port, port->ofdev.dev.parent = &port->adapter->ofdev->dev; port->ofdev.dev.bus = &ibmebus_bus_type; - sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++); + dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++); port->ofdev.dev.release = logical_port_release; ret = of_device_register(&port->ofdev); @@ -3069,6 +3062,22 @@ static void ehea_unregister_port(struct ehea_port *port) of_device_unregister(&port->ofdev); } +static const struct net_device_ops ehea_netdev_ops = { + .ndo_open = ehea_open, + .ndo_stop = ehea_stop, + .ndo_start_xmit = ehea_start_xmit, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ehea_netpoll, 
+#endif + .ndo_get_stats = ehea_get_stats, + .ndo_set_mac_address = ehea_set_mac_addr, + .ndo_set_multicast_list = ehea_set_multicast_list, + .ndo_change_mtu = ehea_change_mtu, + .ndo_vlan_rx_register = ehea_vlan_rx_register, + .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid +}; + struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, u32 logical_port_id, struct device_node *dn) @@ -3121,19 +3130,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, /* initialize net_device structure */ memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); - dev->open = ehea_open; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = ehea_netpoll; -#endif - dev->stop = ehea_stop; - dev->hard_start_xmit = ehea_start_xmit; - dev->get_stats = ehea_get_stats; - dev->set_multicast_list = ehea_set_multicast_list; - dev->set_mac_address = ehea_set_mac_addr; - dev->change_mtu = ehea_change_mtu; - dev->vlan_rx_register = ehea_vlan_rx_register; - dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid; - dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid; + dev->netdev_ops = &ehea_netdev_ops; + ehea_set_ethtool_ops(dev); + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER @@ -3142,7 +3141,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; INIT_WORK(&port->reset_task, ehea_reset_port); - ehea_set_ethtool_ops(dev); ret = register_netdev(dev); if (ret) { diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 49d766ebbcf..3747457f5e6 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -1005,7 +1005,7 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle) unsigned long ret; u64 *rblock; - rblock = kzalloc(PAGE_SIZE, GFP_KERNEL); + rblock = (void *)get_zeroed_page(GFP_KERNEL); if (!rblock) { ehea_error("Cannot allocate rblock memory."); return; @@ -1022,5 +1022,5 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle) else ehea_error("Error data could not be fetched: %llX", res_handle); - kfree(rblock); + free_page((unsigned long)rblock); } diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index 7d60551d538..5dd11563553 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -411,8 +411,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) } if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { - if (netif_rx_schedule_prep(&enic->napi)) - __netif_rx_schedule(&enic->napi); + if (napi_schedule_prep(&enic->napi)) + __napi_schedule(&enic->napi); } else { vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); } @@ -440,7 +440,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data) * writes). */ - netif_rx_schedule(&enic->napi); + napi_schedule(&enic->napi); return IRQ_HANDLED; } @@ -450,7 +450,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data) struct enic *enic = data; /* schedule NAPI polling for RQ cleanup */ - netif_rx_schedule(&enic->napi); + napi_schedule(&enic->napi); return IRQ_HANDLED; } @@ -570,11 +570,11 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic, * to each TCP segment resulting from the TSO. 
*/ - if (skb->protocol == __constant_htons(ETH_P_IP)) { + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { ip_hdr(skb)->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - } else if (skb->protocol == __constant_htons(ETH_P_IPV6)) { + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } @@ -1068,7 +1068,7 @@ static int enic_poll(struct napi_struct *napi, int budget) if (netdev->features & NETIF_F_LRO) lro_flush_all(&enic->lro_mgr); - netif_rx_complete(napi); + napi_complete(napi); vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); } @@ -1112,7 +1112,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) if (netdev->features & NETIF_F_LRO) lro_flush_all(&enic->lro_mgr); - netif_rx_complete(napi); + napi_complete(napi); vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); } diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index a539bc3163c..b60e27dfcfa 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -1114,9 +1114,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { spin_lock(&ep->napi_lock); - if (netif_rx_schedule_prep(&ep->napi)) { + if (napi_schedule_prep(&ep->napi)) { epic_napi_irq_off(dev, ep); - __netif_rx_schedule(&ep->napi); + __napi_schedule(&ep->napi); } else ep->reschedule_in_poll++; spin_unlock(&ep->napi_lock); @@ -1293,7 +1293,7 @@ rx_action: more = ep->reschedule_in_poll; if (!more) { - __netif_rx_complete(napi); + __napi_complete(napi); outl(EpicNapiEvent, ioaddr + INTSTAT); epic_napi_irq_on(dev, ep); } else diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 2769083bfe8..fe2650237e3 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -36,30 +36,43 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/bitops.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/platform_device.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/pgtable.h> #include <asm/cacheflush.h> +#ifndef CONFIG_ARCH_MXC #include <asm/coldfire.h> #include <asm/mcfsim.h> +#endif + #include "fec.h" -#if defined(CONFIG_FEC2) -#define FEC_MAX_PORTS 2 +#ifdef CONFIG_ARCH_MXC +#include <mach/hardware.h> +#define FEC_ALIGNMENT 0xf #else -#define FEC_MAX_PORTS 1 +#define FEC_ALIGNMENT 0x3 #endif +#if defined CONFIG_M5272 || defined CONFIG_M527x || defined CONFIG_M523x \ + || defined CONFIG_M528x || defined CONFIG_M532x || defined CONFIG_M520x +#define FEC_LEGACY +/* + * Define the fixed address of the FEC hardware. + */ #if defined(CONFIG_M5272) #define HAVE_mii_link_interrupt #endif -/* - * Define the fixed address of the FEC hardware. - */ +#if defined(CONFIG_FEC2) +#define FEC_MAX_PORTS 2 +#else +#define FEC_MAX_PORTS 1 +#endif + static unsigned int fec_hw[] = { #if defined(CONFIG_M5272) (MCF_MBAR + 0x840), @@ -72,8 +85,6 @@ static unsigned int fec_hw[] = { (MCF_MBAR+0x30000), #elif defined(CONFIG_M532x) (MCF_MBAR+0xfc030000), -#else - &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec), #endif }; @@ -99,6 +110,8 @@ static unsigned char fec_mac_default[] = { #define FEC_FLASHMAC 0 #endif +#endif /* FEC_LEGACY */ + /* Forward declarations of some structures to support different PHYs */ @@ -162,7 +175,7 @@ typedef struct { * account when setting it. 
*/ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) #else #define OPT_FRAME_SIZE 0 @@ -182,6 +195,8 @@ struct fec_enet_private { struct net_device *netdev; + struct clk *clk; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; @@ -190,6 +205,7 @@ struct fec_enet_private { /* CPM dual port RAM relative addresses. */ + dma_addr_t bd_dma; cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ cbd_t *tx_bd_base; cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ @@ -342,10 +358,10 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) * 4-byte boundaries. Use bounce buffers to copy data * and get it aligned. Ugh. */ - if (bdp->cbd_bufaddr & 0x3) { + if (bdp->cbd_bufaddr & FEC_ALIGNMENT) { unsigned int index; index = bdp - fep->tx_bd_base; - memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen); + memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); } @@ -359,8 +375,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Push the data cache so the CPM does not get stale memory * data. */ - flush_dcache_range((unsigned long)skb->data, - (unsigned long)skb->data + skb->len); + dma_sync_single(NULL, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. @@ -633,6 +649,9 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { dev->stats.rx_bytes += pkt_len; data = (__u8*)__va(bdp->cbd_bufaddr); + dma_sync_single(NULL, (unsigned long)__pa(data), + pkt_len - 4, DMA_FROM_DEVICE); + /* This does 16 byte alignment, exactly what we need. * The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up @@ -1114,7 +1133,7 @@ static phy_info_t const phy_info_am79c874 = { /* register definitions for the 8721 */ #define MII_KS8721BL_RXERCR 21 -#define MII_KS8721BL_ICSR 22 +#define MII_KS8721BL_ICSR 27 #define MII_KS8721BL_PHYCR 31 static phy_cmd_t const phy_cmd_ks8721bl_config[] = { @@ -1308,10 +1327,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; } -static void __inline__ fec_enable_phy_intr(void) -{ -} - static void __inline__ fec_disable_phy_intr(void) { volatile unsigned long *icrp; @@ -1327,17 +1342,6 @@ static void __inline__ fec_phy_ack_intr(void) *icrp = 0x0d000000; } -static void __inline__ fec_localhw_setup(void) -{ -} - -/* - * Do not need to make region uncached on 5272. - */ -static void __inline__ fec_uncache(unsigned long addr) -{ -} - /* ------------------------------------------------------------------------- */ #elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) @@ -1477,10 +1481,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; } -static void __inline__ fec_enable_phy_intr(void) -{ -} - static void __inline__ fec_disable_phy_intr(void) { } @@ -1489,17 +1489,6 @@ static void __inline__ fec_phy_ack_intr(void) { } -static void __inline__ fec_localhw_setup(void) -{ -} - -/* - * Do not need to make region uncached on 5272. 
- */ -static void __inline__ fec_uncache(unsigned long addr) -{ -} - /* ------------------------------------------------------------------------- */ #elif defined(CONFIG_M520x) @@ -1598,10 +1587,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; } -static void __inline__ fec_enable_phy_intr(void) -{ -} - static void __inline__ fec_disable_phy_intr(void) { } @@ -1610,14 +1595,6 @@ static void __inline__ fec_phy_ack_intr(void) { } -static void __inline__ fec_localhw_setup(void) -{ -} - -static void __inline__ fec_uncache(unsigned long addr) -{ -} - /* ------------------------------------------------------------------------- */ #elif defined(CONFIG_M532x) @@ -1737,10 +1714,6 @@ static void __inline__ fec_get_mac(struct net_device *dev) dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; } -static void __inline__ fec_enable_phy_intr(void) -{ -} - static void __inline__ fec_disable_phy_intr(void) { } @@ -1749,107 +1722,6 @@ static void __inline__ fec_phy_ack_intr(void) { } -static void __inline__ fec_localhw_setup(void) -{ -} - -/* - * Do not need to make region uncached on 532x. - */ -static void __inline__ fec_uncache(unsigned long addr) -{ -} - -/* ------------------------------------------------------------------------- */ - - -#else - -/* - * Code specific to the MPC860T setup. - */ -static void __inline__ fec_request_intrs(struct net_device *dev) -{ - volatile immap_t *immap; - - immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ - - if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0) - panic("Could not allocate FEC IRQ!"); -} - -static void __inline__ fec_get_mac(struct net_device *dev) -{ - bd_t *bd; - - bd = (bd_t *)__res; - memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN); -} - -static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) -{ - extern uint _get_IMMR(void); - volatile immap_t *immap; - volatile fec_t *fecp; - - fecp = fep->hwp; - immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ - - /* Configure all of port D for MII. - */ - immap->im_ioport.iop_pdpar = 0x1fff; - - /* Bits moved from Rev. D onward. - */ - if ((_get_IMMR() & 0xffff) < 0x0501) - immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */ - else - immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */ - - /* Set MII speed to 2.5 MHz - */ - fecp->fec_mii_speed = fep->phy_speed = - ((bd->bi_busfreq * 1000000) / 2500000) & 0x7e; -} - -static void __inline__ fec_enable_phy_intr(void) -{ - volatile fec_t *fecp; - - fecp = fep->hwp; - - /* Enable MII command finished interrupt - */ - fecp->fec_ivec = (FEC_INTERRUPT/2) << 29; -} - -static void __inline__ fec_disable_phy_intr(void) -{ -} - -static void __inline__ fec_phy_ack_intr(void) -{ -} - -static void __inline__ fec_localhw_setup(void) -{ - volatile fec_t *fecp; - - fecp = fep->hwp; - fecp->fec_r_hash = PKT_MAXBUF_SIZE; - /* Enable big endian and don't care about SDMA FC. 
- */ - fecp->fec_fun_code = 0x78000000; -} - -static void __inline__ fec_uncache(unsigned long addr) -{ - pte_t *pte; - pte = va_to_pte(mem_addr); - pte_val(*pte) |= _PAGE_NO_CACHE; - flush_tlb_page(init_mm.mmap, mem_addr); -} - #endif /* ------------------------------------------------------------------------- */ @@ -2055,7 +1927,9 @@ mii_discover_phy(uint mii_reg, struct net_device *dev) printk("FEC: No PHY device found.\n"); /* Disable external MII interface */ fecp->fec_mii_speed = fep->phy_speed = 0; +#ifdef FREC_LEGACY fec_disable_phy_intr(); +#endif } } @@ -2237,12 +2111,12 @@ fec_set_mac_address(struct net_device *dev) } -/* Initialize the FEC Ethernet on 860T (or ColdFire 5272). - */ /* * XXX: We need to clean up on failure exits here. + * + * index is only used in legacy code */ -int __init fec_enet_init(struct net_device *dev) +int __init fec_enet_init(struct net_device *dev, int index) { struct fec_enet_private *fep = netdev_priv(dev); unsigned long mem_addr; @@ -2250,15 +2124,11 @@ int __init fec_enet_init(struct net_device *dev) cbd_t *cbd_base; volatile fec_t *fecp; int i, j; - static int index = 0; - - /* Only allow us to be probed once. */ - if (index >= FEC_MAX_PORTS) - return -ENXIO; /* Allocate memory for buffer descriptors. */ - mem_addr = __get_free_page(GFP_KERNEL); + mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE, + &fep->bd_dma, GFP_KERNEL); if (mem_addr == 0) { printk("FEC: allocate descriptor memory failed?\n"); return -ENOMEM; @@ -2269,7 +2139,7 @@ int __init fec_enet_init(struct net_device *dev) /* Create an Ethernet device instance. */ - fecp = (volatile fec_t *) fec_hw[index]; + fecp = (volatile fec_t *)dev->base_addr; fep->index = index; fep->hwp = fecp; @@ -2280,18 +2150,24 @@ int __init fec_enet_init(struct net_device *dev) fecp->fec_ecntrl = 1; udelay(10); - /* Set the Ethernet address. If using multiple Enets on the 8xx, - * this needs some work to get unique addresses. - * - * This is our default MAC address unless the user changes - * it via eth_mac_addr (our dev->set_mac_addr handler). - */ + /* Set the Ethernet address */ +#ifdef FEC_LEGACY fec_get_mac(dev); +#else + { + unsigned long l; + l = fecp->fec_addr_low; + dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); + dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); + dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); + dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); + l = fecp->fec_addr_high; + dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); + dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); + } +#endif cbd_base = (cbd_t *)mem_addr; - /* XXX: missing check for allocation failure */ - - fec_uncache(mem_addr); /* Set receive and transmit descriptor base. */ @@ -2313,8 +2189,6 @@ int __init fec_enet_init(struct net_device *dev) mem_addr = __get_free_page(GFP_KERNEL); /* XXX: missing check for allocation failure */ - fec_uncache(mem_addr); - /* Initialize the BD for every fragment in the page. */ for (j=0; j<FEC_ENET_RX_FRPPG; j++) { @@ -2357,13 +2231,16 @@ int __init fec_enet_init(struct net_device *dev) /* Set receive and transmit descriptor base. */ - fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); - fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); + fecp->fec_r_des_start = fep->bd_dma; + fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t) + * RX_RING_SIZE; +#ifdef FEC_LEGACY /* Install our interrupt handlers. This varies depending on * the architecture. 
*/ fec_request_intrs(dev); +#endif fecp->fec_grp_hash_table_high = 0; fecp->fec_grp_hash_table_low = 0; @@ -2375,8 +2252,6 @@ int __init fec_enet_init(struct net_device *dev) fecp->fec_hash_table_low = 0; #endif - dev->base_addr = (unsigned long)fecp; - /* The FEC Ethernet specific entries in the device structure. */ dev->open = fec_enet_open; dev->hard_start_xmit = fec_enet_start_xmit; @@ -2390,7 +2265,20 @@ int __init fec_enet_init(struct net_device *dev) mii_free = mii_cmds; /* setup MII interface */ +#ifdef FEC_LEGACY fec_set_mii(dev, fep); +#else + fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; + fecp->fec_x_cntrl = 0x00; + + /* + * Set MII speed to 2.5 MHz + */ + fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) + / 2500000) / 2) & 0x3F) << 1; + fecp->fec_mii_speed = fep->phy_speed; + fec_restart(dev, 0); +#endif /* Clear and enable interrupts */ fecp->fec_ievent = 0xffc00000; @@ -2403,7 +2291,6 @@ int __init fec_enet_init(struct net_device *dev) fep->phy_addr = 0; mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); - index++; return 0; } @@ -2430,7 +2317,6 @@ fec_restart(struct net_device *dev, int duplex) /* Clear any outstanding interrupt. */ fecp->fec_ievent = 0xffc00000; - fec_enable_phy_intr(); /* Set station address. */ @@ -2445,12 +2331,11 @@ fec_restart(struct net_device *dev, int duplex) */ fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; - fec_localhw_setup(); - /* Set receive and transmit descriptor base. */ - fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); - fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); + fecp->fec_r_des_start = fep->bd_dma; + fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t) + * RX_RING_SIZE; fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; fep->cur_rx = fep->rx_bd_base; @@ -2552,12 +2437,12 @@ fec_stop(struct net_device *dev) /* Clear outstanding MII command interrupts. 
*/ fecp->fec_ievent = FEC_ENET_MII; - fec_enable_phy_intr(); fecp->fec_imask = FEC_ENET_MII; fecp->fec_mii_speed = fep->phy_speed; } +#ifdef FEC_LEGACY static int __init fec_enet_module_init(void) { struct net_device *dev; @@ -2569,7 +2454,8 @@ static int __init fec_enet_module_init(void) dev = alloc_etherdev(sizeof(struct fec_enet_private)); if (!dev) return -ENOMEM; - err = fec_enet_init(dev); + dev->base_addr = (unsigned long)fec_hw[i]; + err = fec_enet_init(dev, i); if (err) { free_netdev(dev); continue; @@ -2584,6 +2470,170 @@ static int __init fec_enet_module_init(void) } return 0; } +#else + +static int __devinit +fec_probe(struct platform_device *pdev) +{ + struct fec_enet_private *fep; + struct net_device *ndev; + int i, irq, ret = 0; + struct resource *r; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -ENXIO; + + r = request_mem_region(r->start, resource_size(r), pdev->name); + if (!r) + return -EBUSY; + + /* Init network device */ + ndev = alloc_etherdev(sizeof(struct fec_enet_private)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* setup board info structure */ + fep = netdev_priv(ndev); + memset(fep, 0, sizeof(*fep)); + + ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); + + if (!ndev->base_addr) { + ret = -ENOMEM; + goto failed_ioremap; + } + + platform_set_drvdata(pdev, ndev); + + /* This device has up to three irqs on some platforms */ + for (i = 0; i < 3; i++) { + irq = platform_get_irq(pdev, i); + if (i && irq < 0) + break; + ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); + if (ret) { + while (i >= 0) { + irq = platform_get_irq(pdev, i); + free_irq(irq, ndev); + i--; + } + goto failed_irq; + } + } + + fep->clk = clk_get(&pdev->dev, "fec_clk"); + if (IS_ERR(fep->clk)) { + ret = PTR_ERR(fep->clk); + goto failed_clk; + } + clk_enable(fep->clk); + + ret = fec_enet_init(ndev, 0); + if (ret) + goto failed_init; + + ret = register_netdev(ndev); + if (ret) + goto failed_register; + + return 0; + +failed_register: +failed_init: + clk_disable(fep->clk); + clk_put(fep->clk); +failed_clk: + for (i = 0; i < 3; i++) { + irq = platform_get_irq(pdev, i); + if (irq > 0) + free_irq(irq, ndev); + } +failed_irq: + iounmap((void __iomem *)ndev->base_addr); +failed_ioremap: + free_netdev(ndev); + + return ret; +} + +static int __devexit +fec_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + platform_set_drvdata(pdev, NULL); + + fec_stop(ndev); + clk_disable(fep->clk); + clk_put(fep->clk); + iounmap((void __iomem *)ndev->base_addr); + unregister_netdev(ndev); + free_netdev(ndev); + return 0; +} + +static int +fec_suspend(struct platform_device *dev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(dev); + struct fec_enet_private *fep; + + if (ndev) { + fep = netdev_priv(ndev); + if (netif_running(ndev)) { + netif_device_detach(ndev); + fec_stop(ndev); + } + } + return 0; +} + +static int +fec_resume(struct platform_device *dev) +{ + struct net_device *ndev = platform_get_drvdata(dev); + + if (ndev) { + if (netif_running(ndev)) { + fec_enet_init(ndev, 0); + netif_device_attach(ndev); + } + } + return 0; +} + +static struct platform_driver fec_driver = { + .driver = { + .name = "fec", + .owner = THIS_MODULE, + }, + .probe = fec_probe, + .remove = __devexit_p(fec_drv_remove), + .suspend = fec_suspend, + .resume = fec_resume, +}; + +static int __init 
+fec_enet_module_init(void) +{ + printk(KERN_INFO "FEC Ethernet Driver\n"); + + return platform_driver_register(&fec_driver); +} + +static void __exit +fec_enet_cleanup(void) +{ + platform_driver_unregister(&fec_driver); +} + +module_exit(fec_enet_cleanup); + +#endif /* FEC_LEGACY */ module_init(fec_enet_module_init); diff --git a/drivers/net/fec.h b/drivers/net/fec.h index 292719dacef..76c64c92e19 100644 --- a/drivers/net/fec.h +++ b/drivers/net/fec.h @@ -14,7 +14,7 @@ /****************************************************************************/ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models @@ -103,12 +103,19 @@ typedef struct fec { /* * Define the buffer descriptor structure. */ +#ifdef CONFIG_ARCH_MXC +typedef struct bufdesc { + unsigned short cbd_datlen; /* Data length */ + unsigned short cbd_sc; /* Control and status info */ + unsigned long cbd_bufaddr; /* Buffer address */ +} cbd_t; +#else typedef struct bufdesc { unsigned short cbd_sc; /* Control and status info */ unsigned short cbd_datlen; /* Data length */ unsigned long cbd_bufaddr; /* Buffer address */ } cbd_t; - +#endif /* * The following definitions courtesy of commproc.h, which where diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 5b910cf6374..875509d7d86 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -1760,7 +1760,7 @@ static void nv_do_rx_refill(unsigned long data) struct fe_priv *np = netdev_priv(dev); /* Just reschedule NAPI rx processing */ - netif_rx_schedule(&np->napi); + napi_schedule(&np->napi); } #else static void nv_do_rx_refill(unsigned long data) @@ -3406,7 +3406,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) #ifdef CONFIG_FORCEDETH_NAPI if (events & NVREG_IRQ_RX_ALL) { spin_lock(&np->lock); - netif_rx_schedule(&np->napi); + napi_schedule(&np->napi); /* Disable furthur receive irq's */ np->irqmask &= ~NVREG_IRQ_RX_ALL; @@ -3523,7 +3523,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) #ifdef CONFIG_FORCEDETH_NAPI if (events & NVREG_IRQ_RX_ALL) { spin_lock(&np->lock); - netif_rx_schedule(&np->napi); + napi_schedule(&np->napi); /* Disable furthur receive irq's */ np->irqmask &= ~NVREG_IRQ_RX_ALL; @@ -3680,7 +3680,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) /* re-enable receive interrupts */ spin_lock_irqsave(&np->lock, flags); - __netif_rx_complete(napi); + __napi_complete(napi); np->irqmask |= NVREG_IRQ_RX_ALL; if (np->msi_flags & NV_MSI_X_ENABLED) @@ -3706,7 +3706,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); if (events) { - netif_rx_schedule(&np->napi); + napi_schedule(&np->napi); /* disable receive interrupts on the nic */ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); pci_push(base); diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index ce900e54d8d..b037ce9857b 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -209,7 +209,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) if (received < budget) { /* done */ - netif_rx_complete(napi); + napi_complete(napi); (*fep->ops->napi_enable_rx)(dev); } return received; @@ -478,7 +478,7 @@ fs_enet_interrupt(int irq, void *dev_id) /* 
NOTE: it is possible for FCCs in NAPI mode */ /* to submit a spurious interrupt while in poll */ if (napi_ok) - __netif_rx_schedule(&fep->napi); + __napi_schedule(&fep->napi); } } diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 3f7eab42aef..eb8302c5ba8 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -141,8 +141,6 @@ void gfar_start(struct net_device *dev); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); -extern const struct ethtool_ops gfar_ethtool_ops; - MODULE_AUTHOR("Freescale Semiconductor, Inc"); MODULE_DESCRIPTION("Gianfar Ethernet Driver"); MODULE_LICENSE("GPL"); @@ -463,6 +461,9 @@ static int gfar_probe(struct of_device *ofdev, goto register_fail; } + device_init_wakeup(&dev->dev, + priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + /* fill out IRQ number and name fields */ len_devname = strlen(dev->name); strncpy(&priv->int_name_tx[0], dev->name, len_devname); @@ -1200,6 +1201,8 @@ static int gfar_enet_open(struct net_device *dev) netif_start_queue(dev); + device_set_wakeup_enable(&dev->dev, priv->wol_en); + return err; } @@ -1623,9 +1626,9 @@ static void gfar_schedule_cleanup(struct net_device *dev) spin_lock_irqsave(&priv->txlock, flags); spin_lock(&priv->rxlock); - if (netif_rx_schedule_prep(&priv->napi)) { + if (napi_schedule_prep(&priv->napi)) { gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); - __netif_rx_schedule(&priv->napi); + __napi_schedule(&priv->napi); } spin_unlock(&priv->rxlock); @@ -1882,7 +1885,7 @@ static int gfar_poll(struct napi_struct *napi, int budget) return budget; if (rx_cleaned < budget) { - netif_rx_complete(napi); + napi_complete(napi); /* Clear the halt bit in RSTAT */ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index b1a83344acc..7820720ceee 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -830,4 +830,6 @@ int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value); int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); +extern const struct ethtool_ops gfar_ethtool_ops; + #endif /* __GIANFAR_H */ diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 59b3b5d98ef..dbf06e9313c 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -600,6 +600,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) spin_lock_irqsave(&priv->bflock, flags); priv->wol_en = wol->wolopts & WAKE_MAGIC ? 
1 : 0; + device_set_wakeup_enable(&dev->dev, priv->wol_en); spin_unlock_irqrestore(&priv->bflock, flags); return 0; diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index f49a426ad68..64e4679b327 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c @@ -105,7 +105,7 @@ int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum) * All PHY configuration is done through the TSEC1 MIIM regs */ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { - struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; + struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv; /* Write to the local MII regs */ return(gfar_local_mdio_write(regs, mii_id, regnum, value)); @@ -116,7 +116,7 @@ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) * configuration has to be done through the TSEC1 MIIM regs */ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { - struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; + struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv; /* Read the local MII regs */ return(gfar_local_mdio_read(regs, mii_id, regnum)); @@ -125,7 +125,7 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) /* Reset the MIIM registers, and wait for the bus to free */ static int gfar_mdio_reset(struct mii_bus *bus) { - struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; + struct gfar_mii __iomem *regs = (void __force __iomem *)bus->priv; unsigned int timeout = PHY_INIT_TIMEOUT; mutex_lock(&bus->mdio_lock); @@ -268,8 +268,8 @@ static int gfar_mdio_probe(struct of_device *ofdev, * Also, we have to cast back to struct gfar_mii because of * definition weirdness done in gianfar.h. */ - enet_regs = (struct gfar __iomem *) - ((char *)regs - offsetof(struct gfar, gfar_mii_regs)); + enet_regs = (struct gfar __force __iomem *) + ((char __force *)regs - offsetof(struct gfar, gfar_mii_regs)); for_each_child_of_node(np, tbi) { if (!strncmp(tbi->type, "tbi-phy", 8)) @@ -337,7 +337,7 @@ static int gfar_mdio_remove(struct of_device *ofdev) dev_set_drvdata(&ofdev->dev, NULL); - iounmap((void __iomem *)bus->priv); + iounmap((void __force __iomem *)bus->priv); bus->priv = NULL; kfree(bus->irq); mdiobus_free(bus); diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c index 782c2017008..74e0b4d4258 100644 --- a/drivers/net/gianfar_sysfs.c +++ b/drivers/net/gianfar_sysfs.c @@ -81,7 +81,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev, return count; } -DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); +static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); static ssize_t gfar_show_rx_stash_size(struct device *dev, struct device_attribute *attr, char *buf) @@ -130,8 +130,8 @@ out: return count; } -DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, - gfar_set_rx_stash_size); +static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, + gfar_set_rx_stash_size); /* Stashing will only be enabled when rx_stash_size != 0 */ static ssize_t gfar_show_rx_stash_index(struct device *dev, @@ -172,8 +172,8 @@ out: return count; } -DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, - gfar_set_rx_stash_index); +static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, + gfar_set_rx_stash_index); static ssize_t gfar_show_fifo_threshold(struct device *dev, struct device_attribute *attr, @@ -210,8 +210,8 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev, return count; } 
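The gianfar_sysfs.c hunks in this area only add `static` to the DEVICE_ATTR() definitions, giving the generated struct device_attribute objects internal linkage, presumably because they are registered from gfar_init_sysfs() in the same file and nothing outside it needs the symbols. A minimal sketch of the underlying sysfs attribute pattern, with a hypothetical attribute and driver state (the names below are illustrative, not taken from this patch):

#include <linux/device.h>
#include <linux/kernel.h>

static int demo_threshold;	/* hypothetical driver state */

static ssize_t demo_threshold_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", demo_threshold);
}

static ssize_t demo_threshold_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	demo_threshold = simple_strtoul(buf, NULL, 0);
	return count;
}

/* Defines a struct device_attribute named dev_attr_demo_threshold */
static DEVICE_ATTR(demo_threshold, 0644, demo_threshold_show,
		   demo_threshold_store);

static void demo_init_sysfs(struct device *dev)
{
	/* Registration happens in the same file, so static linkage suffices */
	if (device_create_file(dev, &dev_attr_demo_threshold))
		dev_err(dev, "could not create sysfs file\n");
}

DEVICE_ATTR(name, ...) expands to a device_attribute object named dev_attr_name, which is what device_create_file() consumes; making it static changes linkage only, not behaviour.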
-DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, - gfar_set_fifo_threshold); +static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, + gfar_set_fifo_threshold); static ssize_t gfar_show_fifo_starve(struct device *dev, struct device_attribute *attr, char *buf) @@ -247,7 +247,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev, return count; } -DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, gfar_set_fifo_starve); +static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, + gfar_set_fifo_starve); static ssize_t gfar_show_fifo_starve_off(struct device *dev, struct device_attribute *attr, @@ -284,8 +285,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev, return count; } -DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, - gfar_set_fifo_starve_off); +static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, + gfar_set_fifo_starve_off); void gfar_init_sysfs(struct net_device *dev) { diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 7e8b3c59a7d..455641f8677 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -1244,7 +1244,7 @@ do { \ csum_add(sum, (ih)->saddr & 0xffff); \ csum_add(sum, (ih)->daddr >> 16); \ csum_add(sum, (ih)->daddr & 0xffff); \ - csum_add(sum, __constant_htons(IPPROTO_UDP)); \ + csum_add(sum, cpu_to_be16(IPPROTO_UDP)); \ csum_add(sum, (uh)->len); \ } while (0) @@ -1255,7 +1255,7 @@ do { \ csum_add(sum, (ih)->saddr & 0xffff); \ csum_add(sum, (ih)->daddr >> 16); \ csum_add(sum, (ih)->daddr & 0xffff); \ - csum_add(sum, __constant_htons(IPPROTO_TCP)); \ + csum_add(sum, cpu_to_be16(IPPROTO_TCP)); \ csum_add(sum, htons(len)); \ } while (0) #endif @@ -1296,7 +1296,7 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev) /* tack on checksum tag */ u32 tagval = 0; struct ethhdr *eh = (struct ethhdr *)skb->data; - if (eh->h_proto == __constant_htons(ETH_P_IP)) { + if (eh->h_proto == cpu_to_be16(ETH_P_IP)) { struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN); if (ih->protocol == IPPROTO_UDP) { struct udphdr *uh @@ -1605,7 +1605,7 @@ static int hamachi_rx(struct net_device *dev) */ if (ntohs(ih->tot_len) >= 46){ /* don't worry about frags */ - if (!(ih->frag_off & __constant_htons(IP_MF|IP_OFFSET))) { + if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) { u32 inv = *(u32 *) &buf_addr[data_size - 16]; u32 *p = (u32 *) &buf_addr[data_size - 20]; register u32 crc, p_r, p_r1; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 2d4089894ec..3da9f394b4c 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -322,23 +322,25 @@ static const struct header_ops sp_header_ops = { .rebuild = sp_rebuild_header, }; +static const struct net_device_ops sp_netdev_ops = { + .ndo_open = sp_open_dev, + .ndo_stop = sp_close, + .ndo_start_xmit = sp_xmit, + .ndo_set_mac_address = sp_set_mac_address, +}; + static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. 
*/ - dev->mtu = SIXP_MTU; - dev->hard_start_xmit = sp_xmit; - dev->open = sp_open_dev; + dev->netdev_ops = &sp_netdev_ops; dev->destructor = free_netdev; - dev->stop = sp_close; - - dev->set_mac_address = sp_set_mac_address; + dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &sp_header_ops; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; - dev->tx_timeout = NULL; /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 81a65e3a1c0..bb78c11559c 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -203,7 +203,6 @@ struct baycom_state { unsigned char buf[TXBUFFER_SIZE]; } hdlctx; - struct net_device_stats stats; unsigned int ptt_keyed; struct sk_buff *skb; /* next transmit packet */ @@ -423,7 +422,7 @@ static void encode_hdlc(struct baycom_state *bc) bc->hdlctx.bufptr = bc->hdlctx.buf; bc->hdlctx.bufcnt = wp - bc->hdlctx.buf; dev_kfree_skb(skb); - bc->stats.tx_packets++; + bc->dev->stats.tx_packets++; } /* ---------------------------------------------------------------------- */ @@ -547,7 +546,7 @@ static void do_rxpacket(struct net_device *dev) pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */ if (!(skb = dev_alloc_skb(pktlen))) { printk("%s: memory squeeze, dropping packet\n", dev->name); - bc->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } cp = skb_put(skb, pktlen); @@ -555,7 +554,7 @@ static void do_rxpacket(struct net_device *dev) memcpy(cp, bc->hdlcrx.buf, pktlen - 1); skb->protocol = ax25_type_trans(skb, dev); netif_rx(skb); - bc->stats.rx_packets++; + dev->stats.rx_packets++; } static int receive(struct net_device *dev, int cnt) @@ -802,19 +801,6 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr) /* --------------------------------------------------------------------- */ -static struct net_device_stats *baycom_get_stats(struct net_device *dev) -{ - struct baycom_state *bc = netdev_priv(dev); - - /* - * Get the current statistics. This may be called with the - * card open or closed. - */ - return &bc->stats; -} - -/* --------------------------------------------------------------------- */ - static void epp_wakeup(void *handle) { struct net_device *dev = (struct net_device *)handle; @@ -1065,10 +1051,10 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT); hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT); hi.data.cs.ptt_keyed = bc->ptt_keyed; - hi.data.cs.tx_packets = bc->stats.tx_packets; - hi.data.cs.tx_errors = bc->stats.tx_errors; - hi.data.cs.rx_packets = bc->stats.rx_packets; - hi.data.cs.rx_errors = bc->stats.rx_errors; + hi.data.cs.tx_packets = dev->stats.tx_packets; + hi.data.cs.tx_errors = dev->stats.tx_errors; + hi.data.cs.rx_packets = dev->stats.rx_packets; + hi.data.cs.rx_errors = dev->stats.rx_errors; break; case HDLCDRVCTL_OLDGETSTAT: @@ -1116,6 +1102,14 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* --------------------------------------------------------------------- */ +static const struct net_device_ops baycom_netdev_ops = { + .ndo_open = epp_open, + .ndo_stop = epp_close, + .ndo_do_ioctl = baycom_ioctl, + .ndo_start_xmit = baycom_send_packet, + .ndo_set_mac_address = baycom_set_mac_address, +}; + /* * Check for a network adaptor of this type, and return '0' if one exists. 
* If dev->base_addr == 0, probe all likely locations. @@ -1143,17 +1137,12 @@ static void baycom_probe(struct net_device *dev) /* * initialize the device struct */ - dev->open = epp_open; - dev->stop = epp_close; - dev->do_ioctl = baycom_ioctl; - dev->hard_start_xmit = baycom_send_packet; - dev->get_stats = baycom_get_stats; /* Fill in the fields of the device structure */ bc->skb = NULL; + dev->netdev_ops = &baycom_netdev_ops; dev->header_ops = &ax25_header_ops; - dev->set_mac_address = baycom_set_mac_address; dev->type = ARPHRD_AX25; /* AF_AX25 device */ dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 46f8f3390e7..2c619bc99ae 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -97,7 +97,7 @@ static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, static int bpq_device_event(struct notifier_block *, unsigned long, void *); static struct packet_type bpq_packet_type = { - .type = __constant_htons(ETH_P_BPQ), + .type = cpu_to_be16(ETH_P_BPQ), .func = bpq_rcv, }; @@ -110,7 +110,6 @@ struct bpqdev { struct list_head bpq_list; /* list of bpq devices chain */ struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* bpq device (bpq#) */ - struct net_device_stats stats; /* some statistics */ char dest_addr[6]; /* ether destination address */ char acpt_addr[6]; /* accept ether frames from this address only */ }; @@ -222,8 +221,8 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty skb_pull(skb, 2); /* Remove the length bytes */ skb_trim(skb, len); /* Set the length of the data */ - bpq->stats.rx_packets++; - bpq->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; ptr = skb_push(skb, 1); *ptr = 0; @@ -292,7 +291,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) bpq = netdev_priv(dev); if ((dev = bpq_get_ether_dev(dev)) == NULL) { - bpq->stats.tx_dropped++; + dev->stats.tx_dropped++; kfree_skb(skb); return -ENODEV; } @@ -300,8 +299,8 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) skb->protocol = ax25_type_trans(skb, dev); skb_reset_network_header(skb); dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); - bpq->stats.tx_packets++; - bpq->stats.tx_bytes+=skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes+=skb->len; dev_queue_xmit(skb); netif_wake_queue(dev); @@ -309,16 +308,6 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) } /* - * Statistics - */ -static struct net_device_stats *bpq_get_stats(struct net_device *dev) -{ - struct bpqdev *bpq = netdev_priv(dev); - - return &bpq->stats; -} - -/* * Set AX.25 callsign */ static int bpq_set_mac_address(struct net_device *dev, void *addr) @@ -454,7 +443,7 @@ static int bpq_seq_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations bpq_seqops = { +static const struct seq_operations bpq_seqops = { .start = bpq_seq_start, .next = bpq_seq_next, .stop = bpq_seq_stop, @@ -477,16 +466,17 @@ static const struct file_operations bpq_info_fops = { /* ------------------------------------------------------------------------ */ +static const struct net_device_ops bpq_netdev_ops = { + .ndo_open = bpq_open, + .ndo_stop = bpq_close, + .ndo_start_xmit = bpq_xmit, + .ndo_set_mac_address = bpq_set_mac_address, + .ndo_do_ioctl = bpq_ioctl, +}; static void bpq_setup(struct net_device *dev) { - - dev->hard_start_xmit = bpq_xmit; - 
dev->open = bpq_open; - dev->stop = bpq_close; - dev->set_mac_address = bpq_set_mac_address; - dev->get_stats = bpq_get_stats; - dev->do_ioctl = bpq_ioctl; + dev->netdev_ops = &bpq_netdev_ops; dev->destructor = free_netdev; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index e67103396ed..881bf818bb4 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -195,7 +195,7 @@ struct scc_priv { int chip; struct net_device *dev; struct scc_info *info; - struct net_device_stats stats; + int channel; int card_base, scc_cmd, scc_data; int tmr_cnt, tmr_ctrl, tmr_mode; @@ -239,7 +239,6 @@ static int scc_open(struct net_device *dev); static int scc_close(struct net_device *dev); static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *scc_get_stats(struct net_device *dev); static int scc_set_mac_address(struct net_device *dev, void *sa); static inline void tx_on(struct scc_priv *priv); @@ -441,6 +440,13 @@ static void __init dev_setup(struct net_device *dev) memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); } +static const struct net_device_ops scc_netdev_ops = { + .ndo_open = scc_open, + .ndo_stop = scc_close, + .ndo_start_xmit = scc_send_packet, + .ndo_do_ioctl = scc_ioctl, +}; + static int __init setup_adapter(int card_base, int type, int n) { int i, irq, chip; @@ -576,11 +582,7 @@ static int __init setup_adapter(int card_base, int type, int n) sprintf(dev->name, "dmascc%i", 2 * n + i); dev->base_addr = card_base; dev->irq = irq; - dev->open = scc_open; - dev->stop = scc_close; - dev->do_ioctl = scc_ioctl; - dev->hard_start_xmit = scc_send_packet; - dev->get_stats = scc_get_stats; + dev->netdev_ops = &scc_netdev_ops; dev->header_ops = &ax25_header_ops; dev->set_mac_address = scc_set_mac_address; } @@ -961,14 +963,6 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) } -static struct net_device_stats *scc_get_stats(struct net_device *dev) -{ - struct scc_priv *priv = dev->ml_priv; - - return &priv->stats; -} - - static int scc_set_mac_address(struct net_device *dev, void *sa) { memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data, @@ -1216,17 +1210,17 @@ static void special_condition(struct scc_priv *priv, int rc) } if (priv->rx_over) { /* We had an overrun */ - priv->stats.rx_errors++; + priv->dev->stats.rx_errors++; if (priv->rx_over == 2) - priv->stats.rx_length_errors++; + priv->dev->stats.rx_length_errors++; else - priv->stats.rx_fifo_errors++; + priv->dev->stats.rx_fifo_errors++; priv->rx_over = 0; } else if (rc & CRC_ERR) { /* Count invalid CRC only if packet length >= minimum */ if (cb >= 15) { - priv->stats.rx_errors++; - priv->stats.rx_crc_errors++; + priv->dev->stats.rx_errors++; + priv->dev->stats.rx_crc_errors++; } } else { if (cb >= 15) { @@ -1239,8 +1233,8 @@ static void special_condition(struct scc_priv *priv, int rc) priv->rx_count++; schedule_work(&priv->rx_work); } else { - priv->stats.rx_errors++; - priv->stats.rx_over_errors++; + priv->dev->stats.rx_errors++; + priv->dev->stats.rx_over_errors++; } } } @@ -1275,7 +1269,7 @@ static void rx_bh(struct work_struct *ugli_api) skb = dev_alloc_skb(cb + 1); if (skb == NULL) { /* Drop packet */ - priv->stats.rx_dropped++; + priv->dev->stats.rx_dropped++; } else { /* Fill buffer */ data = skb_put(skb, cb + 1); @@ -1283,8 +1277,8 @@ static void rx_bh(struct work_struct *ugli_api) 
memcpy(&data[1], priv->rx_buf[i], cb); skb->protocol = ax25_type_trans(skb, priv->dev); netif_rx(skb); - priv->stats.rx_packets++; - priv->stats.rx_bytes += cb; + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += cb; } spin_lock_irqsave(&priv->ring_lock, flags); /* Move tail */ @@ -1351,15 +1345,15 @@ static void es_isr(struct scc_priv *priv) write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); if (res) { /* Update packet statistics */ - priv->stats.tx_errors++; - priv->stats.tx_fifo_errors++; + priv->dev->stats.tx_errors++; + priv->dev->stats.tx_fifo_errors++; /* Other underrun interrupts may already be waiting */ write_scc(priv, R0, RES_EXT_INT); write_scc(priv, R0, RES_EXT_INT); } else { /* Update packet statistics */ - priv->stats.tx_packets++; - priv->stats.tx_bytes += priv->tx_len[i]; + priv->dev->stats.tx_packets++; + priv->dev->stats.tx_bytes += priv->tx_len[i]; /* Remove frame from FIFO */ priv->tx_tail = (i + 1) % NUM_TX_BUF; priv->tx_count--; @@ -1425,7 +1419,7 @@ static void tm_isr(struct scc_priv *priv) write_scc(priv, R15, DCDIE); priv->rr0 = read_scc(priv, R0); if (priv->rr0 & DCD) { - priv->stats.collisions++; + priv->dev->stats.collisions++; rx_on(priv); priv->state = RX_ON; } else { diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 8eba61a1d4a..61de56e45ee 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -154,7 +154,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s) pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */ if (!(skb = dev_alloc_skb(pkt_len))) { printk("%s: memory squeeze, dropping packet\n", dev->name); - s->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } cp = skb_put(skb, pkt_len); @@ -162,7 +162,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s) memcpy(cp, s->hdlcrx.buffer, pkt_len - 1); skb->protocol = ax25_type_trans(skb, dev); netif_rx(skb); - s->stats.rx_packets++; + dev->stats.rx_packets++; } void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s) @@ -326,7 +326,7 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s) s->hdlctx.len = pkt_len+2; /* the appended CRC */ s->hdlctx.tx_state = 2; s->hdlctx.bitstream = 0; - s->stats.tx_packets++; + dev->stats.tx_packets++; break; case 2: if (!s->hdlctx.len) { @@ -427,19 +427,6 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr) } /* --------------------------------------------------------------------- */ - -static struct net_device_stats *hdlcdrv_get_stats(struct net_device *dev) -{ - struct hdlcdrv_state *sm = netdev_priv(dev); - - /* - * Get the current statistics. This may be called with the - * card open or closed. - */ - return &sm->stats; -} - -/* --------------------------------------------------------------------- */ /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. 
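The hamradio hunks above (6pack, baycom_epp, bpqether, dmascc, hdlcdrv) all make the same two changes, and the pattern repeats below for mkiss, scc and yam: the per-driver dev->open/stop/hard_start_xmit/do_ioctl/set_mac_address pointers are gathered into one const struct net_device_ops, and the private struct net_device_stats copies are dropped in favour of the counters already embedded in struct net_device, which also lets the trivial get_stats callbacks disappear. Outside any particular driver, the shape of such a conversion is roughly the sketch below; the foo_* names are illustrative, not taken from this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* counters live in net_device itself; no private stats copy needed */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_xmit,
};

static void foo_setup(struct net_device *dev)
{
	/* replaces the old dev->open, dev->stop, dev->hard_start_xmit,
	 * dev->get_stats and dev->set_mac_address assignments */
	dev->netdev_ops = &foo_netdev_ops;
}

Because dev->stats is what the default statistics path reports, drivers that only ever returned their private counter block need no .ndo_get_stats method at all; scc.c is the one driver in this series that keeps one.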
@@ -568,10 +555,10 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) bi.data.cs.ptt = hdlcdrv_ptt(s); bi.data.cs.dcd = s->hdlcrx.dcd; bi.data.cs.ptt_keyed = s->ptt_keyed; - bi.data.cs.tx_packets = s->stats.tx_packets; - bi.data.cs.tx_errors = s->stats.tx_errors; - bi.data.cs.rx_packets = s->stats.rx_packets; - bi.data.cs.rx_errors = s->stats.rx_errors; + bi.data.cs.tx_packets = dev->stats.tx_packets; + bi.data.cs.tx_errors = dev->stats.tx_errors; + bi.data.cs.rx_packets = dev->stats.rx_packets; + bi.data.cs.rx_errors = dev->stats.rx_errors; break; case HDLCDRVCTL_OLDGETSTAT: @@ -630,6 +617,14 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* --------------------------------------------------------------------- */ +static const struct net_device_ops hdlcdrv_netdev = { + .ndo_open = hdlcdrv_open, + .ndo_stop = hdlcdrv_close, + .ndo_start_xmit = hdlcdrv_send_packet, + .ndo_do_ioctl = hdlcdrv_ioctl, + .ndo_set_mac_address = hdlcdrv_set_mac_address, +}; + /* * Initialize fields in hdlcdrv */ @@ -669,21 +664,13 @@ static void hdlcdrv_setup(struct net_device *dev) s->bitbuf_hdlc.shreg = 0x80; #endif /* HDLCDRV_DEBUG */ - /* - * initialize the device struct - */ - dev->open = hdlcdrv_open; - dev->stop = hdlcdrv_close; - dev->do_ioctl = hdlcdrv_ioctl; - dev->hard_start_xmit = hdlcdrv_send_packet; - dev->get_stats = hdlcdrv_get_stats; /* Fill in the fields of the device structure */ s->skb = NULL; + dev->netdev_ops = &hdlcdrv_netdev; dev->header_ops = &ax25_header_ops; - dev->set_mac_address = hdlcdrv_set_mac_address; dev->type = ARPHRD_AX25; /* AF_AX25 device */ dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index bbdb311b842..ed5b37d4333 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -59,8 +59,6 @@ struct mkiss { unsigned char *xhead; /* pointer to next byte to XMIT */ int xleft; /* bytes left in XMIT queue */ - struct net_device_stats stats; - /* Detailed SLIP statistics. */ int mtu; /* Our mtu (to spot changes!) 
*/ int buffsize; /* Max buffers sizes */ @@ -253,7 +251,7 @@ static void ax_bump(struct mkiss *ax) if (ax->rbuff[0] > 0x0f) { if (ax->rbuff[0] & 0x80) { if (check_crc_16(ax->rbuff, ax->rcount) < 0) { - ax->stats.rx_errors++; + ax->dev->stats.rx_errors++; spin_unlock_bh(&ax->buflock); return; @@ -268,7 +266,7 @@ static void ax_bump(struct mkiss *ax) *ax->rbuff &= ~0x80; } else if (ax->rbuff[0] & 0x20) { if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { - ax->stats.rx_errors++; + ax->dev->stats.rx_errors++; spin_unlock_bh(&ax->buflock); return; } @@ -295,7 +293,7 @@ static void ax_bump(struct mkiss *ax) if ((skb = dev_alloc_skb(count)) == NULL) { printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", ax->dev->name); - ax->stats.rx_dropped++; + ax->dev->stats.rx_dropped++; spin_unlock_bh(&ax->buflock); return; } @@ -303,8 +301,8 @@ static void ax_bump(struct mkiss *ax) memcpy(skb_put(skb,count), ax->rbuff, count); skb->protocol = ax25_type_trans(skb, ax->dev); netif_rx(skb); - ax->stats.rx_packets++; - ax->stats.rx_bytes += count; + ax->dev->stats.rx_packets++; + ax->dev->stats.rx_bytes += count; spin_unlock_bh(&ax->buflock); } @@ -344,7 +342,7 @@ static void kiss_unesc(struct mkiss *ax, unsigned char s) return; } - ax->stats.rx_over_errors++; + ax->dev->stats.rx_over_errors++; set_bit(AXF_ERROR, &ax->flags); } spin_unlock_bh(&ax->buflock); @@ -406,7 +404,7 @@ static void ax_changedmtu(struct mkiss *ax) memcpy(ax->xbuff, ax->xhead, ax->xleft); } else { ax->xleft = 0; - ax->stats.tx_dropped++; + dev->stats.tx_dropped++; } } @@ -417,7 +415,7 @@ static void ax_changedmtu(struct mkiss *ax) memcpy(ax->rbuff, orbuff, ax->rcount); } else { ax->rcount = 0; - ax->stats.rx_over_errors++; + dev->stats.rx_over_errors++; set_bit(AXF_ERROR, &ax->flags); } } @@ -444,7 +442,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ len = ax->mtu; printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name); - ax->stats.tx_dropped++; + dev->stats.tx_dropped++; netif_start_queue(dev); return; } @@ -518,8 +516,8 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); actual = ax->tty->ops->write(ax->tty, ax->xbuff, count); - ax->stats.tx_packets++; - ax->stats.tx_bytes += actual; + dev->stats.tx_packets++; + dev->stats.tx_bytes += actual; ax->dev->trans_start = jiffies; ax->xleft = count - actual; @@ -664,32 +662,28 @@ static int ax_close(struct net_device *dev) return 0; } -static struct net_device_stats *ax_get_stats(struct net_device *dev) -{ - struct mkiss *ax = netdev_priv(dev); - - return &ax->stats; -} - static const struct header_ops ax_header_ops = { .create = ax_header, .rebuild = ax_rebuild_header, }; +static const struct net_device_ops ax_netdev_ops = { + .ndo_open = ax_open_dev, + .ndo_stop = ax_close, + .ndo_start_xmit = ax_xmit, + .ndo_set_mac_address = ax_set_mac_address, +}; + static void ax_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. 
*/ dev->mtu = AX_MTU; - dev->hard_start_xmit = ax_xmit; - dev->open = ax_open_dev; - dev->stop = ax_close; - dev->get_stats = ax_get_stats; - dev->set_mac_address = ax_set_mac_address; dev->hard_header_len = 0; dev->addr_len = 0; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; dev->header_ops = &ax_header_ops; + dev->netdev_ops = &ax_netdev_ops; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); @@ -929,7 +923,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp, while (count--) { if (fp != NULL && *fp++) { if (!test_and_set_bit(AXF_ERROR, &ax->flags)) - ax->stats.rx_errors++; + ax->dev->stats.rx_errors++; cp++; continue; } diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index c011af7088e..2acb18f0697 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -1542,23 +1542,24 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc) /* * Network driver methods * */ /* ******************************************************************** */ +static const struct net_device_ops scc_netdev_ops = { + .ndo_open = scc_net_open, + .ndo_stop = scc_net_close, + .ndo_start_xmit = scc_net_tx, + .ndo_set_mac_address = scc_net_set_mac_address, + .ndo_get_stats = scc_net_get_stats, + .ndo_do_ioctl = scc_net_ioctl, +}; + /* ----> Initialize device <----- */ static void scc_net_setup(struct net_device *dev) { dev->tx_queue_len = 16; /* should be enough... */ - dev->open = scc_net_open; - dev->stop = scc_net_close; - - dev->hard_start_xmit = scc_net_tx; + dev->netdev_ops = &scc_netdev_ops; dev->header_ops = &ax25_header_ops; - dev->set_mac_address = scc_net_set_mac_address; - dev->get_stats = scc_net_get_stats; - dev->do_ioctl = scc_net_ioctl; - dev->tx_timeout = NULL; - memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); @@ -2073,7 +2074,7 @@ static int scc_net_seq_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations scc_net_seq_ops = { +static const struct seq_operations scc_net_seq_ops = { .start = scc_net_seq_start, .next = scc_net_seq_next, .stop = scc_net_seq_stop, diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 5407f7486c9..82a8be7613d 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -115,10 +115,6 @@ struct yam_port { struct net_device *dev; - /* Stats section */ - - struct net_device_stats stats; - int nb_rxint; int nb_mdint; @@ -507,7 +503,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp) } else { if (!(skb = dev_alloc_skb(pkt_len))) { printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name); - ++yp->stats.rx_dropped; + ++dev->stats.rx_dropped; } else { unsigned char *cp; cp = skb_put(skb, pkt_len); @@ -515,7 +511,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp) memcpy(cp, yp->rx_buf, pkt_len - 1); skb->protocol = ax25_type_trans(skb, dev); netif_rx(skb); - ++yp->stats.rx_packets; + ++dev->stats.rx_packets; } } } @@ -677,7 +673,7 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp) yp->tx_count = 1; yp->tx_state = TX_HEAD; } - ++yp->stats.tx_packets; + ++dev->stats.tx_packets; break; case TX_TAIL: if (--yp->tx_count <= 0) { @@ -716,7 +712,7 @@ static irqreturn_t yam_interrupt(int irq, void *dev_id) handled = 1; if (lsr & LSR_OE) - ++yp->stats.rx_fifo_errors; + ++dev->stats.rx_fifo_errors; yp->dcd = (msr & RX_DCD) ? 
1 : 0; @@ -778,16 +774,16 @@ static int yam_seq_show(struct seq_file *seq, void *v) seq_printf(seq, " TxTail %u\n", yp->txtail); seq_printf(seq, " SlotTime %u\n", yp->slot); seq_printf(seq, " Persist %u\n", yp->pers); - seq_printf(seq, " TxFrames %lu\n", yp->stats.tx_packets); - seq_printf(seq, " RxFrames %lu\n", yp->stats.rx_packets); + seq_printf(seq, " TxFrames %lu\n", dev->stats.tx_packets); + seq_printf(seq, " RxFrames %lu\n", dev->stats.rx_packets); seq_printf(seq, " TxInt %u\n", yp->nb_mdint); seq_printf(seq, " RxInt %u\n", yp->nb_rxint); - seq_printf(seq, " RxOver %lu\n", yp->stats.rx_fifo_errors); + seq_printf(seq, " RxOver %lu\n", dev->stats.rx_fifo_errors); seq_printf(seq, "\n"); return 0; } -static struct seq_operations yam_seqops = { +static const struct seq_operations yam_seqops = { .start = yam_seq_start, .next = yam_seq_next, .stop = yam_seq_stop, @@ -812,26 +808,6 @@ static const struct file_operations yam_info_fops = { /* --------------------------------------------------------------------- */ -static struct net_device_stats *yam_get_stats(struct net_device *dev) -{ - struct yam_port *yp; - - if (!dev) - return NULL; - - yp = netdev_priv(dev); - if (yp->magic != YAM_MAGIC) - return NULL; - - /* - * Get the current statistics. This may be called with the - * card open or closed. - */ - return &yp->stats; -} - -/* --------------------------------------------------------------------- */ - static int yam_open(struct net_device *dev) { struct yam_port *yp = netdev_priv(dev); @@ -878,9 +854,9 @@ static int yam_open(struct net_device *dev) /* Reset overruns for all ports - FPGA programming makes overruns */ for (i = 0; i < NR_PORTS; i++) { struct net_device *dev = yam_devs[i]; - struct yam_port *yp = netdev_priv(dev); + inb(LSR(dev->base_addr)); - yp->stats.rx_fifo_errors = 0; + dev->stats.rx_fifo_errors = 0; } printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq, @@ -1068,6 +1044,14 @@ static int yam_set_mac_address(struct net_device *dev, void *addr) /* --------------------------------------------------------------------- */ +static const struct net_device_ops yam_netdev_ops = { + .ndo_open = yam_open, + .ndo_stop = yam_close, + .ndo_start_xmit = yam_send_packet, + .ndo_do_ioctl = yam_ioctl, + .ndo_set_mac_address = yam_set_mac_address, +}; + static void yam_setup(struct net_device *dev) { struct yam_port *yp = netdev_priv(dev); @@ -1088,18 +1072,11 @@ static void yam_setup(struct net_device *dev) dev->base_addr = yp->iobase; dev->irq = yp->irq; - dev->open = yam_open; - dev->stop = yam_close; - dev->do_ioctl = yam_ioctl; - dev->hard_start_xmit = yam_send_packet; - dev->get_stats = yam_get_stats; - skb_queue_head_init(&yp->send_queue); + dev->netdev_ops = &yam_netdev_ops; dev->header_ops = &ax25_header_ops; - dev->set_mac_address = yam_set_mac_address; - dev->type = ARPHRD_AX25; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->mtu = AX25_MTU; diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index dfa6348ac1d..5c6315df86b 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -1028,10 +1028,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) ibmveth_assert(lpar_rc == H_SUCCESS); - netif_rx_complete(napi); + napi_complete(napi); if (ibmveth_rxq_pending_buffer(adapter) && - netif_rx_reschedule(napi)) { + napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); goto restart_poll; @@ -1047,11 +1047,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) 
struct ibmveth_adapter *adapter = netdev_priv(netdev); unsigned long lpar_rc; - if (netif_rx_schedule_prep(&adapter->napi)) { + if (napi_schedule_prep(&adapter->napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); ibmveth_assert(lpar_rc == H_SUCCESS); - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } return IRQ_HANDLED; } diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 13ca73f96ec..f5e4cad7971 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -1110,6 +1110,13 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw) E1000_CTRL_SWDPIN1; wr32(E1000_CTRL, reg); + /* Power on phy for 82576 fiber adapters */ + if (hw->mac.type == e1000_82576) { + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP7_DATA; + wr32(E1000_CTRL_EXT, reg); + } + /* Set switch control to serdes energy detect */ reg = rd32(E1000_CONNSW); reg |= E1000_CONNSW_ENRGSRC; diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index aebef8e48e7..30657ddf484 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -36,12 +36,6 @@ struct igb_adapter; -#ifdef CONFIG_IGB_LRO -#include <linux/inet_lro.h> -#define MAX_LRO_AGGR 32 -#define MAX_LRO_DESCRIPTORS 8 -#endif - /* Interrupt defines */ #define IGB_MIN_DYN_ITR 3000 #define IGB_MAX_DYN_ITR 96000 @@ -176,10 +170,6 @@ struct igb_ring { struct napi_struct napi; int set_itr; struct igb_ring *buddy; -#ifdef CONFIG_IGB_LRO - struct net_lro_mgr lro_mgr; - bool lro_used; -#endif }; }; @@ -288,12 +278,6 @@ struct igb_adapter { int need_ioport; struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; -#ifdef CONFIG_IGB_LRO - unsigned int lro_max_aggr; - unsigned int lro_aggregated; - unsigned int lro_flushed; - unsigned int lro_no_desc; -#endif unsigned int tx_ring_count; unsigned int rx_ring_count; }; diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 3c831f1472a..4606e63fc6f 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -93,11 +93,6 @@ static const struct igb_stats igb_gstrings_stats[] = { { "tx_smbus", IGB_STAT(stats.mgptc) }, { "rx_smbus", IGB_STAT(stats.mgprc) }, { "dropped_smbus", IGB_STAT(stats.mgpdc) }, -#ifdef CONFIG_IGB_LRO - { "lro_aggregated", IGB_STAT(lro_aggregated) }, - { "lro_flushed", IGB_STAT(lro_flushed) }, - { "lro_no_desc", IGB_STAT(lro_no_desc) }, -#endif }; #define IGB_QUEUE_STATS_LEN \ @@ -1921,18 +1916,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev, int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); int j; int i; -#ifdef CONFIG_IGB_LRO - int aggregated = 0, flushed = 0, no_desc = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated; - flushed += adapter->rx_ring[i].lro_mgr.stats.flushed; - no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc; - } - adapter->lro_aggregated = aggregated; - adapter->lro_flushed = flushed; - adapter->lro_no_desc = no_desc; -#endif igb_update_stats(adapter); for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index a50db5398fa..8b80fe34343 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -115,9 +115,6 @@ static bool igb_clean_tx_irq(struct igb_ring *); static int igb_poll(struct napi_struct *, int); static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); -#ifdef CONFIG_IGB_LRO 
-static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *); -#endif static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); @@ -1189,7 +1186,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_TSO6; #ifdef CONFIG_IGB_LRO - netdev->features |= NETIF_F_LRO; + netdev->features |= NETIF_F_GRO; #endif netdev->vlan_features |= NETIF_F_TSO; @@ -1200,7 +1197,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->features |= NETIF_F_LLTX; adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); /* before reading the NVM, reset the controller to put the device in a @@ -1738,14 +1734,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, struct pci_dev *pdev = adapter->pdev; int size, desc_len; -#ifdef CONFIG_IGB_LRO - size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS; - rx_ring->lro_mgr.lro_arr = vmalloc(size); - if (!rx_ring->lro_mgr.lro_arr) - goto err; - memset(rx_ring->lro_mgr.lro_arr, 0, size); -#endif - size = sizeof(struct igb_buffer) * rx_ring->count; rx_ring->buffer_info = vmalloc(size); if (!rx_ring->buffer_info) @@ -1772,10 +1760,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, return 0; err: -#ifdef CONFIG_IGB_LRO - vfree(rx_ring->lro_mgr.lro_arr); - rx_ring->lro_mgr.lro_arr = NULL; -#endif vfree(rx_ring->buffer_info); dev_err(&adapter->pdev->dev, "Unable to allocate memory for " "the receive descriptor ring\n"); @@ -1929,16 +1913,6 @@ static void igb_configure_rx(struct igb_adapter *adapter) rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; wr32(E1000_RXDCTL(j), rxdctl); -#ifdef CONFIG_IGB_LRO - /* Intitial LRO Settings */ - ring->lro_mgr.max_aggr = MAX_LRO_AGGR; - ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; - ring->lro_mgr.get_skb_header = igb_get_skb_hdr; - ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; - ring->lro_mgr.dev = adapter->netdev; - ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; -#endif } if (adapter->num_rx_queues > 1) { @@ -2127,11 +2101,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring) vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; -#ifdef CONFIG_IGB_LRO - vfree(rx_ring->lro_mgr.lro_arr); - rx_ring->lro_mgr.lro_arr = NULL; -#endif - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; @@ -2779,12 +2748,12 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, if (skb->ip_summed == CHECKSUM_PARTIAL) { switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; if (ip_hdr(skb)->protocol == IPPROTO_TCP) tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; break; - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IPV6): /* XXX what about other V6 headers?? 
*/ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; @@ -3385,8 +3354,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data) igb_write_itr(rx_ring); - if (netif_rx_schedule_prep(&rx_ring->napi)) - __netif_rx_schedule(&rx_ring->napi); + if (napi_schedule_prep(&rx_ring->napi)) + __napi_schedule(&rx_ring->napi); #ifdef CONFIG_IGB_DCA if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) @@ -3535,7 +3504,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - netif_rx_schedule(&adapter->rx_ring[0].napi); + napi_schedule(&adapter->rx_ring[0].napi); return IRQ_HANDLED; } @@ -3573,7 +3542,7 @@ static irqreturn_t igb_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - netif_rx_schedule(&adapter->rx_ring[0].napi); + napi_schedule(&adapter->rx_ring[0].napi); return IRQ_HANDLED; } @@ -3608,7 +3577,7 @@ static int igb_poll(struct napi_struct *napi, int budget) !netif_running(netdev)) { if (adapter->itr_setting & 3) igb_set_itr(adapter); - netif_rx_complete(napi); + napi_complete(napi); if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); return 0; @@ -3634,7 +3603,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) /* If not enough Rx work done, exit the polling mode */ if ((work_done == 0) || !netif_running(netdev)) { - netif_rx_complete(napi); + napi_complete(napi); if (adapter->itr_setting & 3) { if (adapter->num_rx_queues == 1) @@ -3764,39 +3733,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) return (count < tx_ring->count); } -#ifdef CONFIG_IGB_LRO - /** - * igb_get_skb_hdr - helper function for LRO header processing - * @skb: pointer to sk_buff to be added to LRO packet - * @iphdr: pointer to ip header structure - * @tcph: pointer to tcp header structure - * @hdr_flags: pointer to header flags - * @priv: pointer to the receive descriptor for the current sk_buff - **/ -static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, - u64 *hdr_flags, void *priv) -{ - union e1000_adv_rx_desc *rx_desc = priv; - u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info & - (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP); - - /* Verify that this is a valid IPv4 TCP packet */ - if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 | - E1000_RXDADV_PKTTYPE_TCP)) - return -1; - - /* Set network headers */ - skb_reset_network_header(skb); - skb_set_transport_header(skb, ip_hdrlen(skb)); - *iphdr = ip_hdr(skb); - *tcph = tcp_hdr(skb); - *hdr_flags = LRO_IPV4 | LRO_TCP; - - return 0; - -} -#endif /* CONFIG_IGB_LRO */ - /** * igb_receive_skb - helper function to handle rx indications * @ring: pointer to receive ring receving this packet @@ -3811,28 +3747,21 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status, struct igb_adapter * adapter = ring->adapter; bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); -#ifdef CONFIG_IGB_LRO - if (adapter->netdev->features & NETIF_F_LRO && - skb->ip_summed == CHECKSUM_UNNECESSARY) { + skb_record_rx_queue(skb, ring->queue_index); + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { if (vlan_extracted) - lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, - adapter->vlgrp, - le16_to_cpu(rx_desc->wb.upper.vlan), - rx_desc); + vlan_gro_receive(&ring->napi, adapter->vlgrp, + le16_to_cpu(rx_desc->wb.upper.vlan), + skb); else - lro_receive_skb(&ring->lro_mgr,skb, rx_desc); - ring->lro_used = 1; + napi_gro_receive(&ring->napi, skb); } else { -#endif if (vlan_extracted) 
vlan_hwaccel_receive_skb(skb, adapter->vlgrp, le16_to_cpu(rx_desc->wb.upper.vlan)); else - netif_receive_skb(skb); -#ifdef CONFIG_IGB_LRO } -#endif } @@ -3987,13 +3916,6 @@ next_desc: rx_ring->next_to_clean = i; cleaned_count = IGB_DESC_UNUSED(rx_ring); -#ifdef CONFIG_IGB_LRO - if (rx_ring->lro_used) { - lro_flush_all(&rx_ring->lro_mgr); - rx_ring->lro_used = 0; - } -#endif - if (cleaned_count) igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index eee28d39568..e2ef16b2970 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1721,14 +1721,14 @@ ixgb_intr(int irq, void *data) if (!test_bit(__IXGB_DOWN, &adapter->flags)) mod_timer(&adapter->watchdog_timer, jiffies); - if (netif_rx_schedule_prep(&adapter->napi)) { + if (napi_schedule_prep(&adapter->napi)) { /* Disable interrupts and register for poll. The flush of the posted write is intentionally left out. */ IXGB_WRITE_REG(&adapter->hw, IMC, ~0); - __netif_rx_schedule(&adapter->napi); + __napi_schedule(&adapter->napi); } return IRQ_HANDLED; } @@ -1749,7 +1749,7 @@ ixgb_clean(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); if (!test_bit(__IXGB_DOWN, &adapter->flags)) ixgb_irq_enable(adapter); } diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index 6e7ef765bcd..f6061950f5d 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2007 Intel Corporation. +# Copyright(c) 1999 - 2009 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index e112008f39c..0ea791ae0d1 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. 
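The ibmveth, igb and ixgb changes above (and the ixgbe, ixp2000, jme, korina and macb changes that follow) are part of the tree-wide rename of the NAPI entry points: netif_rx_schedule_prep(), __netif_rx_schedule(), netif_rx_schedule() and netif_rx_complete() become napi_schedule_prep(), __napi_schedule(), napi_schedule() and napi_complete(), all taking only the struct napi_struct pointer. A minimal interrupt/poll pair in the new spelling might look like this; the foo_* names and the interrupt-mask helpers are assumptions for the sketch, not code from the patch.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_adapter {
	struct napi_struct napi;
	/* ... device state ... */
};

static void foo_irq_disable(struct foo_adapter *adapter);	  /* assumed helper */
static void foo_irq_enable(struct foo_adapter *adapter);	  /* assumed helper */
static int foo_clean_rx(struct foo_adapter *adapter, int budget); /* assumed helper */

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_adapter *adapter = dev_id;

	if (napi_schedule_prep(&adapter->napi)) {
		foo_irq_disable(adapter);	/* no more irqs until poll finishes */
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_adapter *adapter = container_of(napi, struct foo_adapter, napi);
	int work_done = foo_clean_rx(adapter, budget);

	if (work_done < budget) {
		napi_complete(napi);		/* was netif_rx_complete(napi) */
		foo_irq_enable(adapter);	/* re-arm the device interrupt */
	}
	return work_done;
}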
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -31,7 +31,6 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/netdevice.h> -#include <linux/inet_lro.h> #include <linux/aer.h> #include "ixgbe_type.h" @@ -88,9 +87,6 @@ #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 -#define IXGBE_MAX_LRO_DESCRIPTORS 8 -#define IXGBE_MAX_LRO_AGGREGATE 32 - /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct ixgbe_tx_buffer { @@ -142,8 +138,6 @@ struct ixgbe_ring { /* cpu for tx queue */ int cpu; #endif - struct net_lro_mgr lro_mgr; - bool lro_used; struct ixgbe_queue_stats stats; u16 v_idx; /* maps directly to the index for this ring in the hardware * vector array, can also be used for finding the bit in EICR @@ -210,9 +204,13 @@ struct ixgbe_q_vector { #define OTHER_VECTOR 1 #define NON_Q_VECTORS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 16 +#define MAX_MSIX_VECTORS_82598 18 +#define MAX_MSIX_Q_VECTORS_82598 16 + +#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82598 +#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82598 + #define MIN_MSIX_Q_VECTORS 2 -#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS) #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) /* board specific private data structure */ @@ -250,6 +248,7 @@ struct ixgbe_adapter { u64 hw_csum_rx_good; u64 non_eop_descs; int num_msix_vectors; + int max_msix_q_vectors; /* true count of q_vectors for device */ struct ixgbe_ring_feature ring_feature[3]; struct msix_entry *msix_entries; @@ -301,9 +300,6 @@ struct ixgbe_adapter { unsigned long state; u64 tx_busy; - u64 lro_aggregated; - u64 lro_flushed; - u64 lro_no_desc; unsigned int tx_ring_count; unsigned int rx_ring_count; diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index ad5699d9ab0..8e7315e0a7f 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -50,6 +50,27 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); /** + * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. 
+ **/ +u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 msix_count; + pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS, + &msix_count); + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW, so increment to give proper value */ + msix_count++; + + return msix_count; +} + +/** */ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { @@ -106,6 +127,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); out: return ret_val; @@ -213,6 +235,10 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) /* Media type for I82598 is based on device ID */ switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + media_type = ixgbe_media_type_backplane; + break; case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598EB_CX4: @@ -1002,6 +1028,13 @@ static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + /* Default device ID is mezzanine card KX/KX4 */ + physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + IXGBE_PHYSICAL_LAYER_1000BASE_KX); + break; + case IXGBE_DEV_ID_82598_BX: + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index f67c68404bb..05f0e872947 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 192f8d01291..0b5ba575580 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index e2e28ac63de..2a60c89ab34 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. 
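The ixgbe_get_pcie_msix_count_82598() helper added above also documents the general recipe for sizing MSI-X resources from PCI configuration space: read the MSI-X Message Control word at the device's capability offset, mask the table-size field, and add one because the field is zero-based. Condensed, and using the 82598 constants the patch defines (offset 0x62, mask 0x7FF):

#include <linux/pci.h>

static u16 foo_msix_vector_count(struct pci_dev *pdev)
{
	u16 msix_count;

	pci_read_config_word(pdev, IXGBE_PCIE_MSIX_82598_CAPS, &msix_count);
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;	/* low 11 bits: table size */
	return msix_count + 1;				/* hardware counts from zero */
}

The value read here feeds mac->max_msix_vectors, which a later hunk uses in ixgbe_acquire_msix_vectors() to clamp num_msix_vectors to max_msix_q_vectors + NON_Q_VECTORS.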
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index 75f6efe1e36..0da5c6d5bca 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index 2c046b0b5d2..56032114893 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h index 1e6a313719d..ebbe53c352a 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 4129976953f..dd9d1d63a59 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 67f87a79154..14e661e0a25 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -89,8 +89,6 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, - {"lro_aggregated", IXGBE_STAT(lro_aggregated)}, - {"lro_flushed", IXGBE_STAT(lro_flushed)}, }; #define IXGBE_QUEUE_STATS_LEN \ @@ -132,6 +130,26 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->advertising |= ADVERTISED_1000baseT_Full; ecmd->port = PORT_TP; + } else if (hw->phy.media_type == ixgbe_media_type_backplane) { + /* Set as FIBRE until SERDES defined in kernel */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + break; + case IXGBE_DEV_ID_82598_BX: + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + break; + } } else { ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising = (ADVERTISED_10000baseT_Full | @@ -808,15 +826,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); int j, k; int i; - u64 aggregated = 0, flushed = 0, no_desc = 0; - for (i = 0; i < adapter->num_rx_queues; i++) { - aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated; - flushed += adapter->rx_ring[i].lro_mgr.stats.flushed; - no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc; - } - adapter->lro_aggregated = aggregated; - adapter->lro_flushed = flushed; - adapter->lro_no_desc = no_desc; ixgbe_update_stats(adapter); for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index d2f4d5f508b..ed8d14163c1 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. 
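A small recurring cleanup in this series, in bpqether's packet_type initializer and in the checksum-offload switches of igb and ixgbe, is the replacement of __constant_htons() with cpu_to_be16(). For a constant argument cpu_to_be16() still folds to a compile-time constant, so it remains valid in static initializers and case labels, which is why the substitution is safe. In isolation (foo_* names are illustrative):

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev);

static struct packet_type foo_packet_type = {
	.type = cpu_to_be16(ETH_P_802_2),	/* still a constant initializer */
	.func = foo_rcv,
};

static int foo_ip_version(const struct sk_buff *skb)
{
	switch (skb->protocol) {		/* __be16, set by eth_type_trans() */
	case cpu_to_be16(ETH_P_IP):		/* case labels need constants too */
		return 4;
	case cpu_to_be16(ETH_P_IPV6):
		return 6;
	default:
		return 0;
	}
}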
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -47,9 +47,9 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "1.3.30-k2" +#define DRV_VERSION "1.3.56-k2" const char ixgbe_driver_version[] = DRV_VERSION; -static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; +static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, @@ -64,6 +64,8 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { * Class, Class Mask, private data (not used) } */ static struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), + board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), @@ -82,6 +84,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = { board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), + board_82598 }, /* required last entry */ {0, } @@ -403,23 +407,21 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) * @rx_ring: rx descriptor ring (for a specific queue) to setup * @rx_desc: rx descriptor **/ -static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb, u8 status, - struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc) { + struct ixgbe_adapter *adapter = q_vector->adapter; + struct napi_struct *napi = &q_vector->napi; bool is_vlan = (status & IXGBE_RXD_STAT_VP); u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - if (adapter->netdev->features & NETIF_F_LRO && - skb->ip_summed == CHECKSUM_UNNECESSARY) { + skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]); + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { if (adapter->vlgrp && is_vlan && (tag != 0)) - lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, - adapter->vlgrp, tag, - rx_desc); + vlan_gro_receive(napi, adapter->vlgrp, tag, skb); else - lro_receive_skb(&ring->lro_mgr, skb, rx_desc); - ring->lro_used = true; + napi_gro_receive(napi, skb); } else { if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { if (adapter->vlgrp && is_vlan && (tag != 0)) @@ -574,10 +576,11 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; } -static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, +static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, int *work_done, int work_to_do) { + struct ixgbe_adapter *adapter = q_vector->adapter; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc, *next_rxd; struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; @@ -678,7 +681,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, total_rx_packets++; skb->protocol = eth_type_trans(skb, adapter->netdev); - ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); + ixgbe_receive_skb(q_vector, skb, staterr, rx_desc); next_desc: rx_desc->wb.upper.status_error = 0; @@ -696,11 +699,6 @@ next_desc: staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } - if (rx_ring->lro_used) { - lro_flush_all(&rx_ring->lro_mgr); - rx_ring->lro_used = false; - } - rx_ring->next_to_clean = i; cleaned_count = IXGBE_DESC_UNUSED(rx_ring); @@ -1015,7 +1013,7 @@ 
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) rx_ring = &(adapter->rx_ring[r_idx]); /* disable interrupts on this vector only */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); - netif_rx_schedule(&q_vector->napi); + napi_schedule(&q_vector->napi); return IRQ_HANDLED; } @@ -1052,11 +1050,11 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) ixgbe_update_rx_dca(adapter, rx_ring); #endif - ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); + ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); /* If all Rx work done, exit the polling mode */ if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); if (adapter->itr_setting & 3) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -1095,7 +1093,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_rx_dca(adapter, rx_ring); #endif - ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); + ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); enable_mask |= rx_ring->v_idx; r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, r_idx + 1); @@ -1105,7 +1103,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) rx_ring = &(adapter->rx_ring[r_idx]); /* If all Rx work done, exit the polling mode */ if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); if (adapter->itr_setting & 3) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -1381,13 +1379,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); - if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) { + if (napi_schedule_prep(&adapter->q_vector[0].napi)) { adapter->tx_ring[0].total_packets = 0; adapter->tx_ring[0].total_bytes = 0; adapter->rx_ring[0].total_packets = 0; adapter->rx_ring[0].total_bytes = 0; /* would disable interrupts here but EIAM disabled it */ - __netif_rx_schedule(&adapter->q_vector[0].napi); + __napi_schedule(&adapter->q_vector[0].napi); } return IRQ_HANDLED; @@ -1568,33 +1566,6 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); } -/** - * ixgbe_get_skb_hdr - helper function for LRO header processing - * @skb: pointer to sk_buff to be added to LRO packet - * @iphdr: pointer to ip header structure - * @tcph: pointer to tcp header structure - * @hdr_flags: pointer to header flags - * @priv: private data - **/ -static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, - u64 *hdr_flags, void *priv) -{ - union ixgbe_adv_rx_desc *rx_desc = priv; - - /* Verify that this is a valid IPv4 TCP packet */ - if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) && - (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP))) - return -1; - - /* Set network headers */ - skb_reset_network_header(skb); - skb_set_transport_header(skb, ip_hdrlen(skb)); - *iphdr = ip_hdr(skb); - *tcph = tcp_hdr(skb); - *hdr_flags = LRO_IPV4 | LRO_TCP; - return 0; -} - #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ (((S) & (PAGE_SIZE - 1)) ? 
1 : 0)) @@ -1666,16 +1637,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) adapter->rx_ring[i].head = IXGBE_RDH(j); adapter->rx_ring[i].tail = IXGBE_RDT(j); adapter->rx_ring[i].rx_buf_len = rx_buf_len; - /* Intitial LRO Settings */ - adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE; - adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS; - adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr; - adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID; - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI; - adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; - adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; ixgbe_configure_srrctl(adapter, j); } @@ -2310,14 +2271,14 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) #endif tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); - ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget); + ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); if (tx_cleaned) work_done = budget; /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); if (adapter->itr_setting & 3) ixgbe_set_itr(adapter); if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2460,7 +2421,13 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, ixgbe_set_num_queues(adapter); } else { adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ - adapter->num_msix_vectors = vectors; + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. 
+ */ + adapter->num_msix_vectors = min(vectors, + adapter->max_msix_q_vectors + NON_Q_VECTORS); } } @@ -2785,6 +2752,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->ring_feature[RING_F_RSS].indices = rss; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; + adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; #ifdef CONFIG_IXGBE_DCB /* Configure DCB traffic classes */ @@ -2926,12 +2894,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct pci_dev *pdev = adapter->pdev; int size; - size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS; - rx_ring->lro_mgr.lro_arr = vmalloc(size); - if (!rx_ring->lro_mgr.lro_arr) - return -ENOMEM; - memset(rx_ring->lro_mgr.lro_arr, 0, size); - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) { @@ -2960,8 +2922,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, return 0; alloc_failed: - vfree(rx_ring->lro_mgr.lro_arr); - rx_ring->lro_mgr.lro_arr = NULL; return -ENOMEM; } @@ -3039,9 +2999,6 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; - vfree(rx_ring->lro_mgr.lro_arr); - rx_ring->lro_mgr.lro_arr = NULL; - ixgbe_clean_rx_ring(adapter, rx_ring); vfree(rx_ring->rx_buffer_info); @@ -3619,13 +3576,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, if (skb->ip_summed == CHECKSUM_PARTIAL) { switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; if (ip_hdr(skb)->protocol == IPPROTO_TCP) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; break; - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IPV6): /* XXX what about other V6 headers?? 
*/ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) type_tucmd_mlhl |= @@ -3956,16 +3913,27 @@ static void ixgbe_netpoll(struct net_device *netdev) **/ static int ixgbe_link_config(struct ixgbe_hw *hw) { - u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL; + u32 autoneg; + bool link_up = false; + u32 ret = IXGBE_ERR_LINK_SETUP; + + if (hw->mac.ops.check_link) + ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); - /* must always autoneg for both 1G and 10G link */ - hw->mac.autoneg = true; + if (ret || !link_up) + goto link_cfg_out; - if ((hw->mac.type == ixgbe_mac_82598EB) && - (hw->phy.media_type == ixgbe_media_type_copper)) - autoneg = IXGBE_LINK_SPEED_82598_AUTONEG; + if (hw->mac.ops.get_link_capabilities) + ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, + &hw->mac.autoneg); + if (ret) + goto link_cfg_out; - return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); + if (hw->mac.ops.setup_link_speed) + ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, true); + +link_cfg_out: + return ret; } static const struct net_device_ops ixgbe_netdev_ops = { @@ -4141,7 +4109,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; - netdev->features |= NETIF_F_LRO; + netdev->features |= NETIF_F_GRO; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 5a8669aedf6..77ec26f5650 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index 43a97bc420f..539a3061eb2 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index f011c57c920..c49ba8a17f1 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. 
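With the inet_lro plumbing stripped out of igb and ixgbe above, both drivers advertise NETIF_F_GRO and let the stack aggregate: checksummed packets go to napi_gro_receive() (or vlan_gro_receive() when a tag was stripped in hardware), and there are no per-ring LRO descriptor arrays, lro_flush_all() calls or extra LRO statistics left to maintain. The receive-completion helper reduces to roughly the following; the foo_* types stand in for the driver's ring and adapter structures.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct foo_adapter {
	struct vlan_group *vlgrp;
};

struct foo_ring {
	struct napi_struct napi;
	struct foo_adapter *adapter;
};

static void foo_receive_skb(struct foo_ring *ring, struct sk_buff *skb,
			    bool vlan_stripped, u16 vlan_tag)
{
	struct foo_adapter *adapter = ring->adapter;

	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		/* GRO path: the stack merges consecutive TCP segments */
		if (vlan_stripped && adapter->vlgrp)
			vlan_gro_receive(&ring->napi, adapter->vlgrp,
					 vlan_tag, skb);
		else
			napi_gro_receive(&ring->napi, skb);
	} else {
		/* checksum not verified: fall back to the plain receive path */
		if (vlan_stripped && adapter->vlgrp)
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan_tag);
		else
			netif_receive_skb(skb);
	}
}

Unlike LRO, GRO keeps enough metadata to re-segment packets, so it can stay enabled when the interface is used for forwarding or bridging, which is why the feature flag can simply replace NETIF_F_LRO in the probe routines.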
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -34,6 +34,8 @@ #define IXGBE_INTEL_VENDOR_ID 0x8086 /* Device IDs */ +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 #define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB @@ -819,6 +821,10 @@ #define IXGBE_FW_PTR 0x0F #define IXGBE_PBANUM0_PTR 0x15 #define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 + +/* MSI-X capability fields masks */ +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF /* Legacy EEPROM word offsets */ #define IXGBE_ISCSI_BOOT_CAPS 0x0033 @@ -1449,6 +1455,7 @@ struct ixgbe_mac_info { u32 num_rar_entries; u32 max_tx_queues; u32 max_rx_queues; + u32 max_msix_vectors; u32 link_attach_type; u32 link_mode_select; bool link_settings_loaded; diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 01474572056..d3bf2f017cc 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -141,7 +141,7 @@ static int ixpdev_poll(struct napi_struct *napi, int budget) break; } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); - netif_rx_complete(napi); + napi_complete(napi); ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); return rx; @@ -204,7 +204,7 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id) ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); if (likely(napi_schedule_prep(&ip->napi))) { - __netif_rx_schedule(&ip->napi); + __napi_schedule(&ip->napi); } else { printk(KERN_CRIT "ixp2000: irq while polling!!\n"); } diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index 334ff9e12cd..14248cfc3df 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c @@ -131,7 +131,8 @@ static int __init sonic_probe1(struct net_device *dev) if (sonic_debug && version_printed++ == 0) printk(version); - printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", lp->device->bus_id, dev->base_addr); + printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", + dev_name(lp->device), dev->base_addr); /* * Put the sonic into software reset, then @@ -156,7 +157,8 @@ static int __init sonic_probe1(struct net_device *dev) if ((lp->descriptors = dma_alloc_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { - printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id); + printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", + dev_name(lp->device)); goto out; } diff --git a/drivers/net/jme.h b/drivers/net/jme.h index 5154411b5e6..e321c678b11 100644 --- a/drivers/net/jme.h +++ b/drivers/net/jme.h @@ -398,15 +398,15 @@ struct jme_ring { #define JME_NAPI_WEIGHT(w) int w #define JME_NAPI_WEIGHT_VAL(w) w #define JME_NAPI_WEIGHT_SET(w, r) -#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(napis) +#define JME_RX_COMPLETE(dev, napis) napi_complete(napis) #define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); #define JME_NAPI_DISABLE(priv) \ if (!napi_disable_pending(&priv->napi)) \ napi_disable(&priv->napi); #define JME_RX_SCHEDULE_PREP(priv) \ - netif_rx_schedule_prep(&priv->napi) + napi_schedule_prep(&priv->napi) #define JME_RX_SCHEDULE(priv) \ - __netif_rx_schedule(&priv->napi); + __napi_schedule(&priv->napi); /* * Jmac Adapter Private data diff --git a/drivers/net/korina.c b/drivers/net/korina.c index 75010cac76a..38d6649a29c 100644 --- 
a/drivers/net/korina.c +++ b/drivers/net/korina.c @@ -334,7 +334,7 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id) DMA_STAT_HALT | DMA_STAT_ERR), &lp->rx_dma_regs->dmasm); - netif_rx_schedule(&lp->napi); + napi_schedule(&lp->napi); if (dmas & DMA_STAT_ERR) printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name); @@ -468,7 +468,7 @@ static int korina_poll(struct napi_struct *napi, int budget) work_done = korina_rx(dev, budget); if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); writel(readl(&lp->rx_dma_regs->dmasm) & ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f6c4936e2fa..872c1bdf42b 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -211,10 +211,10 @@ static int macb_mii_probe(struct net_device *dev) /* attach the mac to the phy */ if (pdata && pdata->is_rmii) { - phydev = phy_connect(dev, phydev->dev.bus_id, + phydev = phy_connect(dev, dev_name(&phydev->dev), &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); } else { - phydev = phy_connect(dev, phydev->dev.bus_id, + phydev = phy_connect(dev, dev_name(&phydev->dev), &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); } @@ -527,7 +527,7 @@ static int macb_poll(struct napi_struct *napi, int budget) * this function was called last time, and no packets * have been received since. */ - netif_rx_complete(napi); + napi_complete(napi); goto out; } @@ -538,13 +538,13 @@ static int macb_poll(struct napi_struct *napi, int budget) dev_warn(&bp->pdev->dev, "No RX buffers complete, status = %02lx\n", (unsigned long)status); - netif_rx_complete(napi); + napi_complete(napi); goto out; } work_done = macb_rx(bp, budget); if (work_done < budget) - netif_rx_complete(napi); + napi_complete(napi); /* * We've done what we can to clean the buffers. 
Make sure we @@ -579,7 +579,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_RX_INT_FLAGS) { - if (netif_rx_schedule_prep(&bp->napi)) { + if (napi_schedule_prep(&bp->napi)) { /* * There's no point taking any more interrupts * until we have processed the buffers @@ -587,7 +587,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) macb_writel(bp, IDR, MACB_RX_INT_FLAGS); dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n"); - __netif_rx_schedule(&bp->napi); + __napi_schedule(&bp->napi); } } @@ -1077,7 +1077,7 @@ static void macb_get_drvinfo(struct net_device *dev, strcpy(info->driver, bp->pdev->dev.driver->name); strcpy(info->version, "$Revision: 1.14 $"); - strcpy(info->bus_info, bp->pdev->dev.bus_id); + strcpy(info->bus_info, dev_name(&bp->pdev->dev)); } static struct ethtool_ops macb_ethtool_ops = { @@ -1234,8 +1234,8 @@ static int __init macb_probe(struct platform_device *pdev) phydev = bp->phy_dev; printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); + "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); return 0; diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 205bb05c25d..527166e35d5 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -176,7 +176,8 @@ static int __init macsonic_init(struct net_device *dev) if ((lp->descriptors = dma_alloc_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { - printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id); + printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", + dev_name(lp->device)); return -ENOMEM; } @@ -337,7 +338,7 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev) sonic_version_printed = 1; } printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", - lp->device->bus_id, dev->base_addr); + dev_name(lp->device), dev->base_addr); /* The PowerBook's SONIC is 16 bit always. */ if (macintosh_config->ident == MAC_MODEL_PB520) { @@ -370,10 +371,10 @@ static int __init mac_onboard_sonic_probe(struct net_device *dev) } printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", - lp->device->bus_id, sr, lp->dma_bitmode?32:16, lp->reg_offset); + dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset); #if 0 /* This is sometimes useful to find out how MacOS configured the card. */ - printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, + printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); #endif @@ -525,12 +526,12 @@ static int __init mac_nubus_sonic_probe(struct net_device *dev) sonic_version_printed = 1; } printk(KERN_INFO "%s: %s in slot %X\n", - lp->device->bus_id, ndev->board->name, ndev->board->slot); + dev_name(lp->device), ndev->board->name, ndev->board->slot); printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", - lp->device->bus_id, SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); + dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); #if 0 /* This is sometimes useful to find out how MacOS configured the card. 
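The netif_rx_* to napi_* renames applied to korina and macb above (and to the remaining drivers below) all follow one pattern: napi_schedule_prep()/__napi_schedule() in the interrupt handler, napi_complete() in the poll routine once the budget is not exhausted. A minimal sketch for a hypothetical single-queue driver; foo_disable_rx_irq(), foo_enable_rx_irq() and foo_clean_rx() are stand-ins for the hardware-specific parts, and the napi_struct is still registered with netif_napi_add() at probe time:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	struct net_device *ndev;
};

static void foo_disable_rx_irq(struct foo_priv *fp);
static void foo_enable_rx_irq(struct foo_priv *fp);
static int foo_clean_rx(struct foo_priv *fp, int budget);

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *fp = dev_id;

	if (napi_schedule_prep(&fp->napi)) {	/* was netif_rx_schedule_prep() */
		foo_disable_rx_irq(fp);
		__napi_schedule(&fp->napi);	/* was __netif_rx_schedule() */
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = foo_clean_rx(fp, budget);

	if (work_done < budget) {
		napi_complete(napi);		/* was netif_rx_complete() */
		foo_enable_rx_irq(fp);
	}
	return work_done;
}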
*/ - printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id, + printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); #endif diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index c61b0bdca1a..a4130e76499 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c @@ -768,6 +768,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud skb->ip_summed = ip_summed; skb->protocol = eth_type_trans(skb, dev); + skb_record_rx_queue(skb, cq->ring); /* Push it up the stack */ if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) & @@ -814,7 +815,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq) struct mlx4_en_priv *priv = netdev_priv(cq->dev); if (priv->port_up) - netif_rx_schedule(&cq->napi); + napi_schedule(&cq->napi); else mlx4_en_arm_cq(priv, cq); } @@ -834,7 +835,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) INC_PERF_COUNTER(priv->pstats.napi_quota); else { /* Done for now */ - netif_rx_complete(napi); + napi_complete(napi); mlx4_en_arm_cq(priv, cq); } return done; diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 5f31bbb614a..8fab31f631a 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -2589,7 +2589,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) phy_reset(mp); - phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII); + phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e9c1296b267..aea9fdaa3cd 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -1324,6 +1324,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, skb_shinfo(skb)->nr_frags = 0; } skb->protocol = eth_type_trans(skb, dev); + skb_record_rx_queue(skb, ss - &mgp->ss[0]); if (mgp->csum_flag) { if ((skb->protocol == htons(ETH_P_IP)) || @@ -1514,7 +1515,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) work_done = myri10ge_clean_rx_done(ss, budget); if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); put_be32(htonl(3), ss->irq_claim); } return work_done; @@ -1532,7 +1533,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* an interrupt on a non-zero receive-only slice is implicitly * valid since MSI-X irqs are not shared */ if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { - netif_rx_schedule(&ss->napi); + napi_schedule(&ss->napi); return (IRQ_HANDLED); } @@ -1543,7 +1544,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* low bit indicates receives are present, so schedule * napi poll handler */ if (stats->valid & 1) - netif_rx_schedule(&ss->napi); + napi_schedule(&ss->napi); if (!mgp->msi_enabled && !mgp->msix_enabled) { put_be32(0, mgp->irq_deassert); diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 899ed065a14..88b52883ace 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -748,7 +748,7 @@ static int myri_rebuild_header(struct sk_buff *skb) switch (eth->h_proto) { #ifdef CONFIG_INET - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): return arp_find(eth->h_dest, skb); #endif diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index c5dec54251b..c23a58624a3 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -2198,10 +2198,10 
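skb_record_rx_queue(), newly called in the mlx4_en and myri10ge receive paths above (niu gets the same treatment below), stamps the skb with the RX ring it arrived on so transmit queue selection can keep the flow on a matching queue. A sketch of where the call sits in a typical multiqueue receive routine; struct foo_rx_ring and its index field are hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_rx_ring {
	u16 index;	/* 0 .. number of RX queues - 1 */
};

static void foo_receive_skb(struct net_device *dev, struct foo_rx_ring *ring,
			    struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	skb_record_rx_queue(skb, ring->index);	/* remember which ring this came from */
	netif_receive_skb(skb);
}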
@@ static irqreturn_t intr_handler(int irq, void *dev_instance) prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); - if (netif_rx_schedule_prep(&np->napi)) { + if (napi_schedule_prep(&np->napi)) { /* Disable interrupts and register for poll */ natsemi_irq_disable(dev); - __netif_rx_schedule(&np->napi); + __napi_schedule(&np->napi); } else printk(KERN_WARNING "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", @@ -2253,7 +2253,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget) np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); - netif_rx_complete(napi); + napi_complete(napi); /* Reenable interrupts providing nothing is trying to shut * the chip down. */ diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 645d384fe87..ada462e94c9 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1170,7 +1170,7 @@ static bool netxen_tso_check(struct net_device *netdev, __be16 protocol = skb->protocol; u16 flags = 0; - if (protocol == __constant_htons(ETH_P_8021Q)) { + if (protocol == cpu_to_be16(ETH_P_8021Q)) { struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data; protocol = vh->h_vlan_encapsulated_proto; flags = FLAGS_VLAN_TAGGED; @@ -1183,21 +1183,21 @@ static bool netxen_tso_check(struct net_device *netdev, desc->total_hdr_length = skb_transport_offset(skb) + tcp_hdrlen(skb); - opcode = (protocol == __constant_htons(ETH_P_IPV6)) ? + opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? TX_TCP_LSO6 : TX_TCP_LSO; tso = true; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4proto; - if (protocol == __constant_htons(ETH_P_IP)) { + if (protocol == cpu_to_be16(ETH_P_IP)) { l4proto = ip_hdr(skb)->protocol; if (l4proto == IPPROTO_TCP) opcode = TX_TCP_PKT; else if(l4proto == IPPROTO_UDP) opcode = TX_UDP_PKT; - } else if (protocol == __constant_htons(ETH_P_IPV6)) { + } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { l4proto = ipv6_hdr(skb)->nexthdr; if (l4proto == IPPROTO_TCP) @@ -1640,7 +1640,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) } if ((work_done < budget) && tx_complete) { - netif_rx_complete(&adapter->napi); + napi_complete(&adapter->napi); netxen_nic_enable_int(adapter); } diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 0c0b752315c..c26325ded20 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -3390,6 +3390,7 @@ static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) rp->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, np->dev); + skb_record_rx_queue(skb, rp->rx_channel); netif_receive_skb(skb); return num_rcr; @@ -3669,7 +3670,7 @@ static int niu_poll(struct napi_struct *napi, int budget) work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); niu_ldg_rearm(np, lp, 1); } return work_done; @@ -4088,12 +4089,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { - if (likely(netif_rx_schedule_prep(&lp->napi))) { + if (likely(napi_schedule_prep(&lp->napi))) { lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; __niu_fastpath_interrupt(np, lp->ldg_num, v0); - __netif_rx_schedule(&lp->napi); + __napi_schedule(&lp->napi); } } @@ -6446,11 +6447,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, ipv6 = ihl = 0; switch (skb->protocol) { - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): ip_proto = 
ip_hdr(skb)->protocol; ihl = ip_hdr(skb)->ihl; break; - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IPV6): ip_proto = ipv6_hdr(skb)->nexthdr; ihl = (40 >> 2); ipv6 = 1; diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index d0349e7d73e..5eeb5a87b73 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -970,7 +970,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) if (*chan->status & PAS_STATUS_ERROR) reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; - netif_rx_schedule(&mac->napi); + napi_schedule(&mac->napi); write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); @@ -1010,7 +1010,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); - netif_rx_schedule(&mac->napi); + napi_schedule(&mac->napi); if (reg) write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); @@ -1639,7 +1639,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget) pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); if (pkts < budget) { /* all done, no more packets present */ - netif_rx_complete(napi); + napi_complete(napi); pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 665a4286da3..80124fac65f 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -1397,7 +1397,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) if (work_done < budget) { spin_lock_irqsave(&lp->lock, flags); - __netif_rx_complete(napi); + __napi_complete(napi); /* clear interrupt masks */ val = lp->a.read_csr(ioaddr, CSR3); @@ -2592,14 +2592,14 @@ pcnet32_interrupt(int irq, void *dev_id) dev->name, csr0); /* unlike for the lance, there is no restart needed */ } - if (netif_rx_schedule_prep(&lp->napi)) { + if (napi_schedule_prep(&lp->napi)) { u16 val; /* set interrupt masks */ val = lp->a.read_csr(ioaddr, CSR3); val |= 0x5f00; lp->a.write_csr(ioaddr, CSR3, val); mmiowb(); - __netif_rx_schedule(&lp->napi); + __napi_schedule(&lp->napi); break; } csr0 = lp->a.read_csr(ioaddr, CSR0); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index a439ebeb431..3f460c56492 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -200,16 +200,21 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, { struct device_node *np = NULL; struct mdio_gpio_platform_data *pdata; + int ret; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - pdata->mdc = of_get_gpio(ofdev->node, 0); - pdata->mdio = of_get_gpio(ofdev->node, 1); - - if (pdata->mdc < 0 || pdata->mdio < 0) + ret = of_get_gpio(ofdev->node, 0); + if (ret < 0) goto out_free; + pdata->mdc = ret; + + ret = of_get_gpio(ofdev->node, 1); + if (ret < 0) + goto out_free; + pdata->mdio = ret; while ((np = of_get_next_child(ofdev->node, np))) if (!strcmp(np->type, "ethernet-phy")) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 811a637695c..bb29ae3ff17 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> +#include <linux/device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> @@ -286,33 +287,58 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) (phydev->phy_id & phydrv->phy_id_mask)); } +static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) +{ + struct device_driver *drv = phydev->dev.driver; + 
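The __constant_htons() to cpu_to_be16() conversions in netxen and niu above work because cpu_to_be16() still folds to an integer constant expression when its argument is constant, so it remains legal in switch case labels while reading like any other endianness helper. The idiom, isolated into a small classify helper (foo_l4_proto() is hypothetical):

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

/* return the L4 protocol of an IPv4/IPv6 frame, 0 for anything else */
static u8 foo_l4_proto(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):	/* was __constant_htons(ETH_P_IP) */
		return ip_hdr(skb)->protocol;
	case cpu_to_be16(ETH_P_IPV6):	/* was __constant_htons(ETH_P_IPV6) */
		return ipv6_hdr(skb)->nexthdr;
	default:
		return 0;
	}
}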
struct phy_driver *phydrv = to_phy_driver(drv); + struct net_device *netdev = phydev->attached_dev; + + if (!drv || !phydrv->suspend) + return false; + + /* PHY not attached? May suspend. */ + if (!netdev) + return true; + + /* + * Don't suspend PHY if the attched netdev parent may wakeup. + * The parent may point to a PCI device, as in tg3 driver. + */ + if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) + return false; + + /* + * Also don't suspend PHY if the netdev itself may wakeup. This + * is the case for devices w/o underlaying pwr. mgmt. aware bus, + * e.g. SoC devices. + */ + if (device_may_wakeup(&netdev->dev)) + return false; + + return true; +} + /* Suspend and resume. Copied from platform_suspend and * platform_resume */ static int mdio_bus_suspend(struct device * dev, pm_message_t state) { - int ret = 0; - struct device_driver *drv = dev->driver; - struct phy_driver *phydrv = to_phy_driver(drv); + struct phy_driver *phydrv = to_phy_driver(dev->driver); struct phy_device *phydev = to_phy_device(dev); - if (drv && phydrv->suspend && !device_may_wakeup(phydev->dev.parent)) - ret = phydrv->suspend(phydev); - - return ret; + if (!mdio_bus_phy_may_suspend(phydev)) + return 0; + return phydrv->suspend(phydev); } static int mdio_bus_resume(struct device * dev) { - int ret = 0; - struct device_driver *drv = dev->driver; - struct phy_driver *phydrv = to_phy_driver(drv); + struct phy_driver *phydrv = to_phy_driver(dev->driver); struct phy_device *phydev = to_phy_device(dev); - if (drv && phydrv->resume && !device_may_wakeup(phydev->dev.parent)) - ret = phydrv->resume(phydev); - - return ret; + if (!mdio_bus_phy_may_suspend(phydev)) + return 0; + return phydrv->resume(phydev); } struct bus_type mdio_bus_type = { diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 7b2728b8f1b..4405a76ed3d 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -49,6 +49,10 @@ #include <net/slhc_vj.h> #include <asm/atomic.h> +#include <linux/nsproxy.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> + #define PPP_VERSION "2.4.2" /* @@ -131,6 +135,7 @@ struct ppp { struct sock_filter *active_filter;/* filter for pkts to reset idle */ unsigned pass_len, active_len; #endif /* CONFIG_PPP_FILTER */ + struct net *ppp_net; /* the net we belong to */ }; /* @@ -155,6 +160,7 @@ struct channel { struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ spinlock_t downl; /* protects `chan', file.xq dequeue */ struct ppp *ppp; /* ppp unit we're connected to */ + struct net *chan_net; /* the net channel belongs to */ struct list_head clist; /* link in list of channels per unit */ rwlock_t upl; /* protects `ppp' */ #ifdef CONFIG_PPP_MULTILINK @@ -173,26 +179,35 @@ struct channel { * channel.downl. */ -/* - * all_ppp_mutex protects the all_ppp_units mapping. - * It also ensures that finding a ppp unit in the all_ppp_units map - * and updating its file.refcnt field is atomic. - */ -static DEFINE_MUTEX(all_ppp_mutex); static atomic_t ppp_unit_count = ATOMIC_INIT(0); -static DEFINE_IDR(ppp_units_idr); - -/* - * all_channels_lock protects all_channels and last_channel_index, - * and the atomicity of find a channel and updating its file.refcnt - * field. 
- */ -static DEFINE_SPINLOCK(all_channels_lock); -static LIST_HEAD(all_channels); -static LIST_HEAD(new_channels); -static int last_channel_index; static atomic_t channel_count = ATOMIC_INIT(0); +/* per-net private data for this module */ +static unsigned int ppp_net_id; +struct ppp_net { + /* units to ppp mapping */ + struct idr units_idr; + + /* + * all_ppp_mutex protects the units_idr mapping. + * It also ensures that finding a ppp unit in the units_idr + * map and updating its file.refcnt field is atomic. + */ + struct mutex all_ppp_mutex; + + /* channels */ + struct list_head all_channels; + struct list_head new_channels; + int last_channel_index; + + /* + * all_channels_lock protects all_channels and + * last_channel_index, and the atomicity of find + * a channel and updating its file.refcnt field. + */ + spinlock_t all_channels_lock; +}; + /* Get the PPP protocol number from a skb */ #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) @@ -216,8 +231,8 @@ static atomic_t channel_count = ATOMIC_INIT(0); #define seq_after(a, b) ((s32)((a) - (b)) > 0) /* Prototypes. */ -static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, - unsigned int cmd, unsigned long arg); +static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, + struct file *file, unsigned int cmd, unsigned long arg); static void ppp_xmit_process(struct ppp *ppp); static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); static void ppp_push(struct ppp *ppp); @@ -240,12 +255,12 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); static void ppp_ccp_closed(struct ppp *ppp); static struct compressor *find_compressor(int type); static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); -static struct ppp *ppp_create_interface(int unit, int *retp); +static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); static void init_ppp_file(struct ppp_file *pf, int kind); static void ppp_shutdown_interface(struct ppp *ppp); static void ppp_destroy_interface(struct ppp *ppp); -static struct ppp *ppp_find_unit(int unit); -static struct channel *ppp_find_channel(int unit); +static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); +static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); static int ppp_connect_channel(struct channel *pch, int unit); static int ppp_disconnect_channel(struct channel *pch); static void ppp_destroy_channel(struct channel *pch); @@ -256,6 +271,14 @@ static void *unit_find(struct idr *p, int n); static struct class *ppp_class; +/* per net-namespace data */ +static inline struct ppp_net *ppp_pernet(struct net *net) +{ + BUG_ON(!net); + + return net_generic(net, ppp_net_id); +} + /* Translates a PPP protocol number to a NP index (NP == network protocol) */ static inline int proto_to_npindex(int proto) { @@ -544,7 +567,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) int __user *p = argp; if (!pf) - return ppp_unattached_ioctl(pf, file, cmd, arg); + return ppp_unattached_ioctl(current->nsproxy->net_ns, + pf, file, cmd, arg); if (cmd == PPPIOCDETACH) { /* @@ -763,12 +787,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; } -static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, - unsigned int cmd, unsigned long arg) +static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, + struct file *file, unsigned int cmd, unsigned long arg) { int unit, err = -EFAULT; struct ppp *ppp; struct channel 
*chan; + struct ppp_net *pn; int __user *p = (int __user *)arg; lock_kernel(); @@ -777,7 +802,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, /* Create a new ppp unit */ if (get_user(unit, p)) break; - ppp = ppp_create_interface(unit, &err); + ppp = ppp_create_interface(net, unit, &err); if (!ppp) break; file->private_data = &ppp->file; @@ -792,29 +817,31 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, /* Attach to an existing ppp unit */ if (get_user(unit, p)) break; - mutex_lock(&all_ppp_mutex); err = -ENXIO; - ppp = ppp_find_unit(unit); + pn = ppp_pernet(net); + mutex_lock(&pn->all_ppp_mutex); + ppp = ppp_find_unit(pn, unit); if (ppp) { atomic_inc(&ppp->file.refcnt); file->private_data = &ppp->file; err = 0; } - mutex_unlock(&all_ppp_mutex); + mutex_unlock(&pn->all_ppp_mutex); break; case PPPIOCATTCHAN: if (get_user(unit, p)) break; - spin_lock_bh(&all_channels_lock); err = -ENXIO; - chan = ppp_find_channel(unit); + pn = ppp_pernet(net); + spin_lock_bh(&pn->all_channels_lock); + chan = ppp_find_channel(pn, unit); if (chan) { atomic_inc(&chan->file.refcnt); file->private_data = &chan->file; err = 0; } - spin_unlock_bh(&all_channels_lock); + spin_unlock_bh(&pn->all_channels_lock); break; default: @@ -834,6 +861,51 @@ static const struct file_operations ppp_device_fops = { .release = ppp_release }; +static __net_init int ppp_init_net(struct net *net) +{ + struct ppp_net *pn; + int err; + + pn = kzalloc(sizeof(*pn), GFP_KERNEL); + if (!pn) + return -ENOMEM; + + idr_init(&pn->units_idr); + mutex_init(&pn->all_ppp_mutex); + + INIT_LIST_HEAD(&pn->all_channels); + INIT_LIST_HEAD(&pn->new_channels); + + spin_lock_init(&pn->all_channels_lock); + + err = net_assign_generic(net, ppp_net_id, pn); + if (err) { + kfree(pn); + return err; + } + + return 0; +} + +static __net_exit void ppp_exit_net(struct net *net) +{ + struct ppp_net *pn; + + pn = net_generic(net, ppp_net_id); + idr_destroy(&pn->units_idr); + /* + * if someone has cached our net then + * further net_generic call will return NULL + */ + net_assign_generic(net, ppp_net_id, NULL); + kfree(pn); +} + +static __net_initdata struct pernet_operations ppp_net_ops = { + .init = ppp_init_net, + .exit = ppp_exit_net, +}; + #define PPP_MAJOR 108 /* Called at boot time if ppp is compiled into the kernel, @@ -843,25 +915,36 @@ static int __init ppp_init(void) int err; printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); - err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); - if (!err) { - ppp_class = class_create(THIS_MODULE, "ppp"); - if (IS_ERR(ppp_class)) { - err = PTR_ERR(ppp_class); - goto out_chrdev; - } - device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, - "ppp"); + + err = register_pernet_gen_device(&ppp_net_id, &ppp_net_ops); + if (err) { + printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err); + goto out; } -out: - if (err) + err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); + if (err) { printk(KERN_ERR "failed to register PPP device (%d)\n", err); - return err; + goto out_net; + } + + ppp_class = class_create(THIS_MODULE, "ppp"); + if (IS_ERR(ppp_class)) { + err = PTR_ERR(ppp_class); + goto out_chrdev; + } + + /* not a big deal if we fail here :-) */ + device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); + + return 0; out_chrdev: unregister_chrdev(PPP_MAJOR, "ppp"); - goto out; +out_net: + unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops); +out: + return err; } /* @@ -969,6 +1052,7 @@ static void ppp_setup(struct 
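ppp_init_net()/ppp_exit_net() above are the standard recipe of this period for turning module-global state into per-namespace state: allocate a private struct in the pernet init hook, publish it with net_assign_generic(), fetch it back anywhere with net_generic(), and register the hooks with register_pernet_gen_device(). A condensed, self-contained sketch of the same recipe for a hypothetical foo module:

#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int foo_net_id;

struct foo_net {
	struct list_head instances;	/* what used to be a file-scope list */
	spinlock_t lock;		/* what used to be a file-scope lock */
};

static inline struct foo_net *foo_pernet(struct net *net)
{
	return net_generic(net, foo_net_id);
}

static __net_init int foo_init_net(struct net *net)
{
	struct foo_net *fn;
	int err;

	fn = kzalloc(sizeof(*fn), GFP_KERNEL);
	if (!fn)
		return -ENOMEM;

	INIT_LIST_HEAD(&fn->instances);
	spin_lock_init(&fn->lock);

	err = net_assign_generic(net, foo_net_id, fn);
	if (err)
		kfree(fn);
	return err;
}

static __net_exit void foo_exit_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	net_assign_generic(net, foo_net_id, NULL);
	kfree(fn);
}

static struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.exit = foo_exit_net,
};

static int __init foo_module_init(void)
{
	return register_pernet_gen_device(&foo_net_id, &foo_net_ops);
}

static void __exit foo_module_exit(void)
{
	unregister_pernet_gen_device(foo_net_id, &foo_net_ops);
}

module_init(foo_module_init);
module_exit(foo_module_exit);
MODULE_LICENSE("GPL");

Anything that used to be a file-scope list, lock or idr then moves into struct foo_net, which is exactly what the ppp and pppoe conversions in this patch do.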
net_device *dev) dev->tx_queue_len = 3; dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->features |= NETIF_F_NETNS_LOCAL; } /* @@ -1986,19 +2070,27 @@ ppp_mp_reconstruct(struct ppp *ppp) * Channel interface. */ -/* - * Create a new, unattached ppp channel. - */ -int -ppp_register_channel(struct ppp_channel *chan) +/* Create a new, unattached ppp channel. */ +int ppp_register_channel(struct ppp_channel *chan) +{ + return ppp_register_net_channel(current->nsproxy->net_ns, chan); +} + +/* Create a new, unattached ppp channel for specified net. */ +int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) { struct channel *pch; + struct ppp_net *pn; pch = kzalloc(sizeof(struct channel), GFP_KERNEL); if (!pch) return -ENOMEM; + + pn = ppp_pernet(net); + pch->ppp = NULL; pch->chan = chan; + pch->chan_net = net; chan->ppp = pch; init_ppp_file(&pch->file, CHANNEL); pch->file.hdrlen = chan->hdrlen; @@ -2008,11 +2100,13 @@ ppp_register_channel(struct ppp_channel *chan) init_rwsem(&pch->chan_sem); spin_lock_init(&pch->downl); rwlock_init(&pch->upl); - spin_lock_bh(&all_channels_lock); - pch->file.index = ++last_channel_index; - list_add(&pch->list, &new_channels); + + spin_lock_bh(&pn->all_channels_lock); + pch->file.index = ++pn->last_channel_index; + list_add(&pch->list, &pn->new_channels); atomic_inc(&channel_count); - spin_unlock_bh(&all_channels_lock); + spin_unlock_bh(&pn->all_channels_lock); + return 0; } @@ -2053,9 +2147,11 @@ void ppp_unregister_channel(struct ppp_channel *chan) { struct channel *pch = chan->ppp; + struct ppp_net *pn; if (!pch) return; /* should never happen */ + chan->ppp = NULL; /* @@ -2068,9 +2164,12 @@ ppp_unregister_channel(struct ppp_channel *chan) spin_unlock_bh(&pch->downl); up_write(&pch->chan_sem); ppp_disconnect_channel(pch); - spin_lock_bh(&all_channels_lock); + + pn = ppp_pernet(pch->chan_net); + spin_lock_bh(&pn->all_channels_lock); list_del(&pch->list); - spin_unlock_bh(&all_channels_lock); + spin_unlock_bh(&pn->all_channels_lock); + pch->file.dead = 1; wake_up_interruptible(&pch->file.rwait); if (atomic_dec_and_test(&pch->file.refcnt)) @@ -2395,9 +2494,10 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) * unit == -1 means allocate a new number. 
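ppp_register_net_channel() above lets a channel driver state explicitly which namespace its channel belongs to, while the old ppp_register_channel() now simply forwards the caller's namespace. A sketch of a channel driver binding to the namespace of the device it runs over, which is what the pppoe conversion further below does; the foo_* names and FOO_OVERHEAD are hypothetical:

#include <linux/netdevice.h>
#include <linux/ppp_channel.h>

#define FOO_OVERHEAD	8	/* hypothetical per-packet encapsulation cost */

struct foo_session {
	struct ppp_channel chan;
};

static struct ppp_channel_ops foo_chan_ops;	/* .start_xmit wired up elsewhere */

static int foo_bind_channel(struct foo_session *ses, struct net_device *dev)
{
	ses->chan.private = ses;
	ses->chan.ops     = &foo_chan_ops;
	ses->chan.mtu     = dev->mtu - FOO_OVERHEAD;
	ses->chan.hdrlen  = FOO_OVERHEAD;

	/* bind to the device's namespace, not the caller's */
	return ppp_register_net_channel(dev_net(dev), &ses->chan);
}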
*/ static struct ppp * -ppp_create_interface(int unit, int *retp) +ppp_create_interface(struct net *net, int unit, int *retp) { struct ppp *ppp; + struct ppp_net *pn; struct net_device *dev = NULL; int ret = -ENOMEM; int i; @@ -2406,6 +2506,8 @@ ppp_create_interface(int unit, int *retp) if (!dev) goto out1; + pn = ppp_pernet(net); + ppp = netdev_priv(dev); ppp->dev = dev; ppp->mru = PPP_MRU; @@ -2421,17 +2523,23 @@ ppp_create_interface(int unit, int *retp) skb_queue_head_init(&ppp->mrq); #endif /* CONFIG_PPP_MULTILINK */ + /* + * drum roll: don't forget to set + * the net device is belong to + */ + dev_net_set(dev, net); + ret = -EEXIST; - mutex_lock(&all_ppp_mutex); + mutex_lock(&pn->all_ppp_mutex); if (unit < 0) { - unit = unit_get(&ppp_units_idr, ppp); + unit = unit_get(&pn->units_idr, ppp); if (unit < 0) { *retp = unit; goto out2; } } else { - if (unit_find(&ppp_units_idr, unit)) + if (unit_find(&pn->units_idr, unit)) goto out2; /* unit already exists */ /* * if caller need a specified unit number @@ -2442,7 +2550,7 @@ ppp_create_interface(int unit, int *retp) * fair but at least pppd will ask us to allocate * new unit in this case so user is happy :) */ - unit = unit_set(&ppp_units_idr, ppp, unit); + unit = unit_set(&pn->units_idr, ppp, unit); if (unit < 0) goto out2; } @@ -2453,20 +2561,22 @@ ppp_create_interface(int unit, int *retp) ret = register_netdev(dev); if (ret != 0) { - unit_put(&ppp_units_idr, unit); + unit_put(&pn->units_idr, unit); printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", dev->name, ret); goto out2; } + ppp->ppp_net = net; + atomic_inc(&ppp_unit_count); - mutex_unlock(&all_ppp_mutex); + mutex_unlock(&pn->all_ppp_mutex); *retp = 0; return ppp; out2: - mutex_unlock(&all_ppp_mutex); + mutex_unlock(&pn->all_ppp_mutex); free_netdev(dev); out1: *retp = ret; @@ -2492,7 +2602,11 @@ init_ppp_file(struct ppp_file *pf, int kind) */ static void ppp_shutdown_interface(struct ppp *ppp) { - mutex_lock(&all_ppp_mutex); + struct ppp_net *pn; + + pn = ppp_pernet(ppp->ppp_net); + mutex_lock(&pn->all_ppp_mutex); + /* This will call dev_close() for us. */ ppp_lock(ppp); if (!ppp->closing) { @@ -2502,11 +2616,12 @@ static void ppp_shutdown_interface(struct ppp *ppp) } else ppp_unlock(ppp); - unit_put(&ppp_units_idr, ppp->file.index); + unit_put(&pn->units_idr, ppp->file.index); ppp->file.dead = 1; ppp->owner = NULL; wake_up_interruptible(&ppp->file.rwait); - mutex_unlock(&all_ppp_mutex); + + mutex_unlock(&pn->all_ppp_mutex); } /* @@ -2554,9 +2669,9 @@ static void ppp_destroy_interface(struct ppp *ppp) * The caller should have locked the all_ppp_mutex. */ static struct ppp * -ppp_find_unit(int unit) +ppp_find_unit(struct ppp_net *pn, int unit) { - return unit_find(&ppp_units_idr, unit); + return unit_find(&pn->units_idr, unit); } /* @@ -2568,20 +2683,22 @@ ppp_find_unit(int unit) * when we have a lot of channels in use. 
*/ static struct channel * -ppp_find_channel(int unit) +ppp_find_channel(struct ppp_net *pn, int unit) { struct channel *pch; - list_for_each_entry(pch, &new_channels, list) { + list_for_each_entry(pch, &pn->new_channels, list) { if (pch->file.index == unit) { - list_move(&pch->list, &all_channels); + list_move(&pch->list, &pn->all_channels); return pch; } } - list_for_each_entry(pch, &all_channels, list) { + + list_for_each_entry(pch, &pn->all_channels, list) { if (pch->file.index == unit) return pch; } + return NULL; } @@ -2592,11 +2709,14 @@ static int ppp_connect_channel(struct channel *pch, int unit) { struct ppp *ppp; + struct ppp_net *pn; int ret = -ENXIO; int hdrlen; - mutex_lock(&all_ppp_mutex); - ppp = ppp_find_unit(unit); + pn = ppp_pernet(pch->chan_net); + + mutex_lock(&pn->all_ppp_mutex); + ppp = ppp_find_unit(pn, unit); if (!ppp) goto out; write_lock_bh(&pch->upl); @@ -2620,7 +2740,7 @@ ppp_connect_channel(struct channel *pch, int unit) outl: write_unlock_bh(&pch->upl); out: - mutex_unlock(&all_ppp_mutex); + mutex_unlock(&pn->all_ppp_mutex); return ret; } @@ -2677,7 +2797,7 @@ static void __exit ppp_cleanup(void) unregister_chrdev(PPP_MAJOR, "ppp"); device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); class_destroy(ppp_class); - idr_destroy(&ppp_units_idr); + unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops); } /* @@ -2743,6 +2863,7 @@ static void *unit_find(struct idr *p, int n) module_init(ppp_init); module_exit(ppp_cleanup); +EXPORT_SYMBOL(ppp_register_net_channel); EXPORT_SYMBOL(ppp_register_channel); EXPORT_SYMBOL(ppp_unregister_channel); EXPORT_SYMBOL(ppp_channel_index); diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index c22b30533a1..1011fd64108 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -78,38 +78,73 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/nsproxy.h> #include <net/net_namespace.h> +#include <net/netns/generic.h> #include <net/sock.h> #include <asm/uaccess.h> #define PPPOE_HASH_BITS 4 -#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS) - -static struct ppp_channel_ops pppoe_chan_ops; +#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) +#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); static const struct proto_ops pppoe_ops; -static DEFINE_RWLOCK(pppoe_hash_lock); - static struct ppp_channel_ops pppoe_chan_ops; +/* per-net private data for this module */ +static unsigned int pppoe_net_id; +struct pppoe_net { + /* + * we could use _single_ hash table for all + * nets by injecting net id into the hash but + * it would increase hash chains and add + * a few additional math comparations messy + * as well, moreover in case of SMP less locking + * controversy here + */ + struct pppox_sock *hash_table[PPPOE_HASH_SIZE]; + rwlock_t hash_lock; +}; + +/* to eliminate a race btw pppoe_flush_dev and pppoe_release */ +static DEFINE_SPINLOCK(flush_lock); + +/* + * PPPoE could be in the following stages: + * 1) Discovery stage (to obtain remote MAC and Session ID) + * 2) Session stage (MAC and SID are known) + * + * Ethernet frames have a special tag for this but + * we use simplier approach based on session id + */ +static inline bool stage_session(__be16 sid) +{ + return sid != 0; +} + +static inline struct pppoe_net *pppoe_pernet(struct net *net) +{ + BUG_ON(!net); + + return net_generic(net, pppoe_net_id); +} + static 
inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) { - return (a->sid == b->sid && - (memcmp(a->remote, b->remote, ETH_ALEN) == 0)); + return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN); } static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) { - return (a->sid == sid && - (memcmp(a->remote,addr,ETH_ALEN) == 0)); + return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN); } -#if 8%PPPOE_HASH_BITS +#if 8 % PPPOE_HASH_BITS #error 8 must be a multiple of PPPOE_HASH_BITS #endif @@ -118,69 +153,71 @@ static int hash_item(__be16 sid, unsigned char *addr) unsigned char hash = 0; unsigned int i; - for (i = 0 ; i < ETH_ALEN ; i++) { + for (i = 0; i < ETH_ALEN; i++) hash ^= addr[i]; - } - for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){ - hash ^= (__force __u32)sid>>i; - } - for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) { - hash ^= hash>>i; - } + for (i = 0; i < sizeof(sid_t) * 8; i += 8) + hash ^= (__force __u32)sid >> i; + for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;) + hash ^= hash >> i; - return hash & ( PPPOE_HASH_SIZE - 1 ); + return hash & PPPOE_HASH_MASK; } -/* zeroed because its in .bss */ -static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE]; - /********************************************************************** * * Set/get/delete/rehash items (internal versions) * **********************************************************************/ -static struct pppox_sock *__get_item(__be16 sid, unsigned char *addr, int ifindex) +static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid, + unsigned char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret; - ret = item_hash_table[hash]; + ret = pn->hash_table[hash]; + while (ret) { + if (cmp_addr(&ret->pppoe_pa, sid, addr) && + ret->pppoe_ifindex == ifindex) + return ret; - while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex)) ret = ret->next; + } - return ret; + return NULL; } -static int __set_item(struct pppox_sock *po) +static int __set_item(struct pppoe_net *pn, struct pppox_sock *po) { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); struct pppox_sock *ret; - ret = item_hash_table[hash]; + ret = pn->hash_table[hash]; while (ret) { - if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) + if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && + ret->pppoe_ifindex == po->pppoe_ifindex) return -EALREADY; ret = ret->next; } - po->next = item_hash_table[hash]; - item_hash_table[hash] = po; + po->next = pn->hash_table[hash]; + pn->hash_table[hash] = po; return 0; } -static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) +static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid, + char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret, **src; - ret = item_hash_table[hash]; - src = &item_hash_table[hash]; + ret = pn->hash_table[hash]; + src = &pn->hash_table[hash]; while (ret) { - if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { + if (cmp_addr(&ret->pppoe_pa, sid, addr) && + ret->pppoe_ifindex == ifindex) { *src = ret->next; break; } @@ -197,46 +234,54 @@ static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) * Set/get/delete/rehash items * **********************************************************************/ -static inline struct pppox_sock *get_item(__be16 sid, - unsigned char *addr, int ifindex) +static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid, + 
unsigned char *addr, int ifindex) { struct pppox_sock *po; - read_lock_bh(&pppoe_hash_lock); - po = __get_item(sid, addr, ifindex); + read_lock_bh(&pn->hash_lock); + po = __get_item(pn, sid, addr, ifindex); if (po) sock_hold(sk_pppox(po)); - read_unlock_bh(&pppoe_hash_lock); + read_unlock_bh(&pn->hash_lock); return po; } -static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) +static inline struct pppox_sock *get_item_by_addr(struct net *net, + struct sockaddr_pppox *sp) { struct net_device *dev; + struct pppoe_net *pn; + struct pppox_sock *pppox_sock; + int ifindex; - dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); - if(!dev) + dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); + if (!dev) return NULL; + ifindex = dev->ifindex; + pn = net_generic(net, pppoe_net_id); + pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid, + sp->sa_addr.pppoe.remote, ifindex); dev_put(dev); - return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); + + return pppox_sock; } -static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex) +static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid, + char *addr, int ifindex) { struct pppox_sock *ret; - write_lock_bh(&pppoe_hash_lock); - ret = __delete_item(sid, addr, ifindex); - write_unlock_bh(&pppoe_hash_lock); + write_lock_bh(&pn->hash_lock); + ret = __delete_item(pn, sid, addr, ifindex); + write_unlock_bh(&pn->hash_lock); return ret; } - - /*************************************************************************** * * Handler for device events. @@ -246,25 +291,33 @@ static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex static void pppoe_flush_dev(struct net_device *dev) { - int hash; + struct pppoe_net *pn; + int i; + BUG_ON(dev == NULL); - write_lock_bh(&pppoe_hash_lock); - for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { - struct pppox_sock *po = item_hash_table[hash]; + pn = pppoe_pernet(dev_net(dev)); + if (!pn) /* already freed */ + return; + + write_lock_bh(&pn->hash_lock); + for (i = 0; i < PPPOE_HASH_SIZE; i++) { + struct pppox_sock *po = pn->hash_table[i]; while (po != NULL) { - struct sock *sk = sk_pppox(po); + struct sock *sk; if (po->pppoe_dev != dev) { po = po->next; continue; } + sk = sk_pppox(po); + spin_lock(&flush_lock); po->pppoe_dev = NULL; + spin_unlock(&flush_lock); dev_put(dev); - /* We always grab the socket lock, followed by the - * pppoe_hash_lock, in that order. Since we should + * hash_lock, in that order. Since we should * hold the sock lock while doing any unbinding, * we need to release the lock we're holding. * Hold a reference to the sock so it doesn't disappear @@ -273,7 +326,7 @@ static void pppoe_flush_dev(struct net_device *dev) sock_hold(sk); - write_unlock_bh(&pppoe_hash_lock); + write_unlock_bh(&pn->hash_lock); lock_sock(sk); if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { @@ -289,20 +342,17 @@ static void pppoe_flush_dev(struct net_device *dev) * While the lock was dropped the chain contents may * have changed. 
*/ - write_lock_bh(&pppoe_hash_lock); - po = item_hash_table[hash]; + write_lock_bh(&pn->hash_lock); + po = pn->hash_table[i]; } } - write_unlock_bh(&pppoe_hash_lock); + write_unlock_bh(&pn->hash_lock); } static int pppoe_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *) ptr; - - if (dev_net(dev) != &init_net) - return NOTIFY_DONE; + struct net_device *dev = (struct net_device *)ptr; /* Only look at sockets that are using this specific device. */ switch (event) { @@ -324,12 +374,10 @@ static int pppoe_device_event(struct notifier_block *this, return NOTIFY_DONE; } - static struct notifier_block pppoe_notifier = { .notifier_call = pppoe_device_event, }; - /************************************************************************ * * Do the real work of receiving a PPPoE Session frame. @@ -343,8 +391,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) if (sk->sk_state & PPPOX_BOUND) { ppp_input(&po->chan, skb); } else if (sk->sk_state & PPPOX_RELAY) { - relay_po = get_item_by_addr(&po->pppoe_relay); - + relay_po = get_item_by_addr(dev_net(po->pppoe_dev), + &po->pppoe_relay); if (relay_po == NULL) goto abort_kfree; @@ -373,22 +421,18 @@ abort_kfree: * Receive wrapper called in BH context. * ***********************************************************************/ -static int pppoe_rcv(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) - +static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; + struct pppoe_net *pn; int len; - if (!(skb = skb_share_check(skb, GFP_ATOMIC))) + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) goto out; - if (dev_net(dev) != &init_net) - goto drop; - if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; @@ -402,7 +446,8 @@ static int pppoe_rcv(struct sk_buff *skb, if (pskb_trim_rcsum(skb, len)) goto drop; - po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); + pn = pppoe_pernet(dev_net(dev)); + po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (!po) goto drop; @@ -420,19 +465,16 @@ out: * This is solely for detection of PADT frames * ***********************************************************************/ -static int pppoe_disc_rcv(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) +static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; + struct pppoe_net *pn; - if (dev_net(dev) != &init_net) - goto abort; - - if (!(skb = skb_share_check(skb, GFP_ATOMIC))) + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) goto out; if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) @@ -442,7 +484,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb, if (ph->code != PADT_CODE) goto abort; - po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); + pn = pppoe_pernet(dev_net(dev)); + po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po) { struct sock *sk = sk_pppox(po); @@ -471,12 +514,12 @@ out: } static struct packet_type pppoes_ptype = { - .type = __constant_htons(ETH_P_PPP_SES), + .type = cpu_to_be16(ETH_P_PPP_SES), .func = pppoe_rcv, }; static struct packet_type pppoed_ptype = { - .type = __constant_htons(ETH_P_PPP_DISC), + .type = cpu_to_be16(ETH_P_PPP_DISC), .func = pppoe_disc_rcv, }; @@ 
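With the session hash per namespace, pppoe_rcv() and pppoe_disc_rcv() above no longer need the old bail-out for devices outside init_net; they resolve the right table from the receiving device instead. The essential lookup step, pulled out on its own (pppoe_pernet() and get_item() are the helpers introduced earlier in this file, so this sketch assumes it sits next to them in pppoe.c):

#include <linux/if_ether.h>
#include <linux/if_pppox.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct pppox_sock *foo_lookup_session(struct sk_buff *skb,
					     struct net_device *dev, __be16 sid)
{
	struct pppoe_net *pn = pppoe_pernet(dev_net(dev));

	/* per-net hash table, keyed on session id + remote MAC + ifindex */
	return get_item(pn, sid, eth_hdr(skb)->h_source, dev->ifindex);
}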
-493,38 +536,37 @@ static struct proto pppoe_sk_proto = { **********************************************************************/ static int pppoe_create(struct net *net, struct socket *sock) { - int error = -ENOMEM; struct sock *sk; sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); if (!sk) - goto out; + return -ENOMEM; sock_init_data(sock, sk); - sock->state = SS_UNCONNECTED; - sock->ops = &pppoe_ops; + sock->state = SS_UNCONNECTED; + sock->ops = &pppoe_ops; - sk->sk_backlog_rcv = pppoe_rcv_core; - sk->sk_state = PPPOX_NONE; - sk->sk_type = SOCK_STREAM; - sk->sk_family = PF_PPPOX; - sk->sk_protocol = PX_PROTO_OE; + sk->sk_backlog_rcv = pppoe_rcv_core; + sk->sk_state = PPPOX_NONE; + sk->sk_type = SOCK_STREAM; + sk->sk_family = PF_PPPOX; + sk->sk_protocol = PX_PROTO_OE; - error = 0; -out: return error; + return 0; } static int pppoe_release(struct socket *sock) { struct sock *sk = sock->sk; struct pppox_sock *po; + struct pppoe_net *pn; if (!sk) return 0; lock_sock(sk); - if (sock_flag(sk, SOCK_DEAD)){ + if (sock_flag(sk, SOCK_DEAD)) { release_sock(sk); return -EBADF; } @@ -534,26 +576,39 @@ static int pppoe_release(struct socket *sock) /* Signal the death of the socket. */ sk->sk_state = PPPOX_DEAD; + /* + * pppoe_flush_dev could lead to a race with + * this routine so we use flush_lock to eliminate + * such a case (we only need per-net specific data) + */ + spin_lock(&flush_lock); + po = pppox_sk(sk); + if (!po->pppoe_dev) { + spin_unlock(&flush_lock); + goto out; + } + pn = pppoe_pernet(dev_net(po->pppoe_dev)); + spin_unlock(&flush_lock); - /* Write lock on hash lock protects the entire "po" struct from - * concurrent updates via pppoe_flush_dev. The "po" struct should - * be considered part of the hash table contents, thus protected - * by the hash table lock */ - write_lock_bh(&pppoe_hash_lock); + /* + * protect "po" from concurrent updates + * on pppoe_flush_dev + */ + write_lock_bh(&pn->hash_lock); po = pppox_sk(sk); - if (po->pppoe_pa.sid) { - __delete_item(po->pppoe_pa.sid, - po->pppoe_pa.remote, po->pppoe_ifindex); - } + if (stage_session(po->pppoe_pa.sid)) + __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, + po->pppoe_ifindex); if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } - write_unlock_bh(&pppoe_hash_lock); + write_unlock_bh(&pn->hash_lock); +out: sock_orphan(sk); sock->sk = NULL; @@ -564,14 +619,14 @@ static int pppoe_release(struct socket *sock) return 0; } - static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; - struct net_device *dev; - struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; + struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; struct pppox_sock *po = pppox_sk(sk); + struct net_device *dev; + struct pppoe_net *pn; int error; lock_sock(sk); @@ -582,44 +637,45 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, /* Check for already bound sockets */ error = -EBUSY; - if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid) + if ((sk->sk_state & PPPOX_CONNECTED) && + stage_session(sp->sa_addr.pppoe.sid)) goto end; /* Check for already disconnected sockets, on attempts to disconnect */ error = -EALREADY; - if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid ) + if ((sk->sk_state & PPPOX_DEAD) && + !stage_session(sp->sa_addr.pppoe.sid)) goto end; error = 0; - if (po->pppoe_pa.sid) { - pppox_unbind_sock(sk); - - /* Delete the old binding */ - 
delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex); - if(po->pppoe_dev) + /* Delete the old binding */ + if (stage_session(po->pppoe_pa.sid)) { + pppox_unbind_sock(sk); + if (po->pppoe_dev) { + pn = pppoe_pernet(dev_net(po->pppoe_dev)); + delete_item(pn, po->pppoe_pa.sid, + po->pppoe_pa.remote, po->pppoe_ifindex); dev_put(po->pppoe_dev); - + } memset(sk_pppox(po) + 1, 0, sizeof(struct pppox_sock) - sizeof(struct sock)); - sk->sk_state = PPPOX_NONE; } - /* Don't re-bind if sid==0 */ - if (sp->sa_addr.pppoe.sid != 0) { - dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); - + /* Re-bind in session stage only */ + if (stage_session(sp->sa_addr.pppoe.sid)) { error = -ENODEV; + dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev); if (!dev) goto end; po->pppoe_dev = dev; po->pppoe_ifindex = dev->ifindex; - - write_lock_bh(&pppoe_hash_lock); - if (!(dev->flags & IFF_UP)){ - write_unlock_bh(&pppoe_hash_lock); + pn = pppoe_pernet(dev_net(dev)); + write_lock_bh(&pn->hash_lock); + if (!(dev->flags & IFF_UP)) { + write_unlock_bh(&pn->hash_lock); goto err_put; } @@ -627,8 +683,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, &sp->sa_addr.pppoe, sizeof(struct pppoe_addr)); - error = __set_item(po); - write_unlock_bh(&pppoe_hash_lock); + error = __set_item(pn, po); + write_unlock_bh(&pn->hash_lock); if (error < 0) goto err_put; @@ -639,7 +695,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; - error = ppp_register_channel(&po->chan); + error = ppp_register_net_channel(dev_net(dev), &po->chan); if (error) goto err_put; @@ -648,7 +704,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->num = sp->sa_addr.pppoe.sid; - end: +end: release_sock(sk); return error; err_put: @@ -659,7 +715,6 @@ err_put: goto end; } - static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, int *usockaddr_len, int peer) { @@ -678,7 +733,6 @@ static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, return 0; } - static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { @@ -690,7 +744,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, switch (cmd) { case PPPIOCGMRU: err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) break; @@ -698,7 +751,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, if (put_user(po->pppoe_dev->mtu - sizeof(struct pppoe_hdr) - PPP_HDRLEN, - (int __user *) arg)) + (int __user *)arg)) break; err = 0; break; @@ -709,7 +762,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, break; err = -EFAULT; - if (get_user(val,(int __user *) arg)) + if (get_user(val, (int __user *)arg)) break; if (val < (po->pppoe_dev->mtu @@ -722,7 +775,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, case PPPIOCSFLAGS: err = -EFAULT; - if (get_user(val, (int __user *) arg)) + if (get_user(val, (int __user *)arg)) break; err = 0; break; @@ -749,13 +802,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, err = -EINVAL; if (po->pppoe_relay.sa_family != AF_PPPOX || - po->pppoe_relay.sa_protocol!= PX_PROTO_OE) + po->pppoe_relay.sa_protocol != PX_PROTO_OE) break; /* Check that the socket referenced by the address actually exists. 
*/ - relay_po = get_item_by_addr(&po->pppoe_relay); - + relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay); if (!relay_po) break; @@ -781,7 +833,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, return err; } - static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { @@ -808,7 +859,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, dev = po->pppoe_dev; error = -EMSGSIZE; - if (total_len > (dev->mtu + dev->hard_header_len)) + if (total_len > (dev->mtu + dev->hard_header_len)) goto end; @@ -826,13 +877,12 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, skb->dev = dev; skb->priority = sk->sk_priority; - skb->protocol = __constant_htons(ETH_P_PPP_SES); + skb->protocol = cpu_to_be16(ETH_P_PPP_SES); - ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr)); - start = (char *) &ph->tag[0]; + ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr)); + start = (char *)&ph->tag[0]; error = memcpy_fromiovec(start, m->msg_iov, total_len); - if (error < 0) { kfree_skb(skb); goto end; @@ -853,7 +903,6 @@ end: return error; } - /************************************************************************ * * xmit function for internal use. @@ -888,7 +937,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) ph->sid = po->num; ph->length = htons(data_len); - skb->protocol = __constant_htons(ETH_P_PPP_SES); + skb->protocol = cpu_to_be16(ETH_P_PPP_SES); skb->dev = dev; dev_hard_header(skb, dev, ETH_P_PPP_SES, @@ -903,7 +952,6 @@ abort: return 1; } - /************************************************************************ * * xmit function called by generic PPP driver @@ -912,11 +960,10 @@ abort: ***********************************************************************/ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) { - struct sock *sk = (struct sock *) chan->private; + struct sock *sk = (struct sock *)chan->private; return __pppoe_xmit(sk, skb); } - static struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, }; @@ -935,7 +982,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &error); - if (error < 0) goto end; @@ -968,44 +1014,47 @@ static int pppoe_seq_show(struct seq_file *seq, void *v) dev_name = po->pppoe_pa.dev; seq_printf(seq, "%08X %pM %8s\n", - po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); + po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); out: return 0; } -static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) +static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos) { struct pppox_sock *po; - int i = 0; + int i; - for (; i < PPPOE_HASH_SIZE; i++) { - po = item_hash_table[i]; + for (i = 0; i < PPPOE_HASH_SIZE; i++) { + po = pn->hash_table[i]; while (po) { if (!pos--) goto out; po = po->next; } } + out: return po; } static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(pppoe_hash_lock) + __acquires(pn->hash_lock) { + struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); loff_t l = *pos; - read_lock_bh(&pppoe_hash_lock); - return l ? pppoe_get_idx(--l) : SEQ_START_TOKEN; + read_lock_bh(&pn->hash_lock); + return l ? 
pppoe_get_idx(pn, --l) : SEQ_START_TOKEN; } static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); struct pppox_sock *po; ++*pos; if (v == SEQ_START_TOKEN) { - po = pppoe_get_idx(0); + po = pppoe_get_idx(pn, 0); goto out; } po = v; @@ -1015,22 +1064,24 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); while (++hash < PPPOE_HASH_SIZE) { - po = item_hash_table[hash]; + po = pn->hash_table[hash]; if (po) break; } } + out: return po; } static void pppoe_seq_stop(struct seq_file *seq, void *v) - __releases(pppoe_hash_lock) + __releases(pn->hash_lock) { - read_unlock_bh(&pppoe_hash_lock); + struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); + read_unlock_bh(&pn->hash_lock); } -static struct seq_operations pppoe_seq_ops = { +static const struct seq_operations pppoe_seq_ops = { .start = pppoe_seq_start, .next = pppoe_seq_next, .stop = pppoe_seq_stop, @@ -1039,7 +1090,8 @@ static struct seq_operations pppoe_seq_ops = { static int pppoe_seq_open(struct inode *inode, struct file *file) { - return seq_open(file, &pppoe_seq_ops); + return seq_open_net(inode, file, &pppoe_seq_ops, + sizeof(struct seq_net_private)); } static const struct file_operations pppoe_seq_fops = { @@ -1047,74 +1099,115 @@ static const struct file_operations pppoe_seq_fops = { .open = pppoe_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; -static int __init pppoe_proc_init(void) -{ - struct proc_dir_entry *p; - - p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops); - if (!p) - return -ENOMEM; - return 0; -} -#else /* CONFIG_PROC_FS */ -static inline int pppoe_proc_init(void) { return 0; } #endif /* CONFIG_PROC_FS */ static const struct proto_ops pppoe_ops = { - .family = AF_PPPOX, - .owner = THIS_MODULE, - .release = pppoe_release, - .bind = sock_no_bind, - .connect = pppoe_connect, - .socketpair = sock_no_socketpair, - .accept = sock_no_accept, - .getname = pppoe_getname, - .poll = datagram_poll, - .listen = sock_no_listen, - .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, - .sendmsg = pppoe_sendmsg, - .recvmsg = pppoe_recvmsg, - .mmap = sock_no_mmap, - .ioctl = pppox_ioctl, + .family = AF_PPPOX, + .owner = THIS_MODULE, + .release = pppoe_release, + .bind = sock_no_bind, + .connect = pppoe_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = pppoe_getname, + .poll = datagram_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = pppoe_sendmsg, + .recvmsg = pppoe_recvmsg, + .mmap = sock_no_mmap, + .ioctl = pppox_ioctl, }; static struct pppox_proto pppoe_proto = { - .create = pppoe_create, - .ioctl = pppoe_ioctl, - .owner = THIS_MODULE, + .create = pppoe_create, + .ioctl = pppoe_ioctl, + .owner = THIS_MODULE, }; +static __net_init int pppoe_init_net(struct net *net) +{ + struct pppoe_net *pn; + struct proc_dir_entry *pde; + int err; + + pn = kzalloc(sizeof(*pn), GFP_KERNEL); + if (!pn) + return -ENOMEM; + + rwlock_init(&pn->hash_lock); + + err = net_assign_generic(net, pppoe_net_id, pn); + if (err) + goto out; + + pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops); +#ifdef CONFIG_PROC_FS + if (!pde) { + err = -ENOMEM; + goto out; + } +#endif + + return 0; + +out: + kfree(pn); + return err; +} + 
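The /proc/net/pppoe rework above relies on the namespace-aware seq_file helpers: seq_open_net() reserves a struct seq_net_private so the iterator can recover its namespace with seq_file_net(), and seq_release_net() undoes it; the entry itself is created per net with proc_net_fops_create() in the pernet init hook and removed with proc_net_remove() in the exit hook. A minimal per-net seq file with the same shape, reusing struct foo_net and foo_pernet() from the earlier sketch and emitting a single synthetic record:

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/seq_file_net.h>

static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;	/* one synthetic record */
}

static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void foo_seq_stop(struct seq_file *seq, void *v)
{
}

static int foo_seq_show(struct seq_file *seq, void *v)
{
	struct foo_net *fn = foo_pernet(seq_file_net(seq));

	seq_printf(seq, "per-net state at %p\n", fn);
	return 0;
}

static const struct seq_operations foo_seq_ops = {
	.start = foo_seq_start,
	.next  = foo_seq_next,
	.stop  = foo_seq_stop,
	.show  = foo_seq_show,
};

static int foo_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &foo_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations foo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};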
+static __net_exit void pppoe_exit_net(struct net *net) +{ + struct pppoe_net *pn; + + proc_net_remove(net, "pppoe"); + pn = net_generic(net, pppoe_net_id); + /* + * if someone has cached our net then + * further net_generic call will return NULL + */ + net_assign_generic(net, pppoe_net_id, NULL); + kfree(pn); +} + +static __net_initdata struct pernet_operations pppoe_net_ops = { + .init = pppoe_init_net, + .exit = pppoe_exit_net, +}; static int __init pppoe_init(void) { - int err = proto_register(&pppoe_sk_proto, 0); + int err; + err = proto_register(&pppoe_sk_proto, 0); if (err) goto out; - err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); + err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); if (err) goto out_unregister_pppoe_proto; - err = pppoe_proc_init(); + err = register_pernet_gen_device(&pppoe_net_id, &pppoe_net_ops); if (err) goto out_unregister_pppox_proto; dev_add_pack(&pppoes_ptype); dev_add_pack(&pppoed_ptype); register_netdevice_notifier(&pppoe_notifier); -out: - return err; + + return 0; + out_unregister_pppox_proto: unregister_pppox_proto(PX_PROTO_OE); out_unregister_pppoe_proto: proto_unregister(&pppoe_sk_proto); - goto out; +out: + return err; } static void __exit pppoe_exit(void) @@ -1123,7 +1216,7 @@ static void __exit pppoe_exit(void) dev_remove_pack(&pppoes_ptype); dev_remove_pack(&pppoed_ptype); unregister_netdevice_notifier(&pppoe_notifier); - remove_proc_entry("pppoe", init_net.proc_net); + unregister_pernet_gen_device(pppoe_net_id, &pppoe_net_ops); proto_unregister(&pppoe_sk_proto); } diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index f1a946785c6..15f4a43a689 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -90,7 +90,9 @@ #include <linux/hash.h> #include <linux/sort.h> #include <linux/proc_fs.h> +#include <linux/nsproxy.h> #include <net/net_namespace.h> +#include <net/netns/generic.h> #include <net/dst.h> #include <net/ip.h> #include <net/udp.h> @@ -204,6 +206,7 @@ struct pppol2tp_tunnel struct sock *sock; /* Parent socket */ struct list_head list; /* Keep a list of all open * prepared sockets */ + struct net *pppol2tp_net; /* the net we belong to */ atomic_t ref_count; }; @@ -227,8 +230,20 @@ static atomic_t pppol2tp_tunnel_count; static atomic_t pppol2tp_session_count; static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; static struct proto_ops pppol2tp_ops; -static LIST_HEAD(pppol2tp_tunnel_list); -static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock); + +/* per-net private data for this module */ +static unsigned int pppol2tp_net_id; +struct pppol2tp_net { + struct list_head pppol2tp_tunnel_list; + rwlock_t pppol2tp_tunnel_list_lock; +}; + +static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net) +{ + BUG_ON(!net); + + return net_generic(net, pppol2tp_net_id); +} /* Helpers to obtain tunnel/session contexts from sockets. 
*/ @@ -321,18 +336,19 @@ pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id) /* Lookup a tunnel by id */ -static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id) +static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id) { - struct pppol2tp_tunnel *tunnel = NULL; + struct pppol2tp_tunnel *tunnel; + struct pppol2tp_net *pn = pppol2tp_pernet(net); - read_lock_bh(&pppol2tp_tunnel_list_lock); - list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { + read_lock_bh(&pn->pppol2tp_tunnel_list_lock); + list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) { if (tunnel->stats.tunnel_id == tunnel_id) { - read_unlock_bh(&pppol2tp_tunnel_list_lock); + read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); return tunnel; } } - read_unlock_bh(&pppol2tp_tunnel_list_lock); + read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); return NULL; } @@ -1287,10 +1303,12 @@ again: */ static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) { + struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net); + /* Remove from socket list */ - write_lock_bh(&pppol2tp_tunnel_list_lock); + write_lock_bh(&pn->pppol2tp_tunnel_list_lock); list_del_init(&tunnel->list); - write_unlock_bh(&pppol2tp_tunnel_list_lock); + write_unlock_bh(&pn->pppol2tp_tunnel_list_lock); atomic_dec(&pppol2tp_tunnel_count); kfree(tunnel); @@ -1444,13 +1462,14 @@ error: /* Internal function to prepare a tunnel (UDP) socket to have PPPoX * sockets attached to it. */ -static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, - int *error) +static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net, + int fd, u16 tunnel_id, int *error) { int err; struct socket *sock = NULL; struct sock *sk; struct pppol2tp_tunnel *tunnel; + struct pppol2tp_net *pn; struct sock *ret = NULL; /* Get the tunnel UDP socket from the fd, which was opened by @@ -1524,11 +1543,15 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, /* Misc init */ rwlock_init(&tunnel->hlist_lock); + /* The net we belong to */ + tunnel->pppol2tp_net = net; + pn = pppol2tp_pernet(net); + /* Add tunnel to our list */ INIT_LIST_HEAD(&tunnel->list); - write_lock_bh(&pppol2tp_tunnel_list_lock); - list_add(&tunnel->list, &pppol2tp_tunnel_list); - write_unlock_bh(&pppol2tp_tunnel_list_lock); + write_lock_bh(&pn->pppol2tp_tunnel_list_lock); + list_add(&tunnel->list, &pn->pppol2tp_tunnel_list); + write_unlock_bh(&pn->pppol2tp_tunnel_list_lock); atomic_inc(&pppol2tp_tunnel_count); /* Bump the reference count. The tunnel context is deleted @@ -1629,7 +1652,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, * tunnel id. 
*/ if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) { - tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd, + tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk), + sp->pppol2tp.fd, sp->pppol2tp.s_tunnel, &error); if (tunnel_sock == NULL) @@ -1637,7 +1661,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, tunnel = tunnel_sock->sk_user_data; } else { - tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel); + tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel); /* Error if we can't find the tunnel */ error = -ENOENT; @@ -1725,7 +1749,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.ops = &pppol2tp_chan_ops; po->chan.mtu = session->mtu; - error = ppp_register_channel(&po->chan); + error = ppp_register_net_channel(sock_net(sk), &po->chan); if (error) goto end_put_tun; @@ -2347,8 +2371,9 @@ end: #include <linux/seq_file.h> struct pppol2tp_seq_data { - struct pppol2tp_tunnel *tunnel; /* current tunnel */ - struct pppol2tp_session *session; /* NULL means get first session in tunnel */ + struct seq_net_private p; + struct pppol2tp_tunnel *tunnel; /* current tunnel */ + struct pppol2tp_session *session; /* NULL means get first session in tunnel */ }; static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr) @@ -2384,17 +2409,18 @@ out: return session; } -static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr) +static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn, + struct pppol2tp_tunnel *curr) { struct pppol2tp_tunnel *tunnel = NULL; - read_lock_bh(&pppol2tp_tunnel_list_lock); - if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) { + read_lock_bh(&pn->pppol2tp_tunnel_list_lock); + if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) { goto out; } tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); out: - read_unlock_bh(&pppol2tp_tunnel_list_lock); + read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); return tunnel; } @@ -2402,6 +2428,7 @@ out: static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) { struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; + struct pppol2tp_net *pn; loff_t pos = *offs; if (!pos) @@ -2409,14 +2436,15 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) BUG_ON(m->private == NULL); pd = m->private; + pn = pppol2tp_pernet(seq_file_net(m)); if (pd->tunnel == NULL) { - if (!list_empty(&pppol2tp_tunnel_list)) - pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list); + if (!list_empty(&pn->pppol2tp_tunnel_list)) + pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list); } else { pd->session = next_session(pd->tunnel, pd->session); if (pd->session == NULL) { - pd->tunnel = next_tunnel(pd->tunnel); + pd->tunnel = next_tunnel(pn, pd->tunnel); } } @@ -2517,7 +2545,7 @@ out: return 0; } -static struct seq_operations pppol2tp_seq_ops = { +static const struct seq_operations pppol2tp_seq_ops = { .start = pppol2tp_seq_start, .next = pppol2tp_seq_next, .stop = pppol2tp_seq_stop, @@ -2530,51 +2558,18 @@ static struct seq_operations pppol2tp_seq_ops = { */ static int pppol2tp_proc_open(struct inode *inode, struct file *file) { - struct seq_file *m; - struct pppol2tp_seq_data *pd; - int ret = 0; - - ret = seq_open(file, &pppol2tp_seq_ops); - if (ret < 0) - goto out; - - m = file->private_data; - - /* Allocate and fill our proc_data for access later */ - ret = -ENOMEM; - m->private = 
kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL); - if (m->private == NULL) - goto out; - - pd = m->private; - ret = 0; - -out: - return ret; -} - -/* Called when /proc file access completes. - */ -static int pppol2tp_proc_release(struct inode *inode, struct file *file) -{ - struct seq_file *m = (struct seq_file *)file->private_data; - - kfree(m->private); - m->private = NULL; - - return seq_release(inode, file); + return seq_open_net(inode, file, &pppol2tp_seq_ops, + sizeof(struct pppol2tp_seq_data)); } -static struct file_operations pppol2tp_proc_fops = { +static const struct file_operations pppol2tp_proc_fops = { .owner = THIS_MODULE, .open = pppol2tp_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = pppol2tp_proc_release, + .release = seq_release_net, }; -static struct proc_dir_entry *pppol2tp_proc; - #endif /* CONFIG_PROC_FS */ /***************************************************************************** @@ -2606,6 +2601,57 @@ static struct pppox_proto pppol2tp_proto = { .ioctl = pppol2tp_ioctl }; +static __net_init int pppol2tp_init_net(struct net *net) +{ + struct pppol2tp_net *pn; + struct proc_dir_entry *pde; + int err; + + pn = kzalloc(sizeof(*pn), GFP_KERNEL); + if (!pn) + return -ENOMEM; + + INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list); + rwlock_init(&pn->pppol2tp_tunnel_list_lock); + + err = net_assign_generic(net, pppol2tp_net_id, pn); + if (err) + goto out; + + pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops); +#ifdef CONFIG_PROC_FS + if (!pde) { + err = -ENOMEM; + goto out; + } +#endif + + return 0; + +out: + kfree(pn); + return err; +} + +static __net_exit void pppol2tp_exit_net(struct net *net) +{ + struct pppol2tp_net *pn; + + proc_net_remove(net, "pppol2tp"); + pn = net_generic(net, pppol2tp_net_id); + /* + * if someone has cached our net then + * further net_generic call will return NULL + */ + net_assign_generic(net, pppol2tp_net_id, NULL); + kfree(pn); +} + +static __net_initdata struct pernet_operations pppol2tp_net_ops = { + .init = pppol2tp_init_net, + .exit = pppol2tp_exit_net, +}; + static int __init pppol2tp_init(void) { int err; @@ -2617,23 +2663,17 @@ static int __init pppol2tp_init(void) if (err) goto out_unregister_pppol2tp_proto; -#ifdef CONFIG_PROC_FS - pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0, - &pppol2tp_proc_fops); - if (!pppol2tp_proc) { - err = -ENOMEM; + err = register_pernet_gen_device(&pppol2tp_net_id, &pppol2tp_net_ops); + if (err) goto out_unregister_pppox_proto; - } -#endif /* CONFIG_PROC_FS */ + printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION); out: return err; -#ifdef CONFIG_PROC_FS out_unregister_pppox_proto: unregister_pppox_proto(PX_PROTO_OL2TP); -#endif out_unregister_pppol2tp_proto: proto_unregister(&pppol2tp_sk_proto); goto out; @@ -2642,10 +2682,6 @@ out_unregister_pppol2tp_proto: static void __exit pppol2tp_exit(void) { unregister_pppox_proto(PX_PROTO_OL2TP); - -#ifdef CONFIG_PROC_FS - remove_proc_entry("pppol2tp", init_net.proc_net); -#endif proto_unregister(&pppol2tp_sk_proto); } diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index 03aecc97fb4..4f6d33fbc67 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c @@ -108,9 +108,6 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol) { int rc = -EPROTOTYPE; - if (net != &init_net) - return -EAFNOSUPPORT; - if (protocol < 0 || protocol > PX_MAX_PROTO) goto out; diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index 4b564eda5bd..30900b30d53 100644 --- 
a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -745,7 +745,7 @@ static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, /* Move the mac addresses to the top of buffer */ memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); - veth->h_vlan_proto = __constant_htons(ETH_P_8021Q); + veth->h_vlan_proto = cpu_to_be16(ETH_P_8021Q); veth->h_vlan_TCI = htons(tag); return skb; @@ -1403,6 +1403,19 @@ void gelic_net_tx_timeout(struct net_device *netdev) atomic_dec(&card->tx_timeout_task_counter); } +static const struct net_device_ops gelic_netdevice_ops = { + .ndo_open = gelic_net_open, + .ndo_stop = gelic_net_stop, + .ndo_start_xmit = gelic_net_xmit, + .ndo_set_multicast_list = gelic_net_set_multi, + .ndo_change_mtu = gelic_net_change_mtu, + .ndo_tx_timeout = gelic_net_tx_timeout, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gelic_net_poll_controller, +#endif +}; + /** * gelic_ether_setup_netdev_ops - initialization of net_device operations * @netdev: net_device structure @@ -1412,21 +1425,12 @@ void gelic_net_tx_timeout(struct net_device *netdev) static void gelic_ether_setup_netdev_ops(struct net_device *netdev, struct napi_struct *napi) { - netdev->open = &gelic_net_open; - netdev->stop = &gelic_net_stop; - netdev->hard_start_xmit = &gelic_net_xmit; - netdev->set_multicast_list = &gelic_net_set_multi; - netdev->change_mtu = &gelic_net_change_mtu; - /* tx watchdog */ - netdev->tx_timeout = &gelic_net_tx_timeout; netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; /* NAPI */ netif_napi_add(netdev, napi, gelic_net_poll, GELIC_NET_NAPI_WEIGHT); netdev->ethtool_ops = &gelic_ether_ethtool_ops; -#ifdef CONFIG_NET_POLL_CONTROLLER - netdev->poll_controller = gelic_net_poll_controller; -#endif + netdev->netdev_ops = &gelic_netdevice_ops; } /** diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index 335da4831ab..a5ac2bd58b5 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c @@ -2697,6 +2697,19 @@ static int gelic_wl_stop(struct net_device *netdev) /* -- */ +static const struct net_device_ops gelic_wl_netdevice_ops = { + .ndo_open = gelic_wl_open, + .ndo_stop = gelic_wl_stop, + .ndo_start_xmit = gelic_net_xmit, + .ndo_set_multicast_list = gelic_net_set_multi, + .ndo_change_mtu = gelic_net_change_mtu, + .ndo_tx_timeout = gelic_net_tx_timeout, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gelic_net_poll_controller, +#endif +}; + static struct ethtool_ops gelic_wl_ethtool_ops = { .get_drvinfo = gelic_net_get_drvinfo, .get_link = gelic_wl_get_link, @@ -2711,21 +2724,12 @@ static void gelic_wl_setup_netdev_ops(struct net_device *netdev) struct gelic_wl_info *wl; wl = port_wl(netdev_priv(netdev)); BUG_ON(!wl); - netdev->open = &gelic_wl_open; - netdev->stop = &gelic_wl_stop; - netdev->hard_start_xmit = &gelic_net_xmit; - netdev->set_multicast_list = &gelic_net_set_multi; - netdev->change_mtu = &gelic_net_change_mtu; - netdev->wireless_data = &wl->wireless_data; - netdev->wireless_handlers = &gelic_wl_wext_handler_def; - /* tx watchdog */ - netdev->tx_timeout = &gelic_net_tx_timeout; netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; netdev->ethtool_ops = &gelic_wl_ethtool_ops; -#ifdef CONFIG_NET_POLL_CONTROLLER - netdev->poll_controller = gelic_net_poll_controller; -#endif + netdev->netdev_ops = &gelic_wl_netdevice_ops; + netdev->wireless_data = &wl->wireless_data; + netdev->wireless_handlers = 
&gelic_wl_wext_handler_def; } /* diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 189ec29ac7a..8b2823c8dcc 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -2292,7 +2292,7 @@ static int ql_poll(struct napi_struct *napi, int budget) if (tx_cleaned + rx_cleaned != budget) { spin_lock_irqsave(&qdev->hw_lock, hw_flags); - __netif_rx_complete(napi); + __napi_complete(napi); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, @@ -2351,8 +2351,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) spin_unlock(&qdev->adapter_lock); } else if (value & ISP_IMR_DISABLE_CMPL_INT) { ql_disable_interrupts(qdev); - if (likely(netif_rx_schedule_prep(&qdev->napi))) { - __netif_rx_schedule(&qdev->napi); + if (likely(napi_schedule_prep(&qdev->napi))) { + __napi_schedule(&qdev->napi); } } else { return IRQ_NONE; diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 3d1d7b6e55a..04bf2122264 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -1446,6 +1446,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, qdev->stats.rx_packets++; qdev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); + skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]); if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) { QPRINTK(qdev, RX_STATUS, DEBUG, "Passing a VLAN packet upstream.\n"); @@ -1652,7 +1653,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) rx_ring->cq_id); if (work_done < budget) { - __netif_rx_complete(napi); + __napi_complete(napi); ql_enable_completion_interrupt(qdev, rx_ring->irq); } return work_done; @@ -1737,7 +1738,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id) static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) { struct rx_ring *rx_ring = dev_id; - netif_rx_schedule(&rx_ring->napi); + napi_schedule(&rx_ring->napi); return IRQ_HANDLED; } @@ -1823,7 +1824,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) &rx_ring->rx_work, 0); else - netif_rx_schedule(&rx_ring->napi); + napi_schedule(&rx_ring->napi); work_done++; } } diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index b2dcdb5ed8b..3c27a7bfea4 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -676,7 +676,7 @@ static int r6040_poll(struct napi_struct *napi, int budget) work_done = r6040_rx(dev, budget); if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); /* Enable RX interrupt */ iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); } @@ -713,7 +713,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id) /* Mask off RX interrupt */ misr &= ~RX_INTS; - netif_rx_schedule(&lp->napi); + napi_schedule(&lp->napi); } /* TX interrupt request */ diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 2c73ca606b3..1c4a980253f 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -3581,8 +3581,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); tp->intr_mask = ~tp->napi_event; - if (likely(netif_rx_schedule_prep(&tp->napi))) - __netif_rx_schedule(&tp->napi); + if (likely(napi_schedule_prep(&tp->napi))) + __napi_schedule(&tp->napi); else if (netif_msg_intr(tp)) { printk(KERN_INFO "%s: interrupt %04x in poll\n", dev->name, status); @@ -3603,7 +3603,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) rtl8169_tx_interrupt(dev, tp, ioaddr); if (work_done < budget) { - 
netif_rx_complete(napi); + napi_complete(napi); tp->intr_mask = 0xffff; /* * 20040426: the barrier is not strictly required but the diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index f5c57c059bc..e0a353f4ec9 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -2852,7 +2852,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) s2io_chk_rx_buffers(nic, ring); if (pkts_processed < budget_org) { - netif_rx_complete(napi); + napi_complete(napi); /*Re Enable MSI-Rx Vector*/ addr = (u8 __iomem *)&bar0->xmsi_mask_reg; addr += 7 - ring->ring_no; @@ -2889,7 +2889,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) break; } if (pkts_processed < budget_org) { - netif_rx_complete(napi); + napi_complete(napi); /* Re enable the Rx interrupts for the ring */ writeq(0, &bar0->rx_traffic_mask); readl(&bar0->rx_traffic_mask); @@ -4342,7 +4342,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) val8 = (ring->ring_no == 0) ? 0x7f : 0xff; writeb(val8, addr); val8 = readb(addr); - netif_rx_schedule(&ring->napi); + napi_schedule(&ring->napi); } else { rx_intr_handler(ring, 0); s2io_chk_rx_buffers(sp, ring); @@ -4789,7 +4789,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) if (config->napi) { if (reason & GEN_INTR_RXTRAFFIC) { - netif_rx_schedule(&sp->napi); + napi_schedule(&sp->napi); writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); readl(&bar0->rx_traffic_int); @@ -7542,6 +7542,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; send_up: + skb_record_rx_queue(skb, ring_no); queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); aggregate: sp->mac_control.rings[ring_no].rx_bufs_left -= 1; diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 31e38fae017..88dd2e09832 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -2039,9 +2039,9 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) sbdma_tx_process(sc,&(sc->sbm_txdma), 0); if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { - if (netif_rx_schedule_prep(&sc->napi)) { + if (napi_schedule_prep(&sc->napi)) { __raw_writeq(0, sc->sbm_imr); - __netif_rx_schedule(&sc->napi); + __napi_schedule(&sc->napi); /* Depend on the exit from poll to reenable intr */ } else { @@ -2478,7 +2478,7 @@ static int sbmac_mii_probe(struct net_device *dev) return -ENXIO; } - phy_dev = phy_connect(dev, phy_dev->dev.bus_id, &sbmac_mii_poll, 0, + phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) { printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); @@ -2500,7 +2500,7 @@ static int sbmac_mii_probe(struct net_device *dev) pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", dev->name, phy_dev->drv->name, - phy_dev->dev.bus_id, phy_dev->irq); + dev_name(&phy_dev->dev), phy_dev->irq); sc->phy_dev = phy_dev; @@ -2667,7 +2667,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget) sbdma_tx_process(sc, &(sc->sbm_txdma), 1); if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | @@ -2697,7 +2697,7 @@ static int __init sbmac_probe(struct platform_device *pldev) sbm_base = ioremap_nocache(res->start, res->end - res->start + 1); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", - pldev->dev.bus_id); + 
dev_name(&pldev->dev)); err = -ENOMEM; goto out_out; } @@ -2708,7 +2708,7 @@ static int __init sbmac_probe(struct platform_device *pldev) * If we find a zero, skip this MAC. */ sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); - pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", pldev->dev.bus_id, + pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); if (sbmac_orig_hwaddr == 0) { err = 0; @@ -2721,7 +2721,7 @@ static int __init sbmac_probe(struct platform_device *pldev) dev = alloc_etherdev(sizeof(struct sbmac_softc)); if (!dev) { printk(KERN_ERR "%s: unable to allocate etherdev\n", - pldev->dev.bus_id); + dev_name(&pldev->dev)); err = -ENOMEM; goto out_unmap; } diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 8b75bef4a84..c13cbf099b8 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -13,6 +13,9 @@ * Both are almost identical and seem to be based on pci-skeleton.c * * Rewritten for 2.6 by Cesar Eduardo Barros + * + * A datasheet for this chip can be found at + * http://www.silan.com.cn/english/products/pdf/SC92031AY.pdf */ /* Note about set_mac_address: I don't know how to change the hardware @@ -31,13 +34,7 @@ #include <asm/irq.h> -#define PCI_VENDOR_ID_SILAN 0x1904 -#define PCI_DEVICE_ID_SILAN_SC92031 0x2031 -#define PCI_DEVICE_ID_SILAN_8139D 0x8139 - #define SC92031_NAME "sc92031" -#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver" -#define SC92031_VERSION "2.0c" /* BAR 0 is MMIO, BAR 1 is PIO */ #ifndef SC92031_USE_BAR @@ -1264,7 +1261,6 @@ static void sc92031_ethtool_get_drvinfo(struct net_device *dev, struct pci_dev *pdev = priv->pdev; strcpy(drvinfo->driver, SC92031_NAME); - strcpy(drvinfo->version, SC92031_VERSION); strcpy(drvinfo->bus_info, pci_name(pdev)); } @@ -1423,6 +1419,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, struct net_device *dev; struct sc92031_priv *priv; u32 mac0, mac1; + unsigned long base_addr; err = pci_enable_device(pdev); if (unlikely(err < 0)) @@ -1497,6 +1494,14 @@ static int __devinit sc92031_probe(struct pci_dev *pdev, if (err < 0) goto out_register_netdev; +#if SC92031_USE_BAR == 0 + base_addr = dev->mem_start; +#elif SC92031_USE_BAR == 1 + base_addr = dev->base_addr; +#endif + printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, + base_addr, dev->dev_addr, dev->irq); + return 0; out_register_netdev: @@ -1586,8 +1591,8 @@ out: } static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { - { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) }, - { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) }, + { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, + { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, { 0, } }; MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); @@ -1603,7 +1608,6 @@ static struct pci_driver sc92031_pci_driver = { static int __init sc92031_init(void) { - printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n"); return pci_register_driver(&sc92031_pci_driver); } @@ -1617,5 +1621,4 @@ module_exit(sc92031_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); -MODULE_DESCRIPTION(SC92031_DESCRIPTION); -MODULE_VERSION(SC92031_VERSION); +MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig index c535408ad6b..12a82966b57 100644 --- a/drivers/net/sfc/Kconfig +++ b/drivers/net/sfc/Kconfig @@ -2,7 +2,6 @@ config SFC tristate 
"Solarflare Solarstorm SFC4000 support" depends on PCI && INET select MII - select INET_LRO select CRC32 select I2C select I2C_ALGOBIT diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h index d95c2182801..d54d84c267b 100644 --- a/drivers/net/sfc/bitfield.h +++ b/drivers/net/sfc/bitfield.h @@ -543,7 +543,7 @@ typedef union efx_oword { /* Static initialiser */ #define EFX_OWORD32(a, b, c, d) \ - { .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \ - __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } } + { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ + cpu_to_le32(c), cpu_to_le32(d) } } #endif /* EFX_BITFIELD_H */ diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index ab0e09bf154..75836599e43 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -182,7 +182,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota) channel->rx_pkt = NULL; } - efx_flush_lro(channel); efx_rx_strategy(channel); efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); @@ -225,11 +224,11 @@ static int efx_poll(struct napi_struct *napi, int budget) if (rx_packets < budget) { /* There is no race here; although napi_disable() will - * only wait for netif_rx_complete(), this isn't a problem + * only wait for napi_complete(), this isn't a problem * since efx_channel_processed() will have no effect if * interrupts have already been disabled. */ - netif_rx_complete(napi); + napi_complete(napi); efx_channel_processed(channel); } @@ -1269,18 +1268,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) static int efx_init_napi(struct efx_nic *efx) { struct efx_channel *channel; - int rc; efx_for_each_channel(channel, efx) { channel->napi_dev = efx->net_dev; - rc = efx_lro_init(&channel->lro_mgr, efx); - if (rc) - goto err; } return 0; - err: - efx_fini_napi(efx); - return rc; } static void efx_fini_napi(struct efx_nic *efx) @@ -1288,7 +1280,6 @@ static void efx_fini_napi(struct efx_nic *efx) struct efx_channel *channel; efx_for_each_channel(channel, efx) { - efx_lro_fini(&channel->lro_mgr); channel->napi_dev = NULL; } } @@ -2120,7 +2111,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO); if (lro) - net_dev->features |= NETIF_F_LRO; + net_dev->features |= NETIF_F_GRO; /* Mask for features that also apply to VLAN devices */ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO); diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index 55d0f131b0e..8bde1d2a21d 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h @@ -80,7 +80,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel) channel->channel, raw_smp_processor_id()); channel->work_pending = true; - netif_rx_schedule(&channel->napi_str); + napi_schedule(&channel->napi_str); } #endif /* EFX_EFX_H */ diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index e019ad1fb9a..19930ff9df7 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -25,15 +25,11 @@ #include <linux/device.h> #include <linux/highmem.h> #include <linux/workqueue.h> -#include <linux/inet_lro.h> #include <linux/i2c.h> #include "enum.h" #include "bitfield.h" -#define EFX_MAX_LRO_DESCRIPTORS 8 -#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS - /************************************************************************** * * Build definitions @@ -340,13 +336,10 @@ enum efx_rx_alloc_method { * 
@eventq_read_ptr: Event queue read pointer * @last_eventq_read_ptr: Last event queue read pointer value. * @eventq_magic: Event queue magic value for driver-generated test events - * @lro_mgr: LRO state * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors * and diagnostic counters * @rx_alloc_push_pages: RX allocation method currently in use for pushing * descriptors - * @rx_alloc_pop_pages: RX allocation method currently in use for popping - * descriptors * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_ip_frag_err: Count of RX IP fragment errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors @@ -371,10 +364,8 @@ struct efx_channel { unsigned int last_eventq_read_ptr; unsigned int eventq_magic; - struct net_lro_mgr lro_mgr; int rx_alloc_level; int rx_alloc_push_pages; - int rx_alloc_pop_pages; unsigned n_rx_tobe_disc; unsigned n_rx_ip_frag_err; diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index b8ba4bbad88..66d7fe3db3e 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -99,109 +99,6 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) } -/************************************************************************** - * - * Linux generic LRO handling - * - ************************************************************************** - */ - -static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, - void **tcpudp_hdr, u64 *hdr_flags, void *priv) -{ - struct efx_channel *channel = priv; - struct iphdr *iph; - struct tcphdr *th; - - iph = (struct iphdr *)skb->data; - if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP) - goto fail; - - th = (struct tcphdr *)(skb->data + iph->ihl * 4); - - *tcpudp_hdr = th; - *ip_hdr = iph; - *hdr_flags = LRO_IPV4 | LRO_TCP; - - channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; - return 0; -fail: - channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; - return -1; -} - -static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, - void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, - void *priv) -{ - struct efx_channel *channel = priv; - struct ethhdr *eh; - struct iphdr *iph; - - /* We support EtherII and VLAN encapsulated IPv4 */ - eh = page_address(frag->page) + frag->page_offset; - *mac_hdr = eh; - - if (eh->h_proto == htons(ETH_P_IP)) { - iph = (struct iphdr *)(eh + 1); - } else { - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh; - if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) - goto fail; - - iph = (struct iphdr *)(veh + 1); - } - *ip_hdr = iph; - - /* We can only do LRO over TCP */ - if (iph->protocol != IPPROTO_TCP) - goto fail; - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4); - - channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; - return 0; - fail: - channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; - return -1; -} - -int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx) -{ - size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS; - struct net_lro_desc *lro_arr; - - /* Allocate the LRO descriptors structure */ - lro_arr = kzalloc(s, GFP_KERNEL); - if (lro_arr == NULL) - return -ENOMEM; - - lro_mgr->lro_arr = lro_arr; - lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS; - lro_mgr->max_aggr = EFX_MAX_LRO_AGGR; - lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN; - - lro_mgr->get_skb_header = efx_lro_get_skb_hdr; - lro_mgr->get_frag_header = efx_get_frag_hdr; - lro_mgr->dev = efx->net_dev; - - lro_mgr->features = LRO_F_NAPI; - - /* We can pass packets up with the checksum 
intact */ - lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; - - lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; - - return 0; -} - -void efx_lro_fini(struct net_lro_mgr *lro_mgr) -{ - kfree(lro_mgr->lro_arr); - lro_mgr->lro_arr = NULL; -} - /** * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation * @@ -549,77 +446,31 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, static void efx_rx_packet_lro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf) { - struct net_lro_mgr *lro_mgr = &channel->lro_mgr; - void *priv = channel; + struct napi_struct *napi = &channel->napi_str; /* Pass the skb/page into the LRO engine */ if (rx_buf->page) { - struct skb_frag_struct frags; + struct napi_gro_fraginfo info; - frags.page = rx_buf->page; - frags.page_offset = efx_rx_buf_offset(rx_buf); - frags.size = rx_buf->len; + info.frags[0].page = rx_buf->page; + info.frags[0].page_offset = efx_rx_buf_offset(rx_buf); + info.frags[0].size = rx_buf->len; + info.nr_frags = 1; + info.ip_summed = CHECKSUM_UNNECESSARY; + info.len = rx_buf->len; - lro_receive_frags(lro_mgr, &frags, rx_buf->len, - rx_buf->len, priv, 0); + napi_gro_frags(napi, &info); EFX_BUG_ON_PARANOID(rx_buf->skb); rx_buf->page = NULL; } else { EFX_BUG_ON_PARANOID(!rx_buf->skb); - lro_receive_skb(lro_mgr, rx_buf->skb, priv); + napi_gro_receive(napi, rx_buf->skb); rx_buf->skb = NULL; } } -/* Allocate and construct an SKB around a struct page.*/ -static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, - struct efx_nic *efx, - int hdr_len) -{ - struct sk_buff *skb; - - /* Allocate an SKB to store the headers */ - skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN); - if (unlikely(skb == NULL)) { - EFX_ERR_RL(efx, "RX out of memory for skb\n"); - return NULL; - } - - EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags); - EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len); - - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb_reserve(skb, EFX_PAGE_SKB_ALIGN); - - skb->len = rx_buf->len; - skb->truesize = rx_buf->len + sizeof(struct sk_buff); - memcpy(skb->data, rx_buf->data, hdr_len); - skb->tail += hdr_len; - - /* Append the remaining page onto the frag list */ - if (unlikely(rx_buf->len > hdr_len)) { - struct skb_frag_struct *frag = skb_shinfo(skb)->frags; - frag->page = rx_buf->page; - frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; - frag->size = skb->len - hdr_len; - skb_shinfo(skb)->nr_frags = 1; - skb->data_len = frag->size; - } else { - __free_pages(rx_buf->page, efx->rx_buffer_order); - skb->data_len = 0; - } - - /* Ownership has transferred from the rx_buf to skb */ - rx_buf->page = NULL; - - /* Move past the ethernet header */ - skb->protocol = eth_type_trans(skb, efx->net_dev); - - return skb; -} - void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, unsigned int len, bool checksummed, bool discard) { @@ -687,7 +538,6 @@ void __efx_rx_packet(struct efx_channel *channel, { struct efx_nic *efx = channel->efx; struct sk_buff *skb; - bool lro = !!(efx->net_dev->features & NETIF_F_LRO); /* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here @@ -709,41 +559,23 @@ void __efx_rx_packet(struct efx_channel *channel, efx->net_dev); } - /* Both our generic-LRO and SFC-SSR support skb and page based - * allocation, but neither support switching from one to the - * other on the fly. If we spot that the allocation mode has - * changed, then flush the LRO state. 
- */ - if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) { - efx_flush_lro(channel); - channel->rx_alloc_pop_pages = (rx_buf->page != NULL); - } - if (likely(checksummed && lro)) { + if (likely(checksummed || rx_buf->page)) { efx_rx_packet_lro(channel, rx_buf); goto done; } - /* Form an skb if required */ - if (rx_buf->page) { - int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS); - skb = efx_rx_mk_skb(rx_buf, efx, hdr_len); - if (unlikely(skb == NULL)) { - efx_free_rx_buffer(efx, rx_buf); - goto done; - } - } else { - /* We now own the SKB */ - skb = rx_buf->skb; - rx_buf->skb = NULL; - } + /* We now own the SKB */ + skb = rx_buf->skb; + rx_buf->skb = NULL; EFX_BUG_ON_PARANOID(rx_buf->page); EFX_BUG_ON_PARANOID(rx_buf->skb); EFX_BUG_ON_PARANOID(!skb); /* Set the SKB flags */ - if (unlikely(!checksummed || !efx->rx_checksum_enabled)) - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = CHECKSUM_NONE; + + skb_record_rx_queue(skb, channel->channel); /* Pass the packet up */ netif_receive_skb(skb); @@ -760,7 +592,7 @@ void efx_rx_strategy(struct efx_channel *channel) enum efx_rx_alloc_method method = rx_alloc_method; /* Only makes sense to use page based allocation if LRO is enabled */ - if (!(channel->efx->net_dev->features & NETIF_F_LRO)) { + if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { method = RX_ALLOC_METHOD_SKB; } else if (method == RX_ALLOC_METHOD_AUTO) { /* Constrain the rx_alloc_level */ @@ -865,11 +697,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->buffer = NULL; } -void efx_flush_lro(struct efx_channel *channel) -{ - lro_flush_all(&channel->lro_mgr); -} - module_param(rx_alloc_method, int, 0644); MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h index 0e88a9ddc1c..42ee7555a80 100644 --- a/drivers/net/sfc/rx.h +++ b/drivers/net/sfc/rx.h @@ -17,9 +17,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); void efx_init_rx_queue(struct efx_rx_queue *rx_queue); void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); -int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx); -void efx_lro_fini(struct net_lro_mgr *lro_mgr); -void efx_flush_lro(struct efx_channel *channel); void efx_rx_strategy(struct efx_channel *channel); void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); void efx_rx_work(struct work_struct *data); diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index cb25ae5b257..c0e90683162 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c @@ -24,6 +24,7 @@ */ #include <linux/delay.h> +#include <linux/rtnetlink.h> #include "net_driver.h" #include "efx.h" #include "phy.h" diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index f0efd246962..ac9eeab79f2 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c @@ -8,6 +8,7 @@ */ #include <linux/delay.h> +#include <linux/rtnetlink.h> #include <linux/seq_file.h> #include "efx.h" #include "mdio_10g.h" diff --git a/drivers/net/skge.c b/drivers/net/skge.c index c9dbb06f8c9..952d37ffee5 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do) unsigned long flags; spin_lock_irqsave(&hw->hw_lock, flags); - __netif_rx_complete(napi); + __napi_complete(napi); hw->intr_mask |= napimask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); @@ -3377,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void 
*dev_id) if (status & (IS_XA1_F|IS_R1_F)) { struct skge_port *skge = netdev_priv(hw->dev[0]); hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); - netif_rx_schedule(&skge->napi); + napi_schedule(&skge->napi); } if (status & IS_PA_TO_TX1) @@ -3397,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) if (status & (IS_XA2_F|IS_R2_F)) { hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); - netif_rx_schedule(&skge->napi); + napi_schedule(&skge->napi); } if (status & IS_PA_TO_RX2) { diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 223cde0d43b..293610334a7 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -1545,7 +1545,7 @@ smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strncpy(info->driver, CARDNAME, sizeof(info->driver)); strncpy(info->version, version, sizeof(info->version)); - strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info)); + strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); } static int smc911x_ethtool_nwayreset(struct net_device *dev) diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index b215a8d85e6..fdcbaf8dfa7 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -1614,7 +1614,7 @@ smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strncpy(info->driver, CARDNAME, sizeof(info->driver)); strncpy(info->version, version, sizeof(info->version)); - strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info)); + strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); } static int smc_ethtool_nwayreset(struct net_device *dev) @@ -1643,6 +1643,117 @@ static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level) lp->msg_enable = level; } +static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word) +{ + u16 ctl; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + spin_lock_irq(&lp->lock); + /* load word into GP register */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_GP(lp, word); + /* set the address to put the data in EEPROM */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_PTR(lp, addr); + /* tell it to write */ + SMC_SELECT_BANK(lp, 1); + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE)); + /* wait for it to finish */ + do { + udelay(1); + } while (SMC_GET_CTL(lp) & CTL_STORE); + /* clean up */ + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word) +{ + u16 ctl; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + spin_lock_irq(&lp->lock); + /* set the EEPROM address to get the data from */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_PTR(lp, addr | PTR_READ); + /* tell it to load */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_GP(lp, 0xffff); /* init to known */ + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD)); + /* wait for it to finish */ + do { + udelay(1); + } while (SMC_GET_CTL(lp) & CTL_RELOAD); + /* read word from GP register */ + *word = SMC_GET_GP(lp); + /* clean up */ + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int smc_ethtool_geteeprom_len(struct net_device *dev) +{ + return 0x23 * 2; +} + +static int smc_ethtool_geteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i; + int imax; + + DBG(1, "Reading %d bytes at %d(0x%x)\n", + eeprom->len, eeprom->offset, eeprom->offset); + imax = 
smc_ethtool_geteeprom_len(dev); + for (i = 0; i < eeprom->len; i += 2) { + int ret; + u16 wbuf; + int offset = i + eeprom->offset; + if (offset > imax) + break; + ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf); + if (ret != 0) + return ret; + DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1); + data[i] = (wbuf >> 8) & 0xff; + data[i+1] = wbuf & 0xff; + } + return 0; +} + +static int smc_ethtool_seteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i; + int imax; + + DBG(1, "Writing %d bytes to %d(0x%x)\n", + eeprom->len, eeprom->offset, eeprom->offset); + imax = smc_ethtool_geteeprom_len(dev); + for (i = 0; i < eeprom->len; i += 2) { + int ret; + u16 wbuf; + int offset = i + eeprom->offset; + if (offset > imax) + break; + wbuf = (data[i] << 8) | data[i + 1]; + DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1); + ret = smc_write_eeprom_word(dev, offset >> 1, wbuf); + if (ret != 0) + return ret; + } + return 0; +} + + static const struct ethtool_ops smc_ethtool_ops = { .get_settings = smc_ethtool_getsettings, .set_settings = smc_ethtool_setsettings, @@ -1652,8 +1763,22 @@ static const struct ethtool_ops smc_ethtool_ops = { .set_msglevel = smc_ethtool_setmsglevel, .nway_reset = smc_ethtool_nwayreset, .get_link = ethtool_op_get_link, -// .get_eeprom = smc_ethtool_geteeprom, -// .set_eeprom = smc_ethtool_seteeprom, + .get_eeprom_len = smc_ethtool_geteeprom_len, + .get_eeprom = smc_ethtool_geteeprom, + .set_eeprom = smc_ethtool_seteeprom, +}; + +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_hard_start_xmit, + .ndo_tx_timeout = smc_timeout, + .ndo_set_multicast_list = smc_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smc_poll_controller, +#endif }; /* @@ -1865,16 +1990,9 @@ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr, /* Fill in the fields of the device structure with ethernet values. */ ether_setup(dev); - dev->open = smc_open; - dev->stop = smc_close; - dev->hard_start_xmit = smc_hard_start_xmit; - dev->tx_timeout = smc_timeout; dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->set_multicast_list = smc_set_multicast_list; + dev->netdev_ops = &smc_netdev_ops; dev->ethtool_ops = &smc_ethtool_ops; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = smc_poll_controller; -#endif tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); INIT_WORK(&lp->phy_configure, smc_phy_configure); diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index c4ccd121bc9..ed9ae43523a 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -1141,6 +1141,16 @@ static const char * chip_ids[ 16 ] = { #define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp)) +#define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp)) + +#define SMC_SET_GP(lp, x) \ + do { \ + if (SMC_MUST_ALIGN_WRITE(lp)) \ + SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \ + else \ + SMC_outw(x, ioaddr, GP_REG(lp)); \ + } while (0) + #define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp)) #define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp)) diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 783c1a7b869..6e175e5555a 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -368,48 +368,53 @@ out: return reg; } -/* Autodetects and initialises external phy for SMSC9115 and SMSC9117 flavors. 
- * If something goes wrong, returns -ENODEV to revert back to internal phy. - * Performed at initialisation only, so interrupts are enabled */ -static int smsc911x_phy_initialise_external(struct smsc911x_data *pdata) +/* Switch to external phy. Assumes tx and rx are stopped. */ +static void smsc911x_phy_enable_external(struct smsc911x_data *pdata) { unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); - /* External phy is requested, supported, and detected */ - if (hwcfg & HW_CFG_EXT_PHY_DET_) { + /* Disable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to stop */ - /* Switch to external phy. Assuming tx and rx are stopped - * because smsc911x_phy_initialise is called before - * smsc911x_rx_initialise and tx_initialise. */ + /* Switch to external phy */ + hwcfg |= HW_CFG_EXT_PHY_EN_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); - /* Disable phy clocks to the MAC */ - hwcfg &= (~HW_CFG_PHY_CLK_SEL_); - hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); - udelay(10); /* Enough time for clocks to stop */ + /* Enable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to restart */ - /* Switch to external phy */ - hwcfg |= HW_CFG_EXT_PHY_EN_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); - - /* Enable phy clocks to the MAC */ - hwcfg &= (~HW_CFG_PHY_CLK_SEL_); - hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); - udelay(10); /* Enough time for clocks to restart */ + hwcfg |= HW_CFG_SMI_SEL_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); +} - hwcfg |= HW_CFG_SMI_SEL_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); +/* Autodetects and enables external phy if present on supported chips. + * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY + * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. 
*/ +static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata) +{ + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); - SMSC_TRACE(HW, "Successfully switched to external PHY"); + if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { + SMSC_TRACE(HW, "Forcing internal PHY"); + pdata->using_extphy = 0; + } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { + SMSC_TRACE(HW, "Forcing external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { + SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET set, using external PHY"); + smsc911x_phy_enable_external(pdata); pdata->using_extphy = 1; } else { - SMSC_WARNING(HW, "No external PHY detected, " - "Using internal PHY instead."); - /* Use internal phy */ - return -ENODEV; + SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET clear, using internal PHY"); + pdata->using_extphy = 0; } - return 0; } /* Fetches a tx status out of the status fifo */ @@ -769,7 +774,7 @@ static int smsc911x_mii_probe(struct net_device *dev) return -ENODEV; } - phydev = phy_connect(dev, phydev->dev.bus_id, + phydev = phy_connect(dev, dev_name(&phydev->dev), &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface); if (IS_ERR(phydev)) { @@ -778,7 +783,8 @@ static int smsc911x_mii_probe(struct net_device *dev) } pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); + dev->name, phydev->drv->name, + dev_name(&phydev->dev), phydev->irq); /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | @@ -824,22 +830,18 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev, pdata->mii_bus->parent = &pdev->dev; - pdata->using_extphy = 0; - switch (pdata->idrev & 0xFFFF0000) { case 0x01170000: case 0x01150000: case 0x117A0000: case 0x115A0000: /* External PHY supported, try to autodetect */ - if (smsc911x_phy_initialise_external(pdata) < 0) { - SMSC_TRACE(HW, "No external PHY detected, " - "using internal PHY"); - } + smsc911x_phy_initialise_external(pdata); break; default: SMSC_TRACE(HW, "External PHY is not supported, " "using internal PHY"); + pdata->using_extphy = 0; break; } @@ -984,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) /* We processed all packets available. 
Tell NAPI it can * stop polling then re-enable rx interrupts */ smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); - netif_rx_complete(napi); + napi_complete(napi); temp = smsc911x_reg_read(pdata, INT_EN); temp |= INT_EN_RSFL_EN_; smsc911x_reg_write(pdata, INT_EN, temp); @@ -1246,7 +1248,7 @@ static int smsc911x_open(struct net_device *dev) napi_enable(&pdata->napi); temp = smsc911x_reg_read(pdata, INT_EN); - temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_); + temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_); smsc911x_reg_write(pdata, INT_EN, temp); spin_lock_irq(&pdata->mac_lock); @@ -1418,11 +1420,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev) /* Request the hardware to stop, then perform the * update when we get an RX_STOP interrupt */ - smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); - temp = smsc911x_reg_read(pdata, INT_EN); - temp |= INT_EN_RXSTOP_INT_EN_; - smsc911x_reg_write(pdata, INT_EN, temp); - temp = smsc911x_mac_read(pdata, MAC_CR); temp &= ~(MAC_CR_RXEN_); smsc911x_mac_write(pdata, MAC_CR, temp); @@ -1461,11 +1458,9 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) /* Called when there is a multicast update scheduled and * it is now safe to complete the update */ SMSC_TRACE(INTR, "RX Stop interrupt"); - temp = smsc911x_reg_read(pdata, INT_EN); - temp &= (~INT_EN_RXSTOP_INT_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); - smsc911x_rx_multicast_update_workaround(pdata); + if (pdata->multicast_update_pending) + smsc911x_rx_multicast_update_workaround(pdata); serviced = IRQ_HANDLED; } @@ -1485,16 +1480,16 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) } if (likely(intsts & inten & INT_STS_RSFL_)) { - if (likely(netif_rx_schedule_prep(&pdata->napi))) { + if (likely(napi_schedule_prep(&pdata->napi))) { /* Disable Rx interrupts */ temp = smsc911x_reg_read(pdata, INT_EN); temp &= (~INT_EN_RSFL_EN_); smsc911x_reg_write(pdata, INT_EN, temp); /* Schedule a NAPI poll */ - __netif_rx_schedule(&pdata->napi); + __napi_schedule(&pdata->napi); } else { SMSC_WARNING(RX_ERR, - "netif_rx_schedule_prep failed"); + "napi_schedule_prep failed"); } serviced = IRQ_HANDLED; } @@ -1545,7 +1540,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, { strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, dev->dev.parent->bus_id, + strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); } @@ -1747,6 +1742,21 @@ static const struct net_device_ops smsc911x_netdev_ops = { #endif }; +/* copies the current mac address from hardware to dev->dev_addr */ +static void __devinit smsc911x_read_mac_address(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); + u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); + + dev->dev_addr[0] = (u8)(mac_low32); + dev->dev_addr[1] = (u8)(mac_low32 >> 8); + dev->dev_addr[2] = (u8)(mac_low32 >> 16); + dev->dev_addr[3] = (u8)(mac_low32 >> 24); + dev->dev_addr[4] = (u8)(mac_high16); + dev->dev_addr[5] = (u8)(mac_high16 >> 8); +} + /* Initializing private device structures, only called from probe */ static int __devinit smsc911x_init(struct net_device *dev) { @@ -1834,6 +1844,12 @@ static int __devinit smsc911x_init(struct net_device *dev) SMSC_WARNING(PROBE, "This driver is not intended for this chip revision"); + /* workaround for platforms 
without an eeprom, where the mac address + * is stored elsewhere and set by the bootloader. This saves the + * mac address before resetting the device */ + if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) + smsc911x_read_mac_address(dev); + /* Reset the LAN911x */ if (smsc911x_soft_reset(pdata)) return -ENODEV; @@ -1892,9 +1908,9 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) struct net_device *dev; struct smsc911x_data *pdata; struct smsc911x_platform_config *config = pdev->dev.platform_data; - struct resource *res; + struct resource *res, *irq_res; unsigned int intcfg = 0; - int res_size; + int res_size, irq_flags; int retval; DECLARE_MAC_BUF(mac); @@ -1919,6 +1935,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) } res_size = res->end - res->start; + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + pr_warning("%s: Could not allocate irq resource.\n", + SMSC_CHIPNAME); + retval = -ENODEV; + goto out_0; + } + if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { retval = -EBUSY; goto out_0; @@ -1935,7 +1959,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) pdata = netdev_priv(dev); - dev->irq = platform_get_irq(pdev, 0); + dev->irq = irq_res->start; + irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; pdata->ioaddr = ioremap_nocache(res->start, res_size); /* copy config parameters across to pdata */ @@ -1968,8 +1993,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) smsc911x_reg_write(pdata, INT_EN, 0); smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); - retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED, - dev->name, dev); + retval = request_irq(dev->irq, smsc911x_irqhandler, + irq_flags | IRQF_SHARED, dev->name, dev); if (retval) { SMSC_WARNING(PROBE, "Unable to claim requested irq: %d", dev->irq); @@ -2005,14 +2030,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) } else { /* Try reading mac address from device. 
if EEPROM is present * it will already have been set */ - u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); - u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); - dev->dev_addr[0] = (u8)(mac_low32); - dev->dev_addr[1] = (u8)(mac_low32 >> 8); - dev->dev_addr[2] = (u8)(mac_low32 >> 16); - dev->dev_addr[3] = (u8)(mac_low32 >> 24); - dev->dev_addr[4] = (u8)(mac_high16); - dev->dev_addr[5] = (u8)(mac_high16 >> 8); + smsc911x_read_mac_address(dev); if (is_valid_ether_addr(dev->dev_addr)) { /* eeprom values are valid so use them */ diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index a1e4b3895b3..da8b977a535 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c @@ -666,7 +666,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id) smsc9420_pci_flush_write(pd); ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); - netif_rx_schedule(&pd->napi); + napi_schedule(&pd->napi); } if (ints_to_clear) @@ -889,7 +889,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget) smsc9420_pci_flush_write(pd); if (work_done < budget) { - netif_rx_complete(&pd->napi); + napi_complete(&pd->napi); /* re-enable RX DMA interrupts */ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); @@ -1156,7 +1156,7 @@ static int smsc9420_mii_probe(struct net_device *dev) smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr, phydev->phy_id); - phydev = phy_connect(dev, phydev->dev.bus_id, + phydev = phy_connect(dev, dev_name(&phydev->dev), &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { @@ -1165,7 +1165,7 @@ static int smsc9420_mii_probe(struct net_device *dev) } pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); + dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq); /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 88d2c67788d..7f6b4a4052e 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -1301,7 +1301,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget) /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ if (packets_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); spider_net_rx_irq_on(card); card->ignore_rx_ramfull = 0; } @@ -1528,7 +1528,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(&card->napi); + napi_schedule(&card->napi); } show_error = 0; break; @@ -1548,7 +1548,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(&card->napi); + napi_schedule(&card->napi); show_error = 0; break; @@ -1562,7 +1562,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(&card->napi); + napi_schedule(&card->napi); show_error = 0; break; @@ -1656,11 +1656,11 @@ spider_net_interrupt(int irq, void *ptr) if (status_reg & SPIDER_NET_RXINT ) { spider_net_rx_irq_off(card); - netif_rx_schedule(&card->napi); + napi_schedule(&card->napi); card->num_rx_ints ++; } if (status_reg & SPIDER_NET_TXINT) - netif_rx_schedule(&card->napi); + napi_schedule(&card->napi); if 
(status_reg & SPIDER_NET_LINKINT) spider_net_link_reset(netdev); diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index da3a76b18ef..98fe79515ba 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -1342,8 +1342,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) if (intr_status & (IntrRxDone | IntrRxEmpty)) { u32 enable; - if (likely(netif_rx_schedule_prep(&np->napi))) { - __netif_rx_schedule(&np->napi); + if (likely(napi_schedule_prep(&np->napi))) { + __napi_schedule(&np->napi); enable = readl(ioaddr + IntrEnable); enable &= ~(IntrRxDone | IntrRxEmpty); writel(enable, ioaddr + IntrEnable); @@ -1587,7 +1587,7 @@ static int netdev_poll(struct napi_struct *napi, int budget) intr_status = readl(ioaddr + IntrStatus); } while (intr_status & (IntrRxDone | IntrRxEmpty)); - netif_rx_complete(napi); + napi_complete(napi); intr_status = readl(ioaddr + IntrEnable); intr_status |= IntrRxDone | IntrRxEmpty; writel(intr_status, ioaddr + IntrEnable); diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index b17efa9cc53..fc1e7f1d024 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -921,7 +921,7 @@ static int gem_poll(struct napi_struct *napi, int budget) gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); - __netif_rx_complete(napi); + __napi_complete(napi); gem_enable_ints(gp); spin_unlock_irqrestore(&gp->lock, flags); @@ -944,7 +944,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) spin_lock_irqsave(&gp->lock, flags); - if (netif_rx_schedule_prep(&gp->napi)) { + if (napi_schedule_prep(&gp->napi)) { u32 gem_status = readl(gp->regs + GREG_STAT); if (gem_status == 0) { @@ -954,7 +954,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) } gp->status = gem_status; gem_disable_ints(gp); - __netif_rx_schedule(&gp->napi); + __napi_schedule(&gp->napi); } spin_unlock_irqrestore(&gp->lock, flags); diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index bcd0e60cbda..b52a1c088f3 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -725,7 +725,7 @@ static int tc_mii_probe(struct net_device *dev) } /* attach the mac to the phy */ - phydev = phy_connect(dev, phydev->dev.bus_id, + phydev = phy_connect(dev, dev_name(&phydev->dev), &tc_handle_link_change, 0, lp->chiptype == TC35815_TX4939 ? 
PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); @@ -735,7 +735,7 @@ static int tc_mii_probe(struct net_device *dev) } printk(KERN_INFO "%s: attached PHY driver [%s] " "(mii_bus:phy_addr=%s, id=%x)\n", - dev->name, phydev->drv->name, phydev->dev.bus_id, + dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->phy_id); /* mask with MAC supported features */ @@ -1609,8 +1609,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) if (!(dmactl & DMA_IntMask)) { /* disable interrupts */ tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); - if (netif_rx_schedule_prep(&lp->napi)) - __netif_rx_schedule(&lp->napi); + if (napi_schedule_prep(&lp->napi)) + __napi_schedule(&lp->napi); else { printk(KERN_ERR "%s: interrupt taken in poll\n", dev->name); @@ -1919,7 +1919,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget) spin_unlock(&lp->lock); if (received < budget) { - netif_rx_complete(napi); + napi_complete(napi); /* enable interrupts */ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); } diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index a7a4dc4d631..be9f38f8f0b 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -265,8 +265,8 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev) bdx_isr_extra(priv, isr); if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { - if (likely(netif_rx_schedule_prep(&priv->napi))) { - __netif_rx_schedule(&priv->napi); + if (likely(napi_schedule_prep(&priv->napi))) { + __napi_schedule(&priv->napi); RET(IRQ_HANDLED); } else { /* NOTE: we get here if intr has slipped into window @@ -302,7 +302,7 @@ static int bdx_poll(struct napi_struct *napi, int budget) * device lock and allow waiting tasks (eg rmmod) to advance) */ priv->napi_stop = 0; - netif_rx_complete(napi); + napi_complete(napi); bdx_enable_interrupts(priv); } return work_done; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 8b3f8468538..5b3d60568d5 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -860,7 +860,7 @@ static int tg3_bmcr_reset(struct tg3 *tp) static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) { - struct tg3 *tp = (struct tg3 *)bp->priv; + struct tg3 *tp = bp->priv; u32 val; if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) @@ -874,7 +874,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) { - struct tg3 *tp = (struct tg3 *)bp->priv; + struct tg3 *tp = bp->priv; if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) return -EAGAIN; @@ -4460,7 +4460,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) sblk->status &= ~SD_STATUS_UPDATED; if (likely(!tg3_has_work(tp))) { - netif_rx_complete(napi); + napi_complete(napi); tg3_restart_ints(tp); break; } @@ -4470,7 +4470,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) tx_recovery: /* work_done is guaranteed to be less than budget. 
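All of the hunks above follow the same mechanical rename: netif_rx_schedule_prep/__netif_rx_schedule/netif_rx_complete become napi_schedule_prep/__napi_schedule/napi_complete with no change in behaviour. Below is a minimal sketch of the irq/poll pairing these helpers implement; the my_* names are placeholders and are not taken from any driver in this patch.

/*
 * Sketch of the NAPI irq/poll pairing used throughout this patch.
 * my_priv, my_rx_irq_on/off and my_clean_rx are placeholders.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	void __iomem *regs;
};

static void my_rx_irq_off(struct my_priv *p) { /* mask RX interrupt in hardware */ }
static void my_rx_irq_on(struct my_priv *p)  { /* unmask RX interrupt in hardware */ }
static int my_clean_rx(struct my_priv *p, int budget)
{
	/* hand completed frames to the stack, return how many */
	return 0;
}

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *p = dev_id;

	/* was: netif_rx_schedule_prep() / __netif_rx_schedule() */
	if (napi_schedule_prep(&p->napi)) {
		my_rx_irq_off(p);
		__napi_schedule(&p->napi);
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *p = container_of(napi, struct my_priv, napi);
	int done = my_clean_rx(p, budget);

	if (done < budget) {
		/* was: netif_rx_complete() */
		napi_complete(napi);
		my_rx_irq_on(p);
	}
	return done;
}

The prep/__schedule split lets the handler mask its own RX interrupt between claiming the poll and actually queueing it, which is exactly how the starfire, tehuti and tg3 hunks above use it.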
*/ - netif_rx_complete(napi); + napi_complete(napi); schedule_work(&tp->reset_task); return work_done; } @@ -4519,7 +4519,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(&tp->napi); + napi_schedule(&tp->napi); return IRQ_HANDLED; } @@ -4544,7 +4544,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(&tp->napi); + napi_schedule(&tp->napi); return IRQ_RETVAL(1); } @@ -4586,7 +4586,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) sblk->status &= ~SD_STATUS_UPDATED; if (likely(tg3_has_work(tp))) { prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); - netif_rx_schedule(&tp->napi); + napi_schedule(&tp->napi); } else { /* No work, shared interrupt perhaps? re-enable * interrupts, and flush that PCI write @@ -4632,7 +4632,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (tg3_irq_sync(tp)) goto out; - if (netif_rx_schedule_prep(&tp->napi)) { + if (napi_schedule_prep(&tp->napi)) { prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); /* Update last_tag to mark that this status has been * seen. Because interrupt may be shared, we may be @@ -4640,7 +4640,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) * if tg3_poll() is not scheduled. */ tp->last_tag = sblk->status_tag; - __netif_rx_schedule(&tp->napi); + __napi_schedule(&tp->napi); } out: return IRQ_RETVAL(handled); diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 43853e3b210..4a65fc2dd92 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -274,6 +274,15 @@ static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) return ; } + +static const struct net_device_ops xl_netdev_ops = { + .ndo_open = xl_open, + .ndo_stop = xl_close, + .ndo_start_xmit = xl_xmit, + .ndo_change_mtu = xl_change_mtu, + .ndo_set_multicast_list = xl_set_rx_mode, + .ndo_set_mac_address = xl_set_mac_address, +}; static int __devinit xl_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -337,13 +346,7 @@ static int __devinit xl_probe(struct pci_dev *pdev, return i ; } - dev->open=&xl_open; - dev->hard_start_xmit=&xl_xmit; - dev->change_mtu=&xl_change_mtu; - dev->stop=&xl_close; - dev->do_ioctl=NULL; - dev->set_multicast_list=&xl_set_rx_mode; - dev->set_mac_address=&xl_set_mac_address ; + dev->netdev_ops = &xl_netdev_ops; SET_NETDEV_DEV(dev, &pdev->dev); pci_set_drvdata(pdev,dev) ; diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c index b566d6d79ec..b9db1b5a58a 100644 --- a/drivers/net/tokenring/abyss.c +++ b/drivers/net/tokenring/abyss.c @@ -92,6 +92,8 @@ static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned outw(val, dev->base_addr + reg); } +static struct net_device_ops abyss_netdev_ops; + static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent) { static int versionprinted; @@ -157,8 +159,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1); - dev->open = abyss_open; - dev->stop = abyss_close; + dev->netdev_ops = &abyss_netdev_ops; pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -450,6 +451,11 @@ static struct pci_driver abyss_driver = { static int __init abyss_init (void) { + 
abyss_netdev_ops = tms380tr_netdev_ops; + + abyss_netdev_ops.ndo_open = abyss_open; + abyss_netdev_ops.ndo_stop = abyss_close; + return pci_register_driver(&abyss_driver); } diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index fa7bce6e0c6..9d896116cf7 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -200,7 +200,6 @@ static void tr_rx(struct net_device *dev); static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev); static void tok_rerun(unsigned long dev_addr); static void ibmtr_readlog(struct net_device *dev); -static struct net_device_stats *tok_get_stats(struct net_device *dev); static int ibmtr_change_mtu(struct net_device *dev, int mtu); static void find_turbo_adapters(int *iolist); @@ -816,18 +815,21 @@ static unsigned char __devinit get_sram_size(struct tok_info *adapt_info) /*****************************************************************************/ +static const struct net_device_ops trdev_netdev_ops = { + .ndo_open = tok_open, + .ndo_stop = tok_close, + .ndo_start_xmit = tok_send_packet, + .ndo_set_multicast_list = tok_set_multicast_list, + .ndo_change_mtu = ibmtr_change_mtu, +}; + static int __devinit trdev_init(struct net_device *dev) { struct tok_info *ti = netdev_priv(dev); SET_PAGE(ti->srb_page); ti->open_failure = NO ; - dev->open = tok_open; - dev->stop = tok_close; - dev->hard_start_xmit = tok_send_packet; - dev->get_stats = tok_get_stats; - dev->set_multicast_list = tok_set_multicast_list; - dev->change_mtu = ibmtr_change_mtu; + dev->netdev_ops = &trdev_netdev_ops; return 0; } @@ -1460,7 +1462,7 @@ static irqreturn_t tok_interrupt(int irq, void *dev_id) "%02X\n", (int)retcode, (int)readb(ti->ssb + 6)); else - ti->tr_stats.tx_packets++; + dev->stats.tx_packets++; break; case XMIT_XID_CMD: DPRINTK("xmit xid ret_code: %02X\n", @@ -1646,7 +1648,7 @@ static void tr_tx(struct net_device *dev) break; } writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - ti->tr_stats.tx_bytes += ti->current_skb->len; + dev->stats.tx_bytes += ti->current_skb->len; dev_kfree_skb_irq(ti->current_skb); ti->current_skb = NULL; netif_wake_queue(dev); @@ -1722,7 +1724,7 @@ static void tr_rx(struct net_device *dev) if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) { SET_PAGE(ti->asb_page); writeb(DATA_LOST, ti->asb + RETCODE_OFST); - ti->tr_stats.rx_dropped++; + dev->stats.rx_dropped++; writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); return; } @@ -1757,7 +1759,7 @@ static void tr_rx(struct net_device *dev) if (!(skb = dev_alloc_skb(skb_size))) { DPRINTK("out of memory. frame dropped.\n"); - ti->tr_stats.rx_dropped++; + dev->stats.rx_dropped++; SET_PAGE(ti->asb_page); writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code)); writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); @@ -1813,8 +1815,8 @@ static void tr_rx(struct net_device *dev) writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); - ti->tr_stats.rx_bytes += skb->len; - ti->tr_stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; skb->protocol = tr_type_trans(skb, dev); if (IPv4_p) { @@ -1876,21 +1878,6 @@ static void ibmtr_readlog(struct net_device *dev) /*****************************************************************************/ -/* tok_get_stats(): Basically a scaffold routine which will return - the address of the tr_statistics structure associated with - this device -- the tr.... structure is an ethnet look-alike - so at least for this iteration may suffice. 
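The ibmtr conversion above, like the lanstreamer and olympic ones that follow, drops the driver-private struct net_device_stats and its get_stats callback in favour of the counters embedded in struct net_device; with no ndo_get_stats method the core hands out dev->stats directly. A minimal sketch of the resulting receive-path accounting, using a hypothetical my_rx_frame() rather than code from this patch:

/*
 * Sketch of the statistics conversion: counters live in dev->stats,
 * so no private net_device_stats copy and no get_stats callback are
 * needed.  my_rx_frame() is a placeholder.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void my_rx_frame(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}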
*/ - -static struct net_device_stats *tok_get_stats(struct net_device *dev) -{ - - struct tok_info *toki; - toki = netdev_priv(dev); - return (struct net_device_stats *) &toki->tr_stats; -} - -/*****************************************************************************/ - static int ibmtr_change_mtu(struct net_device *dev, int mtu) { struct tok_info *ti = netdev_priv(dev); diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 239c75217b1..0b2b7925da2 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c @@ -207,7 +207,6 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev); static int streamer_close(struct net_device *dev); static void streamer_set_rx_mode(struct net_device *dev); static irqreturn_t streamer_interrupt(int irq, void *dev_id); -static struct net_device_stats *streamer_get_stats(struct net_device *dev); static int streamer_set_mac_address(struct net_device *dev, void *addr); static void streamer_arb_cmd(struct net_device *dev); static int streamer_change_mtu(struct net_device *dev, int mtu); @@ -222,6 +221,18 @@ struct streamer_private *dev_streamer=NULL; #endif #endif +static const struct net_device_ops streamer_netdev_ops = { + .ndo_open = streamer_open, + .ndo_stop = streamer_close, + .ndo_start_xmit = streamer_xmit, + .ndo_change_mtu = streamer_change_mtu, +#if STREAMER_IOCTL + .ndo_do_ioctl = streamer_ioctl, +#endif + .ndo_set_multicast_list = streamer_set_rx_mode, + .ndo_set_mac_address = streamer_set_mac_address, +}; + static int __devinit streamer_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -321,18 +332,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev, init_waitqueue_head(&streamer_priv->srb_wait); init_waitqueue_head(&streamer_priv->trb_wait); - dev->open = &streamer_open; - dev->hard_start_xmit = &streamer_xmit; - dev->change_mtu = &streamer_change_mtu; - dev->stop = &streamer_close; -#if STREAMER_IOCTL - dev->do_ioctl = &streamer_ioctl; -#else - dev->do_ioctl = NULL; -#endif - dev->set_multicast_list = &streamer_set_rx_mode; - dev->get_stats = &streamer_get_stats; - dev->set_mac_address = &streamer_set_mac_address; + dev->netdev_ops = &streamer_netdev_ops; dev->irq = pdev->irq; dev->base_addr=pio_start; SET_NETDEV_DEV(dev, &pdev->dev); @@ -937,7 +937,7 @@ static void streamer_rx(struct net_device *dev) if (skb == NULL) { printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. 
\n", dev->name); - streamer_priv->streamer_stats.rx_dropped++; + dev->stats.rx_dropped++; } else { /* we allocated an skb OK */ if (buffer_cnt == 1) { /* release the DMA mapping */ @@ -1009,8 +1009,8 @@ static void streamer_rx(struct net_device *dev) /* send up to the protocol */ netif_rx(skb); } - streamer_priv->streamer_stats.rx_packets++; - streamer_priv->streamer_stats.rx_bytes += length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; } /* if skb == null */ } /* end received without errors */ @@ -1053,8 +1053,8 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id) while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); streamer_priv->free_tx_ring_entries++; - streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; - streamer_priv->streamer_stats.tx_packets++; + dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; + dev->stats.tx_packets++; dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; @@ -1484,13 +1484,6 @@ static void streamer_srb_bh(struct net_device *dev) } /* switch srb[0] */ } -static struct net_device_stats *streamer_get_stats(struct net_device *dev) -{ - struct streamer_private *streamer_priv; - streamer_priv = netdev_priv(dev); - return (struct net_device_stats *) &streamer_priv->streamer_stats; -} - static int streamer_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *saddr = addr; diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h index 13ccee6449c..3c58d6a3fbc 100644 --- a/drivers/net/tokenring/lanstreamer.h +++ b/drivers/net/tokenring/lanstreamer.h @@ -299,7 +299,6 @@ struct streamer_private { int tx_ring_free, tx_ring_last_status, rx_ring_last_received, free_tx_ring_entries; - struct net_device_stats streamer_stats; __u16 streamer_lan_status; __u8 streamer_ring_speed; __u16 pkt_buf_sz; diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index ecb5c7c9691..77dc9da4c0b 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -187,7 +187,6 @@ static int olympic_close(struct net_device *dev); static void olympic_set_rx_mode(struct net_device *dev); static void olympic_freemem(struct net_device *dev) ; static irqreturn_t olympic_interrupt(int irq, void *dev_id); -static struct net_device_stats * olympic_get_stats(struct net_device *dev); static int olympic_set_mac_address(struct net_device *dev, void *addr) ; static void olympic_arb_cmd(struct net_device *dev); static int olympic_change_mtu(struct net_device *dev, int mtu); @@ -195,6 +194,15 @@ static void olympic_srb_bh(struct net_device *dev) ; static void olympic_asb_bh(struct net_device *dev) ; static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ; +static const struct net_device_ops olympic_netdev_ops = { + .ndo_open = olympic_open, + .ndo_stop = olympic_close, + .ndo_start_xmit = olympic_xmit, + .ndo_change_mtu = olympic_change_mtu, + .ndo_set_multicast_list = olympic_set_rx_mode, + .ndo_set_mac_address = olympic_set_mac_address, +}; + static int __devinit 
olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev ; @@ -253,14 +261,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device goto op_free_iomap; } - dev->open=&olympic_open; - dev->hard_start_xmit=&olympic_xmit; - dev->change_mtu=&olympic_change_mtu; - dev->stop=&olympic_close; - dev->do_ioctl=NULL; - dev->set_multicast_list=&olympic_set_rx_mode; - dev->get_stats=&olympic_get_stats ; - dev->set_mac_address=&olympic_set_mac_address ; + dev->netdev_ops = &olympic_netdev_ops; SET_NETDEV_DEV(dev, &pdev->dev); pci_set_drvdata(pdev,dev) ; @@ -785,7 +786,7 @@ static void olympic_rx(struct net_device *dev) } olympic_priv->rx_ring_last_received += i ; olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; - olympic_priv->olympic_stats.rx_errors++; + dev->stats.rx_errors++; } else { if (buffer_cnt == 1) { @@ -796,7 +797,7 @@ static void olympic_rx(struct net_device *dev) if (skb == NULL) { printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; - olympic_priv->olympic_stats.rx_dropped++ ; + dev->stats.rx_dropped++; /* Update counters even though we don't transfer the frame */ olympic_priv->rx_ring_last_received += i ; olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; @@ -862,8 +863,8 @@ static void olympic_rx(struct net_device *dev) skb->protocol = tr_type_trans(skb,dev); netif_rx(skb) ; } - olympic_priv->olympic_stats.rx_packets++ ; - olympic_priv->olympic_stats.rx_bytes += length ; + dev->stats.rx_packets++ ; + dev->stats.rx_bytes += length ; } /* if skb == null */ } /* If status & 0x3b */ @@ -971,8 +972,8 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id) olympic_priv->tx_ring_last_status++; olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1); olympic_priv->free_tx_ring_entries++; - olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; - olympic_priv->olympic_stats.tx_packets++ ; + dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len; + dev->stats.tx_packets++ ; pci_unmap_single(olympic_priv->pdev, le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE); @@ -1344,13 +1345,6 @@ static void olympic_srb_bh(struct net_device *dev) } -static struct net_device_stats * olympic_get_stats(struct net_device *dev) -{ - struct olympic_private *olympic_priv ; - olympic_priv=netdev_priv(dev); - return (struct net_device_stats *) &olympic_priv->olympic_stats; -} - static int olympic_set_mac_address (struct net_device *dev, void *addr) { struct sockaddr *saddr = addr ; diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h index 10fbba08978..30631bae4c9 100644 --- a/drivers/net/tokenring/olympic.h +++ b/drivers/net/tokenring/olympic.h @@ -275,7 +275,6 @@ struct olympic_private { struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE]; int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries; - struct net_device_stats olympic_stats ; u16 olympic_lan_status ; u8 olympic_ring_speed ; u16 pkt_buf_sz ; diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index 5be34c2fd48..b11bb72dc7a 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -2330,6 +2330,17 @@ void tmsdev_term(struct net_device *dev) 
DMA_BIDIRECTIONAL); } +const struct net_device_ops tms380tr_netdev_ops = { + .ndo_open = tms380tr_open, + .ndo_stop = tms380tr_close, + .ndo_start_xmit = tms380tr_send_packet, + .ndo_tx_timeout = tms380tr_timeout, + .ndo_get_stats = tms380tr_get_stats, + .ndo_set_multicast_list = tms380tr_set_multicast_list, + .ndo_set_mac_address = tms380tr_set_mac_address, +}; +EXPORT_SYMBOL(tms380tr_netdev_ops); + int tmsdev_init(struct net_device *dev, struct device *pdev) { struct net_local *tms_local; @@ -2353,16 +2364,8 @@ int tmsdev_init(struct net_device *dev, struct device *pdev) return -ENOMEM; } - /* These can be overridden by the card driver if needed */ - dev->open = tms380tr_open; - dev->stop = tms380tr_close; - dev->do_ioctl = NULL; - dev->hard_start_xmit = tms380tr_send_packet; - dev->tx_timeout = tms380tr_timeout; + dev->netdev_ops = &tms380tr_netdev_ops; dev->watchdog_timeo = HZ; - dev->get_stats = tms380tr_get_stats; - dev->set_multicast_list = &tms380tr_set_multicast_list; - dev->set_mac_address = tms380tr_set_mac_address; return 0; } diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h index 7af76d70884..60b30ee38dc 100644 --- a/drivers/net/tokenring/tms380tr.h +++ b/drivers/net/tokenring/tms380tr.h @@ -14,6 +14,7 @@ #include <linux/interrupt.h> /* module prototypes */ +extern const struct net_device_ops tms380tr_netdev_ops; int tms380tr_open(struct net_device *dev); int tms380tr_close(struct net_device *dev); irqreturn_t tms380tr_interrupt(int irq, void *dev_id); diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c index 5f601773c26..b397e8785d6 100644 --- a/drivers/net/tokenring/tmspci.c +++ b/drivers/net/tokenring/tmspci.c @@ -157,8 +157,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic tp->tmspriv = cardinfo; - dev->open = tms380tr_open; - dev->stop = tms380tr_close; + dev->netdev_ops = &tms380tr_netdev_ops; + pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index a9fd2b2ccaf..bb43e7fb2a5 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -888,7 +888,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget) if (num_received < budget) { data->rxpending = 0; - netif_rx_complete(napi); + napi_complete(napi); TSI_WRITE(TSI108_EC_INTMASK, TSI_READ(TSI108_EC_INTMASK) @@ -915,11 +915,11 @@ static void tsi108_rx_int(struct net_device *dev) * * This can happen if this code races with tsi108_poll(), which masks * the interrupts after tsi108_irq_one() read the mask, but before - * netif_rx_schedule is called. It could also happen due to calls + * napi_schedule is called. It could also happen due to calls * from tsi108_check_rxring(). */ - if (netif_rx_schedule_prep(&data->napi)) { + if (napi_schedule_prep(&data->napi)) { /* Mask, rather than ack, the receive interrupts. The ack * will happen in tsi108_poll(). 
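tms380tr now publishes one const net_device_ops table that its card drivers share: tmspci points dev->netdev_ops straight at it, while abyss keeps a writable copy and patches ndo_open/ndo_stop at module init, as in the abyss_init hunk above. A condensed sketch of that library/card split, with lib_ and card_ placeholder names:

/*
 * Sketch of a library driver exporting a shared net_device_ops table
 * and a card driver overriding selected callbacks.  The lib_ and
 * card_ names are placeholders; only the pattern mirrors tms380tr/abyss.
 */
#include <linux/init.h>
#include <linux/netdevice.h>

/* "library" side: one ops table shared by every card driver */
static int lib_open(struct net_device *dev)  { return 0; }
static int lib_close(struct net_device *dev) { return 0; }

const struct net_device_ops lib_netdev_ops = {
	.ndo_open = lib_open,
	.ndo_stop = lib_close,
};

/* "card" side: writable copy, customised once at module init */
static struct net_device_ops card_netdev_ops;

static int card_open(struct net_device *dev)
{
	/* card-specific setup, then fall through to the library */
	return lib_open(dev);
}

static int __init card_init(void)
{
	card_netdev_ops = lib_netdev_ops;	/* start from the shared table */
	card_netdev_ops.ndo_open = card_open;
	/* the card's probe routine then sets dev->netdev_ops = &card_netdev_ops */
	return 0;
}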
*/ @@ -930,7 +930,7 @@ static void tsi108_rx_int(struct net_device *dev) | TSI108_INT_RXTHRESH | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT); - __netif_rx_schedule(&data->napi); + __napi_schedule(&data->napi); } else { if (!netif_running(dev)) { /* This can happen if an interrupt occurs while the diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index 6c3428a37c0..9f946d42108 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c @@ -103,7 +103,7 @@ void oom_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct tulip_private *tp = netdev_priv(dev); - netif_rx_schedule(&tp->napi); + napi_schedule(&tp->napi); } int tulip_poll(struct napi_struct *napi, int budget) @@ -300,7 +300,7 @@ int tulip_poll(struct napi_struct *napi, int budget) /* Remove us from polling list and enable RX intr. */ - netif_rx_complete(napi); + napi_complete(napi); iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: @@ -333,10 +333,10 @@ int tulip_poll(struct napi_struct *napi, int budget) /* Think: timer_pending() was an explicit signature of bug. * Timer can be pending now but fired and completed - * before we did netif_rx_complete(). See? We would lose it. */ + * before we did napi_complete(). See? We would lose it. */ /* remove ourselves from the polling list */ - netif_rx_complete(napi); + napi_complete(napi); return work_done; } @@ -519,7 +519,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance) rxd++; /* Mask RX intrs and add the device to poll list. */ iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); - netif_rx_schedule(&tp->napi); + napi_schedule(&tp->napi); if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) break; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d7b81e4fdd5..15d67635bb1 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -63,6 +63,7 @@ #include <linux/virtio_net.h> #include <net/net_namespace.h> #include <net/netns/generic.h> +#include <net/rtnetlink.h> #include <asm/system.h> #include <asm/uaccess.h> @@ -87,14 +88,19 @@ struct tap_filter { unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; }; +struct tun_file { + atomic_t count; + struct tun_struct *tun; + struct net *net; + wait_queue_head_t read_wait; +}; + struct tun_struct { - struct list_head list; + struct tun_file *tfile; unsigned int flags; - int attached; uid_t owner; gid_t group; - wait_queue_head_t read_wait; struct sk_buff_head readq; struct net_device *dev; @@ -107,6 +113,88 @@ struct tun_struct { #endif }; +static int tun_attach(struct tun_struct *tun, struct file *file) +{ + struct tun_file *tfile = file->private_data; + const struct cred *cred = current_cred(); + int err; + + ASSERT_RTNL(); + + /* Check permissions */ + if (((tun->owner != -1 && cred->euid != tun->owner) || + (tun->group != -1 && !in_egroup_p(tun->group))) && + !capable(CAP_NET_ADMIN)) + return -EPERM; + + netif_tx_lock_bh(tun->dev); + + err = -EINVAL; + if (tfile->tun) + goto out; + + err = -EBUSY; + if (tun->tfile) + goto out; + + err = 0; + tfile->tun = tun; + tun->tfile = tfile; + dev_hold(tun->dev); + atomic_inc(&tfile->count); + +out: + netif_tx_unlock_bh(tun->dev); + return err; +} + +static void __tun_detach(struct tun_struct *tun) +{ + struct tun_file *tfile = tun->tfile; + + /* Detach from net device */ + netif_tx_lock_bh(tun->dev); + tfile->tun = NULL; + tun->tfile = NULL; + netif_tx_unlock_bh(tun->dev); + + /* 
Drop read queue */ + skb_queue_purge(&tun->readq); + + /* Drop the extra count on the net device */ + dev_put(tun->dev); +} + +static void tun_detach(struct tun_struct *tun) +{ + rtnl_lock(); + __tun_detach(tun); + rtnl_unlock(); +} + +static struct tun_struct *__tun_get(struct tun_file *tfile) +{ + struct tun_struct *tun = NULL; + + if (atomic_inc_not_zero(&tfile->count)) + tun = tfile->tun; + + return tun; +} + +static struct tun_struct *tun_get(struct file *file) +{ + return __tun_get(file->private_data); +} + +static void tun_put(struct tun_struct *tun) +{ + struct tun_file *tfile = tun->tfile; + + if (atomic_dec_and_test(&tfile->count)) + tun_detach(tfile->tun); +} + /* TAP filterting */ static void addr_hash_set(u32 *mask, const u8 *addr) { @@ -213,13 +301,23 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) /* Network device part of the driver */ -static int tun_net_id; -struct tun_net { - struct list_head dev_list; -}; - static const struct ethtool_ops tun_ethtool_ops; +/* Net device detach from fd. */ +static void tun_net_uninit(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct tun_file *tfile = tun->tfile; + + /* Inform the methods they need to stop using the dev. + */ + if (tfile) { + wake_up_all(&tfile->read_wait); + if (atomic_dec_and_test(&tfile->count)) + __tun_detach(tun); + } +} + /* Net device open. */ static int tun_net_open(struct net_device *dev) { @@ -242,7 +340,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); /* Drop packet if interface is not attached */ - if (!tun->attached) + if (!tun->tfile) goto drop; /* Drop if the filter does not like it. @@ -274,7 +372,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* Notify and wake up reader process */ if (tun->flags & TUN_FASYNC) kill_fasync(&tun->fasync, SIGIO, POLL_IN); - wake_up_interruptible(&tun->read_wait); + wake_up_interruptible(&tun->tfile->read_wait); return 0; drop: @@ -306,6 +404,7 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu) } static const struct net_device_ops tun_netdev_ops = { + .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, @@ -313,6 +412,7 @@ static const struct net_device_ops tun_netdev_ops = { }; static const struct net_device_ops tap_netdev_ops = { + .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, @@ -359,19 +459,24 @@ static void tun_net_init(struct net_device *dev) /* Poll */ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) { - struct tun_struct *tun = file->private_data; + struct tun_file *tfile = file->private_data; + struct tun_struct *tun = __tun_get(tfile); unsigned int mask = POLLOUT | POLLWRNORM; if (!tun) - return -EBADFD; + return POLLERR; DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); - poll_wait(file, &tun->read_wait, wait); + poll_wait(file, &tfile->read_wait, wait); if (!skb_queue_empty(&tun->readq)) mask |= POLLIN | POLLRDNORM; + if (tun->dev->reg_state != NETREG_REGISTERED) + mask = POLLERR; + + tun_put(tun); return mask; } @@ -438,7 +543,7 @@ static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear, /* Get packet from user space buffer */ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) { - struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) }; + struct tun_pi pi = { 0, 
cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; size_t len = count, align = 0; struct virtio_net_hdr gso = { 0 }; @@ -556,14 +661,18 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, unsigned long count, loff_t pos) { - struct tun_struct *tun = iocb->ki_filp->private_data; + struct tun_struct *tun = tun_get(iocb->ki_filp); + ssize_t result; if (!tun) return -EBADFD; DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); - return tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count)); + result = tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count)); + + tun_put(tun); + return result; } /* Put packet to the user space buffer */ @@ -636,7 +745,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, unsigned long count, loff_t pos) { struct file *file = iocb->ki_filp; - struct tun_struct *tun = file->private_data; + struct tun_file *tfile = file->private_data; + struct tun_struct *tun = __tun_get(tfile); DECLARE_WAITQUEUE(wait, current); struct sk_buff *skb; ssize_t len, ret = 0; @@ -647,10 +757,12 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); len = iov_length(iv, count); - if (len < 0) - return -EINVAL; + if (len < 0) { + ret = -EINVAL; + goto out; + } - add_wait_queue(&tun->read_wait, &wait); + add_wait_queue(&tfile->read_wait, &wait); while (len) { current->state = TASK_INTERRUPTIBLE; @@ -664,6 +776,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, ret = -ERESTARTSYS; break; } + if (tun->dev->reg_state != NETREG_REGISTERED) { + ret = -EIO; + break; + } /* Nothing to read, let's sleep */ schedule(); @@ -677,8 +793,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, } current->state = TASK_RUNNING; - remove_wait_queue(&tun->read_wait, &wait); + remove_wait_queue(&tfile->read_wait, &wait); +out: + tun_put(tun); return ret; } @@ -687,54 +805,49 @@ static void tun_setup(struct net_device *dev) struct tun_struct *tun = netdev_priv(dev); skb_queue_head_init(&tun->readq); - init_waitqueue_head(&tun->read_wait); tun->owner = -1; tun->group = -1; dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = free_netdev; - dev->features |= NETIF_F_NETNS_LOCAL; } -static struct tun_struct *tun_get_by_name(struct tun_net *tn, const char *name) +/* Trivial set of netlink ops to allow deleting tun or tap + * device with netlink. 
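The tun rework splits per-open-file state into struct tun_file and guards the back-pointer to the device with an atomic count: __tun_get() only succeeds while the count is non-zero, and the final tun_put() triggers detach. A stripped-down sketch of that get/put idiom, with my_* placeholder types and no error handling:

/*
 * Sketch of the refcounted back-pointer idiom introduced for tun:
 * readers take a reference with atomic_inc_not_zero() and the last
 * put triggers detach.  my_file and my_dev are placeholders.
 */
#include <linux/kernel.h>
#include <asm/atomic.h>

struct my_dev;

struct my_file {
	atomic_t count;		/* zero once detach has started */
	struct my_dev *dev;	/* back-pointer guarded by 'count' */
};

static void my_detach(struct my_dev *dev) { /* tear down the binding */ }

static struct my_dev *my_get(struct my_file *file)
{
	/* returns NULL once the count has already reached zero */
	if (atomic_inc_not_zero(&file->count))
		return file->dev;
	return NULL;
}

static void my_put(struct my_file *file)
{
	/* last reference gone: safe to detach the device */
	if (atomic_dec_and_test(&file->count))
		my_detach(file->dev);
}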
+ */ +static int tun_validate(struct nlattr *tb[], struct nlattr *data[]) { - struct tun_struct *tun; + return -EINVAL; +} - ASSERT_RTNL(); - list_for_each_entry(tun, &tn->dev_list, list) { - if (!strncmp(tun->dev->name, name, IFNAMSIZ)) - return tun; - } +static struct rtnl_link_ops tun_link_ops __read_mostly = { + .kind = DRV_NAME, + .priv_size = sizeof(struct tun_struct), + .setup = tun_setup, + .validate = tun_validate, +}; - return NULL; -} static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) { - struct tun_net *tn; struct tun_struct *tun; struct net_device *dev; - const struct cred *cred = current_cred(); int err; - tn = net_generic(net, tun_net_id); - tun = tun_get_by_name(tn, ifr->ifr_name); - if (tun) { - if (tun->attached) - return -EBUSY; - - /* Check permissions */ - if (((tun->owner != -1 && - cred->euid != tun->owner) || - (tun->group != -1 && - cred->egid != tun->group)) && - !capable(CAP_NET_ADMIN)) { - return -EPERM; - } + dev = __dev_get_by_name(net, ifr->ifr_name); + if (dev) { + if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) + tun = netdev_priv(dev); + else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) + tun = netdev_priv(dev); + else + return -EINVAL; + + err = tun_attach(tun, file); + if (err < 0) + return err; } - else if (__dev_get_by_name(net, ifr->ifr_name)) - return -EINVAL; else { char *name; unsigned long flags = 0; @@ -765,6 +878,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) return -ENOMEM; dev_net_set(dev, net); + dev->rtnl_link_ops = &tun_link_ops; tun = netdev_priv(dev); tun->dev = dev; @@ -783,7 +897,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (err < 0) goto err_free_dev; - list_add(&tun->list, &tn->dev_list); + err = tun_attach(tun, file); + if (err < 0) + goto err_free_dev; } DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); @@ -803,10 +919,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) else tun->flags &= ~TUN_VNET_HDR; - file->private_data = tun; - tun->attached = 1; - get_net(dev_net(tun->dev)); - /* Make sure persistent devices do not get stuck in * xoff state. */ @@ -824,7 +936,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) { - struct tun_struct *tun = file->private_data; + struct tun_struct *tun = tun_get(file); if (!tun) return -EBADFD; @@ -849,6 +961,7 @@ static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) if (tun->flags & TUN_VNET_HDR) ifr->ifr_flags |= IFF_VNET_HDR; + tun_put(tun); return 0; } @@ -895,7 +1008,8 @@ static int set_offload(struct net_device *dev, unsigned long arg) static int tun_chr_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { - struct tun_struct *tun = file->private_data; + struct tun_file *tfile = file->private_data; + struct tun_struct *tun; void __user* argp = (void __user*)arg; struct ifreq ifr; int ret; @@ -904,13 +1018,23 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, if (copy_from_user(&ifr, argp, sizeof ifr)) return -EFAULT; + if (cmd == TUNGETFEATURES) { + /* Currently this just means: "what IFF flags are valid?". + * This is needed because we never checked for invalid flags on + * TUNSETIFF. 
*/ + return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | + IFF_VNET_HDR, + (unsigned int __user*)argp); + } + + tun = __tun_get(tfile); if (cmd == TUNSETIFF && !tun) { int err; ifr.ifr_name[IFNAMSIZ-1] = '\0'; rtnl_lock(); - err = tun_set_iff(current->nsproxy->net_ns, file, &ifr); + err = tun_set_iff(tfile->net, file, &ifr); rtnl_unlock(); if (err) @@ -921,28 +1045,21 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, return 0; } - if (cmd == TUNGETFEATURES) { - /* Currently this just means: "what IFF flags are valid?". - * This is needed because we never checked for invalid flags on - * TUNSETIFF. */ - return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | - IFF_VNET_HDR, - (unsigned int __user*)argp); - } if (!tun) return -EBADFD; DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); + ret = 0; switch (cmd) { case TUNGETIFF: ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); if (ret) - return ret; + break; if (copy_to_user(argp, &ifr, sizeof(ifr))) - return -EFAULT; + ret = -EFAULT; break; case TUNSETNOCSUM: @@ -994,7 +1111,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, ret = 0; } rtnl_unlock(); - return ret; + break; #ifdef TUN_DEBUG case TUNSETDEBUG: @@ -1005,24 +1122,25 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, rtnl_lock(); ret = set_offload(tun->dev, arg); rtnl_unlock(); - return ret; + break; case TUNSETTXFILTER: /* Can be set only for TAPs */ + ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) - return -EINVAL; + break; rtnl_lock(); ret = update_filter(&tun->txflt, (void __user *)arg); rtnl_unlock(); - return ret; + break; case SIOCGIFHWADDR: /* Get hw addres */ memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); ifr.ifr_hwaddr.sa_family = tun->dev->type; if (copy_to_user(argp, &ifr, sizeof ifr)) - return -EFAULT; - return 0; + ret = -EFAULT; + break; case SIOCSIFHWADDR: /* Set hw address */ @@ -1032,18 +1150,19 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, rtnl_lock(); ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); rtnl_unlock(); - return ret; - + break; default: - return -EINVAL; + ret = -EINVAL; + break; }; - return 0; + tun_put(tun); + return ret; } static int tun_chr_fasync(int fd, struct file *file, int on) { - struct tun_struct *tun = file->private_data; + struct tun_struct *tun = tun_get(file); int ret; if (!tun) @@ -1065,42 +1184,48 @@ static int tun_chr_fasync(int fd, struct file *file, int on) ret = 0; out: unlock_kernel(); + tun_put(tun); return ret; } static int tun_chr_open(struct inode *inode, struct file * file) { + struct tun_file *tfile; cycle_kernel_lock(); DBG1(KERN_INFO "tunX: tun_chr_open\n"); - file->private_data = NULL; + + tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); + if (!tfile) + return -ENOMEM; + atomic_set(&tfile->count, 0); + tfile->tun = NULL; + tfile->net = get_net(current->nsproxy->net_ns); + init_waitqueue_head(&tfile->read_wait); + file->private_data = tfile; return 0; } static int tun_chr_close(struct inode *inode, struct file *file) { - struct tun_struct *tun = file->private_data; + struct tun_file *tfile = file->private_data; + struct tun_struct *tun = __tun_get(tfile); - if (!tun) - return 0; - - DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); - rtnl_lock(); + if (tun) { + DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name); - /* Detach from net device */ - file->private_data = NULL; - tun->attached = 0; - put_net(dev_net(tun->dev)); + rtnl_lock(); + __tun_detach(tun); - /* Drop 
read queue */ - skb_queue_purge(&tun->readq); + /* If desireable, unregister the netdevice. */ + if (!(tun->flags & TUN_PERSIST)) + unregister_netdevice(tun->dev); - if (!(tun->flags & TUN_PERSIST)) { - list_del(&tun->list); - unregister_netdevice(tun->dev); + rtnl_unlock(); } - rtnl_unlock(); + put_net(tfile->net); + kfree(tfile); return 0; } @@ -1181,7 +1306,7 @@ static void tun_set_msglevel(struct net_device *dev, u32 value) static u32 tun_get_link(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); - return tun->attached; + return !!tun->tfile; } static u32 tun_get_rx_csum(struct net_device *dev) @@ -1210,45 +1335,6 @@ static const struct ethtool_ops tun_ethtool_ops = { .set_rx_csum = tun_set_rx_csum }; -static int tun_init_net(struct net *net) -{ - struct tun_net *tn; - - tn = kmalloc(sizeof(*tn), GFP_KERNEL); - if (tn == NULL) - return -ENOMEM; - - INIT_LIST_HEAD(&tn->dev_list); - - if (net_assign_generic(net, tun_net_id, tn)) { - kfree(tn); - return -ENOMEM; - } - - return 0; -} - -static void tun_exit_net(struct net *net) -{ - struct tun_net *tn; - struct tun_struct *tun, *nxt; - - tn = net_generic(net, tun_net_id); - - rtnl_lock(); - list_for_each_entry_safe(tun, nxt, &tn->dev_list, list) { - DBG(KERN_INFO "%s cleaned up\n", tun->dev->name); - unregister_netdevice(tun->dev); - } - rtnl_unlock(); - - kfree(tn); -} - -static struct pernet_operations tun_net_ops = { - .init = tun_init_net, - .exit = tun_exit_net, -}; static int __init tun_init(void) { @@ -1257,10 +1343,10 @@ static int __init tun_init(void) printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); - ret = register_pernet_gen_device(&tun_net_id, &tun_net_ops); + ret = rtnl_link_register(&tun_link_ops); if (ret) { - printk(KERN_ERR "tun: Can't register pernet ops\n"); - goto err_pernet; + printk(KERN_ERR "tun: Can't register link_ops\n"); + goto err_linkops; } ret = misc_register(&tun_miscdev); @@ -1268,18 +1354,17 @@ static int __init tun_init(void) printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); goto err_misc; } - return 0; - + return 0; err_misc: - unregister_pernet_gen_device(tun_net_id, &tun_net_ops); -err_pernet: + rtnl_link_unregister(&tun_link_ops); +err_linkops: return ret; } static void tun_cleanup(void) { misc_deregister(&tun_miscdev); - unregister_pernet_gen_device(tun_net_id, &tun_net_ops); + rtnl_link_unregister(&tun_link_ops); } module_init(tun_init); diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 3af9a9516cc..a8e5651f316 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -1783,7 +1783,7 @@ typhoon_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); iowrite32(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(tp->ioaddr); @@ -1806,10 +1806,10 @@ typhoon_interrupt(int irq, void *dev_instance) iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); - if (netif_rx_schedule_prep(&tp->napi)) { + if (napi_schedule_prep(&tp->napi)) { iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(ioaddr); - __netif_rx_schedule(&tp->napi); + __napi_schedule(&tp->napi); } else { printk(KERN_ERR "%s: Error, poll already scheduled\n", dev->name); @@ -1944,7 +1944,7 @@ typhoon_start_runtime(struct typhoon *tp) goto error_out; INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); - xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q); + xp_cmd.parm1 = 
cpu_to_le16(ETH_P_8021Q); err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); if(err < 0) goto error_out; diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h index dd7022ca735..673fd512591 100644 --- a/drivers/net/typhoon.h +++ b/drivers/net/typhoon.h @@ -174,18 +174,18 @@ struct tx_desc { u64 tx_addr; /* opaque for hardware, for TX_DESC */ }; __le32 processFlags; -#define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001) -#define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002) -#define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004) -#define TYPHOON_TX_PF_TCP_SEGMENT __constant_cpu_to_le32(0x00000008) -#define TYPHOON_TX_PF_INSERT_VLAN __constant_cpu_to_le32(0x00000010) -#define TYPHOON_TX_PF_IPSEC __constant_cpu_to_le32(0x00000020) -#define TYPHOON_TX_PF_VLAN_PRIORITY __constant_cpu_to_le32(0x00000040) -#define TYPHOON_TX_PF_UDP_CHKSUM __constant_cpu_to_le32(0x00000080) -#define TYPHOON_TX_PF_PAD_FRAME __constant_cpu_to_le32(0x00000100) -#define TYPHOON_TX_PF_RESERVED __constant_cpu_to_le32(0x00000e00) -#define TYPHOON_TX_PF_VLAN_MASK __constant_cpu_to_le32(0x0ffff000) -#define TYPHOON_TX_PF_INTERNAL __constant_cpu_to_le32(0xf0000000) +#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001) +#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002) +#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004) +#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008) +#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010) +#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020) +#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040) +#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080) +#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100) +#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00) +#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) +#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) #define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 } __attribute__ ((packed)); @@ -203,8 +203,8 @@ struct tcpopt_desc { u8 flags; u8 numDesc; __le16 mss_flags; -#define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000) -#define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000) +#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000) +#define TYPHOON_TSO_LAST cpu_to_le16(0x2000) __le32 respAddrLo; __le32 bytesTx; __le32 status; @@ -222,8 +222,8 @@ struct ipsec_desc { u8 flags; u8 numDesc; __le16 ipsecFlags; -#define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000) -#define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001) +#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000) +#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001) __le32 sa1; __le32 sa2; __le32 reserved; @@ -248,41 +248,41 @@ struct rx_desc { u32 addr; /* opaque, comes from virtAddr */ u32 addrHi; /* opaque, comes from virtAddrHi */ __le32 rxStatus; -#define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000) -#define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001) -#define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002) -#define TYPHOON_RX_ERR_RUNT __constant_cpu_to_le32(0x00000003) -#define TYPHOON_RX_ERR_CRC __constant_cpu_to_le32(0x00000004) -#define TYPHOON_RX_ERR_OVERSIZE __constant_cpu_to_le32(0x00000005) -#define TYPHOON_RX_ERR_ALIGN __constant_cpu_to_le32(0x00000006) -#define TYPHOON_RX_ERR_DRIBBLE __constant_cpu_to_le32(0x00000007) -#define TYPHOON_RX_PROTO_MASK __constant_cpu_to_le32(0x00000003) -#define TYPHOON_RX_PROTO_UNKNOWN __constant_cpu_to_le32(0x00000000) -#define TYPHOON_RX_PROTO_IP __constant_cpu_to_le32(0x00000001) -#define TYPHOON_RX_PROTO_IPX 
__constant_cpu_to_le32(0x00000002) -#define TYPHOON_RX_VLAN __constant_cpu_to_le32(0x00000004) -#define TYPHOON_RX_IP_FRAG __constant_cpu_to_le32(0x00000008) -#define TYPHOON_RX_IPSEC __constant_cpu_to_le32(0x00000010) -#define TYPHOON_RX_IP_CHK_FAIL __constant_cpu_to_le32(0x00000020) -#define TYPHOON_RX_TCP_CHK_FAIL __constant_cpu_to_le32(0x00000040) -#define TYPHOON_RX_UDP_CHK_FAIL __constant_cpu_to_le32(0x00000080) -#define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100) -#define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200) -#define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400) +#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000) +#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001) +#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002) +#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003) +#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004) +#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005) +#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006) +#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007) +#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003) +#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000) +#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001) +#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002) +#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004) +#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008) +#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010) +#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020) +#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040) +#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080) +#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100) +#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200) +#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400) __le16 filterResults; -#define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff) -#define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000) +#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff) +#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000) __le16 ipsecResults; -#define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001) -#define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002) -#define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004) -#define TYPHOON_RX_INNER_ESP_GOOD __constant_cpu_to_le16(0x0008) -#define TYPHOON_RX_OUTER_AH_FAIL __constant_cpu_to_le16(0x0010) -#define TYPHOON_RX_OUTER_ESP_FAIL __constant_cpu_to_le16(0x0020) -#define TYPHOON_RX_INNER_AH_FAIL __constant_cpu_to_le16(0x0040) -#define TYPHOON_RX_INNER_ESP_FAIL __constant_cpu_to_le16(0x0080) -#define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100) -#define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200) +#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001) +#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002) +#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004) +#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008) +#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010) +#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020) +#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040) +#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080) +#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) +#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) __be32 vlanTag; } __attribute__ ((packed)); @@ -318,31 +318,31 @@ struct cmd_desc { u8 flags; u8 numDesc; __le16 cmd; -#define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001) -#define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002) -#define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003) 
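The typhoon.h churn is purely notational: cpu_to_le16() and cpu_to_le32() already evaluate to compile-time constants when their arguments are constant, so the __constant_ spellings add nothing. A small sketch of an endian-annotated descriptor built with the plain helpers; the MY_* macros and struct my_desc are illustrative only:

/*
 * Sketch of endian-annotated constants using the plain helpers.
 * MY_CMD_ENABLE, MY_FLAG_VLAN and struct my_desc are not from typhoon.h.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_CMD_ENABLE	cpu_to_le16(0x0001)	/* folds to a constant */
#define MY_FLAG_VLAN	cpu_to_le32(0x00000004)

struct my_desc {
	__le16 cmd;
	__le32 flags;
} __attribute__ ((packed));

static const struct my_desc my_template = {
	.cmd   = MY_CMD_ENABLE,		/* valid in a static initialiser */
	.flags = MY_FLAG_VLAN,
};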
-#define TYPHOON_CMD_RX_DISABLE __constant_cpu_to_le16(0x0004) -#define TYPHOON_CMD_SET_RX_FILTER __constant_cpu_to_le16(0x0005) -#define TYPHOON_CMD_READ_STATS __constant_cpu_to_le16(0x0007) -#define TYPHOON_CMD_XCVR_SELECT __constant_cpu_to_le16(0x0013) -#define TYPHOON_CMD_SET_MAX_PKT_SIZE __constant_cpu_to_le16(0x001a) -#define TYPHOON_CMD_READ_MEDIA_STATUS __constant_cpu_to_le16(0x001b) -#define TYPHOON_CMD_GOTO_SLEEP __constant_cpu_to_le16(0x0023) -#define TYPHOON_CMD_SET_MULTICAST_HASH __constant_cpu_to_le16(0x0025) -#define TYPHOON_CMD_SET_MAC_ADDRESS __constant_cpu_to_le16(0x0026) -#define TYPHOON_CMD_READ_MAC_ADDRESS __constant_cpu_to_le16(0x0027) -#define TYPHOON_CMD_VLAN_TYPE_WRITE __constant_cpu_to_le16(0x002b) -#define TYPHOON_CMD_CREATE_SA __constant_cpu_to_le16(0x0034) -#define TYPHOON_CMD_DELETE_SA __constant_cpu_to_le16(0x0035) -#define TYPHOON_CMD_READ_VERSIONS __constant_cpu_to_le16(0x0043) -#define TYPHOON_CMD_IRQ_COALESCE_CTRL __constant_cpu_to_le16(0x0045) -#define TYPHOON_CMD_ENABLE_WAKE_EVENTS __constant_cpu_to_le16(0x0049) -#define TYPHOON_CMD_SET_OFFLOAD_TASKS __constant_cpu_to_le16(0x004f) -#define TYPHOON_CMD_HELLO_RESP __constant_cpu_to_le16(0x0057) -#define TYPHOON_CMD_HALT __constant_cpu_to_le16(0x005d) -#define TYPHOON_CMD_READ_IPSEC_INFO __constant_cpu_to_le16(0x005e) -#define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067) -#define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069) +#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001) +#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002) +#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003) +#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004) +#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005) +#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007) +#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013) +#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a) +#define TYPHOON_CMD_READ_MEDIA_STATUS cpu_to_le16(0x001b) +#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023) +#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025) +#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026) +#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027) +#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b) +#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034) +#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035) +#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043) +#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045) +#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049) +#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f) +#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057) +#define TYPHOON_CMD_HALT cpu_to_le16(0x005d) +#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e) +#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067) +#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069) u16 seqNo; __le16 parm1; __le32 parm2; @@ -380,11 +380,11 @@ struct resp_desc { /* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) */ -#define TYPHOON_RX_FILTER_DIRECTED __constant_cpu_to_le16(0x0001) -#define TYPHOON_RX_FILTER_ALL_MCAST __constant_cpu_to_le16(0x0002) -#define TYPHOON_RX_FILTER_BROADCAST __constant_cpu_to_le16(0x0004) -#define TYPHOON_RX_FILTER_PROMISCOUS __constant_cpu_to_le16(0x0008) -#define TYPHOON_RX_FILTER_MCAST_HASH __constant_cpu_to_le16(0x0010) +#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001) +#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002) +#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004) +#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008) +#define 
TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010) /* TYPHOON_CMD_READ_STATS response format */ @@ -416,40 +416,40 @@ struct stats_resp { __le32 rxOverflow; __le32 rxFiltered; __le32 linkStatus; -#define TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001) -#define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001) -#define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000) -#define TYPHOON_LINK_SPEED_MASK __constant_cpu_to_le32(0x00000002) -#define TYPHOON_LINK_100MBPS __constant_cpu_to_le32(0x00000002) -#define TYPHOON_LINK_10MBPS __constant_cpu_to_le32(0x00000000) -#define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004) -#define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004) -#define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000) +#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001) +#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001) +#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000) +#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002) +#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002) +#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000) +#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004) +#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004) +#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) __le32 unused2; __le32 unused3; } __attribute__ ((packed)); /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) */ -#define TYPHOON_XCVR_10HALF __constant_cpu_to_le16(0x0000) -#define TYPHOON_XCVR_10FULL __constant_cpu_to_le16(0x0001) -#define TYPHOON_XCVR_100HALF __constant_cpu_to_le16(0x0002) -#define TYPHOON_XCVR_100FULL __constant_cpu_to_le16(0x0003) -#define TYPHOON_XCVR_AUTONEG __constant_cpu_to_le16(0x0004) +#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000) +#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001) +#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002) +#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003) +#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004) /* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) */ -#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE __constant_cpu_to_le16(0x0004) -#define TYPHOON_MEDIA_STAT_COLLISION_DETECT __constant_cpu_to_le16(0x0010) -#define TYPHOON_MEDIA_STAT_CARRIER_SENSE __constant_cpu_to_le16(0x0020) -#define TYPHOON_MEDIA_STAT_POLARITY_REV __constant_cpu_to_le16(0x0400) -#define TYPHOON_MEDIA_STAT_NO_LINK __constant_cpu_to_le16(0x0800) +#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004) +#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010) +#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020) +#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400) +#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800) /* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) */ -#define TYPHOON_MCAST_HASH_DISABLE __constant_cpu_to_le16(0x0000) -#define TYPHOON_MCAST_HASH_ENABLE __constant_cpu_to_le16(0x0001) -#define TYPHOON_MCAST_HASH_SET __constant_cpu_to_le16(0x0002) +#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000) +#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001) +#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002) /* TYPHOON_CMD_CREATE_SA descriptor and settings */ @@ -459,9 +459,9 @@ struct sa_descriptor { u16 cmd; u16 seqNo; u16 mode; -#define TYPHOON_SA_MODE_NULL __constant_cpu_to_le16(0x0000) -#define TYPHOON_SA_MODE_AH __constant_cpu_to_le16(0x0001) -#define TYPHOON_SA_MODE_ESP __constant_cpu_to_le16(0x0002) +#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000) +#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001) +#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002) u8 
hashFlags; #define TYPHOON_SA_HASH_ENABLE 0x01 #define TYPHOON_SA_HASH_SHA1 0x02 @@ -493,22 +493,22 @@ struct sa_descriptor { /* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) * This is all for IPv4. */ -#define TYPHOON_OFFLOAD_TCP_CHKSUM __constant_cpu_to_le32(0x00000002) -#define TYPHOON_OFFLOAD_UDP_CHKSUM __constant_cpu_to_le32(0x00000004) -#define TYPHOON_OFFLOAD_IP_CHKSUM __constant_cpu_to_le32(0x00000008) -#define TYPHOON_OFFLOAD_IPSEC __constant_cpu_to_le32(0x00000010) -#define TYPHOON_OFFLOAD_BCAST_THROTTLE __constant_cpu_to_le32(0x00000020) -#define TYPHOON_OFFLOAD_DHCP_PREVENT __constant_cpu_to_le32(0x00000040) -#define TYPHOON_OFFLOAD_VLAN __constant_cpu_to_le32(0x00000080) -#define TYPHOON_OFFLOAD_FILTERING __constant_cpu_to_le32(0x00000100) -#define TYPHOON_OFFLOAD_TCP_SEGMENT __constant_cpu_to_le32(0x00000200) +#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002) +#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004) +#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008) +#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010) +#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020) +#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040) +#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080) +#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100) +#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200) /* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) */ -#define TYPHOON_WAKE_MAGIC_PKT __constant_cpu_to_le16(0x01) -#define TYPHOON_WAKE_LINK_EVENT __constant_cpu_to_le16(0x02) -#define TYPHOON_WAKE_ICMP_ECHO __constant_cpu_to_le16(0x04) -#define TYPHOON_WAKE_ARP __constant_cpu_to_le16(0x08) +#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01) +#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02) +#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04) +#define TYPHOON_WAKE_ARP cpu_to_le16(0x08) /* These are used to load the firmware image on the NIC */ diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index e87986867ba..4a8d5747204 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -3266,7 +3266,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) howmany += ucc_geth_rx(ugeth, i, budget - howmany); if (howmany < budget) { - netif_rx_complete(napi); + napi_complete(napi); setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); } @@ -3297,10 +3297,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) /* check for receive events that require processing */ if (ucce & UCCE_RX_EVENTS) { - if (netif_rx_schedule_prep(&ugeth->napi)) { + if (napi_schedule_prep(&ugeth->napi)) { uccm &= ~UCCE_RX_EVENTS; out_be32(uccf->p_uccm, uccm); - __netif_rx_schedule(&ugeth->napi); + __napi_schedule(&ugeth->napi); } } diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 0d0fa91c025..806cc5da56c 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -934,8 +934,7 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt, if (!odev->rx_buf_missing) { /* Packet is complete. Inject into stack. 
*/ /* We have IP packet here */ - odev->skb_rx_buf->protocol = - __constant_htons(ETH_P_IP); + odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP); /* don't check it */ odev->skb_rx_buf->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 5574abe29c7..5b0b9647382 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -55,7 +55,6 @@ struct smsc95xx_priv { struct usb_context { struct usb_ctrlrequest req; - struct completion notify; struct usbnet *dev; }; @@ -307,7 +306,7 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, return 0; } -static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs) +static void smsc95xx_async_cmd_callback(struct urb *urb) { struct usb_context *usb_context = urb->context; struct usbnet *dev = usb_context->dev; @@ -316,8 +315,6 @@ static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs) if (status < 0) devwarn(dev, "async callback failed with %d", status); - complete(&usb_context->notify); - kfree(usb_context); usb_free_urb(urb); } @@ -348,11 +345,10 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data) usb_context->req.wValue = 00; usb_context->req.wIndex = cpu_to_le16(index); usb_context->req.wLength = cpu_to_le16(size); - init_completion(&usb_context->notify); usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (void *)&usb_context->req, data, size, - (usb_complete_t)smsc95xx_async_cmd_callback, + smsc95xx_async_cmd_callback, (void *)usb_context); status = usb_submit_urb(urb, GFP_ATOMIC); diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 3b8e6325427..4671436ecf0 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -589,7 +589,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) work_done = rhine_rx(dev, budget); if (work_done < budget) { - netif_rx_complete(napi); + napi_complete(napi); iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | @@ -1319,7 +1319,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance) IntrPCIErr | IntrStatsMax | IntrLinkChange, ioaddr + IntrEnable); - netif_rx_schedule(&rp->napi); + napi_schedule(&rp->napi); } if (intr_status & (IntrTxErrSummary | IntrTxDone)) { diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index 29a33090d3d..ea43e1832af 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -183,7 +183,7 @@ struct rdesc1 { }; enum { - RX_INTEN = __constant_cpu_to_le16(0x8000) + RX_INTEN = cpu_to_le16(0x8000) }; struct rx_desc { @@ -210,7 +210,7 @@ struct tdesc1 { } __attribute__ ((__packed__)); enum { - TD_QUEUE = __constant_cpu_to_le16(0x8000) + TD_QUEUE = cpu_to_le16(0x8000) }; struct td_buf { @@ -242,7 +242,7 @@ struct velocity_td_info { enum velocity_owner { OWNED_BY_HOST = 0, - OWNED_BY_NIC = __constant_cpu_to_le16(0x8000) + OWNED_BY_NIC = cpu_to_le16(0x8000) }; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c68808336c8..fe576e75a53 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -43,6 +43,7 @@ struct virtnet_info struct virtqueue *rvq, *svq; struct net_device *dev; struct napi_struct napi; + unsigned int status; /* The skb we couldn't send because buffers were full. 
*/ struct sk_buff *last_xmit_skb; @@ -375,9 +376,9 @@ static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; /* Schedule NAPI, Suppress further interrupts if successful. */ - if (netif_rx_schedule_prep(&vi->napi)) { + if (napi_schedule_prep(&vi->napi)) { rvq->vq_ops->disable_cb(rvq); - __netif_rx_schedule(&vi->napi); + __napi_schedule(&vi->napi); } } @@ -403,11 +404,11 @@ again: /* Out of packets? */ if (received < budget) { - netif_rx_complete(napi); + napi_complete(napi); if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && napi_schedule_prep(napi)) { vi->rvq->vq_ops->disable_cb(vi->rvq); - __netif_rx_schedule(napi); + __napi_schedule(napi); goto again; } } @@ -581,9 +582,9 @@ static int virtnet_open(struct net_device *dev) * won't get another interrupt, so process any outstanding packets * now. virtnet_poll wants re-enable the queue, so we disable here. * We synchronize against interrupts via NAPI_STATE_SCHED */ - if (netif_rx_schedule_prep(&vi->napi)) { + if (napi_schedule_prep(&vi->napi)) { vi->rvq->vq_ops->disable_cb(vi->rvq); - __netif_rx_schedule(&vi->napi); + __napi_schedule(&vi->napi); } return 0; } @@ -612,6 +613,7 @@ static struct ethtool_ops virtnet_ethtool_ops = { .set_tx_csum = virtnet_set_tx_csum, .set_sg = ethtool_op_set_sg, .set_tso = ethtool_op_set_tso, + .get_link = ethtool_op_get_link, }; #define MIN_MTU 68 @@ -637,6 +639,41 @@ static const struct net_device_ops virtnet_netdev = { #endif }; +static void virtnet_update_status(struct virtnet_info *vi) +{ + u16 v; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) + return; + + vi->vdev->config->get(vi->vdev, + offsetof(struct virtio_net_config, status), + &v, sizeof(v)); + + /* Ignore unknown (future) status bits */ + v &= VIRTIO_NET_S_LINK_UP; + + if (vi->status == v) + return; + + vi->status = v; + + if (vi->status & VIRTIO_NET_S_LINK_UP) { + netif_carrier_on(vi->dev); + netif_wake_queue(vi->dev); + } else { + netif_carrier_off(vi->dev); + netif_stop_queue(vi->dev); + } +} + +static void virtnet_config_changed(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + + virtnet_update_status(vi); +} + static int virtnet_probe(struct virtio_device *vdev) { int err; @@ -739,6 +776,9 @@ static int virtnet_probe(struct virtio_device *vdev) goto unregister; } + vi->status = VIRTIO_NET_S_LINK_UP; + virtnet_update_status(vi); + pr_debug("virtnet: registered device %s\n", dev->name); return 0; @@ -794,7 +834,7 @@ static unsigned int features[] = { VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. 
*/ - VIRTIO_NET_F_MRG_RXBUF, + VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_F_NOTIFY_ON_EMPTY, }; @@ -806,6 +846,7 @@ static struct virtio_driver virtio_net = { .id_table = id_table, .probe = virtnet_probe, .remove = __devexit_p(virtnet_remove), + .config_changed = virtnet_config_changed, }; static int __init init(void) diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index b46897996f7..9693b0fd323 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -296,7 +296,13 @@ static void c101_destroy_card(card_t *card) kfree(card); } - +static const struct net_device_ops c101_ops = { + .ndo_open = c101_open, + .ndo_stop = c101_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = c101_ioctl, +}; static int __init c101_run(unsigned long irq, unsigned long winbase) { @@ -367,9 +373,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) dev->mem_start = winbase; dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; dev->tx_queue_len = 50; - dev->do_ioctl = c101_ioctl; - dev->open = c101_open; - dev->stop = c101_close; + dev->netdev_ops = &c101_ops; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; card->settings.clock_type = CLOCK_EXT; diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index d80b72e22de..0d7ba117ef6 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -427,6 +427,15 @@ static void __exit cosa_exit(void) } module_exit(cosa_exit); +static const struct net_device_ops cosa_ops = { + .ndo_open = cosa_net_open, + .ndo_stop = cosa_net_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = cosa_net_ioctl, + .ndo_tx_timeout = cosa_net_timeout, +}; + static int cosa_probe(int base, int irq, int dma) { struct cosa_data *cosa = cosa_cards+nr_cards; @@ -575,10 +584,7 @@ static int cosa_probe(int base, int irq, int dma) } dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; - chan->netdev->open = cosa_net_open; - chan->netdev->stop = cosa_net_close; - chan->netdev->do_ioctl = cosa_net_ioctl; - chan->netdev->tx_timeout = cosa_net_timeout; + chan->netdev->netdev_ops = &cosa_ops; chan->netdev->watchdog_timeo = TX_TIMEOUT; chan->netdev->base_addr = chan->cosa->datareg; chan->netdev->irq = chan->cosa->irq; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 888025db2f0..8face5db8f3 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -883,6 +883,15 @@ static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz) return ret; } +static const struct net_device_ops dscc4_ops = { + .ndo_open = dscc4_open, + .ndo_stop = dscc4_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = dscc4_ioctl, + .ndo_tx_timeout = dscc4_tx_timeout, +}; + static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) { struct dscc4_pci_priv *ppriv; @@ -916,13 +925,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) hdlc_device *hdlc = dev_to_hdlc(d); d->base_addr = (unsigned long)ioaddr; - d->init = NULL; d->irq = pdev->irq; - d->open = dscc4_open; - d->stop = dscc4_close; - d->set_multicast_list = NULL; - d->do_ioctl = dscc4_ioctl; - d->tx_timeout = dscc4_tx_timeout; + d->netdev_ops = &dscc4_ops; d->watchdog_timeo = TX_TIMEOUT; SET_NETDEV_DEV(d, &pdev->dev); @@ -1048,7 +1052,7 @@ static int dscc4_open(struct net_device *dev) struct dscc4_pci_priv *ppriv; int ret = -EAGAIN; - if ((dscc4_loopback_check(dpriv) 
< 0) || !dev->hard_start_xmit) + if ((dscc4_loopback_check(dpriv) < 0)) goto err; if ((ret = hdlc_open(dev))) diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 48a2c9d2895..00945f7c1e9 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2424,6 +2424,15 @@ fst_init_card(struct fst_card_info *card) type_strings[card->type], card->irq, card->nports); } +static const struct net_device_ops fst_ops = { + .ndo_open = fst_open, + .ndo_stop = fst_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = fst_ioctl, + .ndo_tx_timeout = fst_tx_timeout, +}; + /* * Initialise card when detected. * Returns 0 to indicate success, or errno otherwise. @@ -2565,12 +2574,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->base_addr = card->pci_conf; dev->irq = card->irq; - dev->tx_queue_len = FST_TX_QUEUE_LEN; - dev->open = fst_open; - dev->stop = fst_close; - dev->do_ioctl = fst_ioctl; - dev->watchdog_timeo = FST_TX_TIMEOUT; - dev->tx_timeout = fst_tx_timeout; + dev->netdev_ops = &fst_ops; + dev->tx_queue_len = FST_TX_QUEUE_LEN; + dev->watchdog_timeo = FST_TX_TIMEOUT; hdlc->attach = fst_attach; hdlc->xmit = fst_start_xmit; } diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index 08b3536944f..497b003d723 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget) received = sca_rx_done(port, budget); if (received < budget) { - netif_rx_complete(napi); + napi_complete(napi); enable_intr(port); } @@ -359,7 +359,7 @@ static irqreturn_t sca_intr(int irq, void *dev_id) if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) { handled = 1; disable_intr(port); - netif_rx_schedule(&port->napi); + napi_schedule(&port->napi); } } diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 1f2a140c9f7..5ce43720555 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -44,7 +44,7 @@ static const char* version = "HDLC support module revision 1.22"; static struct hdlc_proto *first_proto; -static int hdlc_change_mtu(struct net_device *dev, int new_mtu) +int hdlc_change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) return -EINVAL; @@ -52,15 +52,6 @@ static int hdlc_change_mtu(struct net_device *dev, int new_mtu) return 0; } - - -static struct net_device_stats *hdlc_get_stats(struct net_device *dev) -{ - return &dev->stats; -} - - - static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) { @@ -75,7 +66,15 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, return hdlc->proto->netif_rx(skb); } +int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + if (hdlc->proto->xmit) + return hdlc->proto->xmit(skb, dev); + return hdlc->xmit(skb, dev); /* call hardware driver directly */ +} static inline void hdlc_proto_start(struct net_device *dev) { @@ -102,11 +101,11 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event, hdlc_device *hdlc; unsigned long flags; int on; - + if (dev_net(dev) != &init_net) return NOTIFY_DONE; - if (dev->get_stats != hdlc_get_stats) + if (!(dev->priv_flags & IFF_WAN_HDLC)) return NOTIFY_DONE; /* not an HDLC device */ if (event != NETDEV_CHANGE) @@ -233,15 +232,13 @@ static void hdlc_setup_dev(struct net_device *dev) /* Re-init all variables changed by HDLC protocol drivers, * 
including ether_setup() called from hdlc_raw_eth.c. */ - dev->get_stats = hdlc_get_stats; dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->priv_flags = IFF_WAN_HDLC; dev->mtu = HDLC_MAX_MTU; dev->type = ARPHRD_RAWHDLC; dev->hard_header_len = 16; dev->addr_len = 0; dev->header_ops = &hdlc_null_ops; - - dev->change_mtu = hdlc_change_mtu; } static void hdlc_setup(struct net_device *dev) @@ -339,6 +336,8 @@ MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("HDLC support module"); MODULE_LICENSE("GPL v2"); +EXPORT_SYMBOL(hdlc_change_mtu); +EXPORT_SYMBOL(hdlc_start_xmit); EXPORT_SYMBOL(hdlc_open); EXPORT_SYMBOL(hdlc_close); EXPORT_SYMBOL(hdlc_ioctl); @@ -350,7 +349,7 @@ EXPORT_SYMBOL(attach_hdlc_protocol); EXPORT_SYMBOL(detach_hdlc_protocol); static struct packet_type hdlc_packet_type = { - .type = __constant_htons(ETH_P_HDLC), + .type = cpu_to_be16(ETH_P_HDLC), .func = hdlc_rcv, }; diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 44e64b15dbd..cf5fd17ad70 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -117,7 +117,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, data->type = htonl(type); data->par1 = par1; data->par2 = par2; - data->rel = __constant_htons(0xFFFF); + data->rel = cpu_to_be16(0xFFFF); /* we will need do_div here if 1000 % HZ != 0 */ data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ)); @@ -136,20 +136,20 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev) struct hdlc_header *data = (struct hdlc_header*)skb->data; if (skb->len < sizeof(struct hdlc_header)) - return __constant_htons(ETH_P_HDLC); + return cpu_to_be16(ETH_P_HDLC); if (data->address != CISCO_MULTICAST && data->address != CISCO_UNICAST) - return __constant_htons(ETH_P_HDLC); + return cpu_to_be16(ETH_P_HDLC); switch(data->protocol) { - case __constant_htons(ETH_P_IP): - case __constant_htons(ETH_P_IPX): - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IP): + case cpu_to_be16(ETH_P_IPX): + case cpu_to_be16(ETH_P_IPV6): skb_pull(skb, sizeof(struct hdlc_header)); return data->protocol; default: - return __constant_htons(ETH_P_HDLC); + return cpu_to_be16(ETH_P_HDLC); } } @@ -194,7 +194,7 @@ static int cisco_rx(struct sk_buff *skb) case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ in_dev = dev->ip_ptr; addr = 0; - mask = __constant_htonl(~0); /* is the mask correct? */ + mask = ~cpu_to_be32(0); /* is the mask correct? 
*/ if (in_dev != NULL) { struct in_ifaddr **ifap = &in_dev->ifa_list; @@ -382,7 +382,6 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); spin_lock_init(&state(hdlc)->lock); - dev->hard_start_xmit = hdlc->xmit; dev->header_ops = &cisco_header_ops; dev->type = ARPHRD_CISCO; netif_dormant_on(dev); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index f1ddd7c3459..80053010109 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -278,31 +278,31 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) struct sk_buff *skb = *skb_p; switch (skb->protocol) { - case __constant_htons(NLPID_CCITT_ANSI_LMI): + case cpu_to_be16(NLPID_CCITT_ANSI_LMI): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_CCITT_ANSI_LMI; break; - case __constant_htons(NLPID_CISCO_LMI): + case cpu_to_be16(NLPID_CISCO_LMI): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_CISCO_LMI; break; - case __constant_htons(ETH_P_IP): + case cpu_to_be16(ETH_P_IP): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IP; break; - case __constant_htons(ETH_P_IPV6): + case cpu_to_be16(ETH_P_IPV6): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IPV6; break; - case __constant_htons(ETH_P_802_3): + case cpu_to_be16(ETH_P_802_3): head_len = 10; if (skb_headroom(skb) < head_len) { struct sk_buff *skb2 = skb_realloc_headroom(skb, @@ -426,7 +426,7 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) skb_put(skb, pad); memset(skb->data + len, 0, pad); } - skb->protocol = __constant_htons(ETH_P_802_3); + skb->protocol = cpu_to_be16(ETH_P_802_3); } if (!fr_hard_header(&skb, pvc->dlci)) { dev->stats.tx_bytes += skb->len; @@ -444,18 +444,6 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } - - -static int pvc_change_mtu(struct net_device *dev, int new_mtu) -{ - if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) - return -EINVAL; - dev->mtu = new_mtu; - return 0; -} - - - static inline void fr_log_dlci_active(pvc_device *pvc) { printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", @@ -508,10 +496,10 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) memset(skb->data, 0, len); skb_reserve(skb, 4); if (lmi == LMI_CISCO) { - skb->protocol = __constant_htons(NLPID_CISCO_LMI); + skb->protocol = cpu_to_be16(NLPID_CISCO_LMI); fr_hard_header(&skb, LMI_CISCO_DLCI); } else { - skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); + skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI); fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); } data = skb_tail_pointer(skb); @@ -1068,6 +1056,14 @@ static void pvc_setup(struct net_device *dev) dev->addr_len = 2; } +static const struct net_device_ops pvc_ops = { + .ndo_open = pvc_open, + .ndo_stop = pvc_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = pvc_xmit, + .ndo_do_ioctl = pvc_ioctl, +}; + static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) { hdlc_device *hdlc = dev_to_hdlc(frad); @@ -1104,11 +1100,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) *(__be16*)dev->dev_addr = htons(dlci); dlci_to_q922(dev->broadcast, dlci); } - dev->hard_start_xmit = pvc_xmit; - dev->open = pvc_open; - dev->stop = pvc_close; - dev->do_ioctl = pvc_ioctl; - dev->change_mtu = pvc_change_mtu; + dev->netdev_ops = &pvc_ops; dev->mtu = HDLC_MAX_MTU; dev->tx_queue_len = 0; dev->ml_priv = pvc; @@ -1260,8 +1252,6 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) 
state(hdlc)->dce_pvc_count = 0; } memcpy(&state(hdlc)->settings, &new_settings, size); - - dev->hard_start_xmit = hdlc->xmit; dev->type = ARPHRD_FRAD; return 0; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 57fe714c1c7..72a7cdab424 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -150,11 +150,11 @@ static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev) return htons(ETH_P_HDLC); switch (data->protocol) { - case __constant_htons(PID_IP): + case cpu_to_be16(PID_IP): skb_pull(skb, sizeof(struct hdlc_header)); return htons(ETH_P_IP); - case __constant_htons(PID_IPV6): + case cpu_to_be16(PID_IPV6): skb_pull(skb, sizeof(struct hdlc_header)); return htons(ETH_P_IPV6); @@ -558,7 +558,6 @@ out: return NET_RX_DROP; } - static void ppp_timer(unsigned long arg) { struct proto *proto = (struct proto *)arg; @@ -679,7 +678,6 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) ppp->keepalive_interval = 10; ppp->keepalive_timeout = 60; - dev->hard_start_xmit = hdlc->xmit; dev->hard_header_len = sizeof(struct hdlc_header); dev->header_ops = &ppp_header_ops; dev->type = ARPHRD_PPP; diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 8612311748f..19f51fdd552 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -27,11 +27,9 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev) { - return __constant_htons(ETH_P_IP); + return cpu_to_be16(ETH_P_IP); } - - static struct hdlc_proto proto = { .type_trans = raw_type_trans, .ioctl = raw_ioctl, @@ -86,7 +84,6 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) if (result) return result; memcpy(hdlc->state, &new_settings, size); - dev->hard_start_xmit = hdlc->xmit; dev->type = ARPHRD_RAWHDLC; netif_dormant_off(dev); return 0; diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index a13fc320752..49e68f5ca5f 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -45,6 +45,7 @@ static int eth_tx(struct sk_buff *skb, struct net_device *dev) static struct hdlc_proto proto = { .type_trans = eth_type_trans, + .xmit = eth_tx, .ioctl = raw_eth_ioctl, .module = THIS_MODULE, }; @@ -56,9 +57,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) const size_t size = sizeof(raw_hdlc_proto); raw_hdlc_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); - int result; - int (*old_ch_mtu)(struct net_device *, int); - int old_qlen; + int result, old_qlen; switch (ifr->ifr_settings.type) { case IF_GET_PROTO: @@ -99,11 +98,8 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) if (result) return result; memcpy(hdlc->state, &new_settings, size); - dev->hard_start_xmit = eth_tx; - old_ch_mtu = dev->change_mtu; old_qlen = dev->tx_queue_len; ether_setup(dev); - dev->change_mtu = old_ch_mtu; dev->tx_queue_len = old_qlen; random_ether_addr(dev->dev_addr); netif_dormant_off(dev); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index cbcbf6f0414..b1dc29ed158 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -184,6 +184,7 @@ static struct hdlc_proto proto = { .close = x25_close, .ioctl = x25_ioctl, .netif_rx = x25_rx, + .xmit = x25_xmit, .module = THIS_MODULE, }; @@ -213,7 +214,6 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) if ((result = attach_hdlc_protocol(dev, &proto, 0))) return result; - 
dev->hard_start_xmit = x25_xmit; dev->type = ARPHRD_X25; netif_dormant_off(dev); return 0; diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index af54f0cf1b3..567d4f5062d 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -173,6 +173,14 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding, * Description block for a Comtrol Hostess SV11 card */ +static const struct net_device_ops hostess_ops = { + .ndo_open = hostess_open, + .ndo_stop = hostess_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hostess_ioctl, +}; + static struct z8530_dev *sv11_init(int iobase, int irq) { struct z8530_dev *sv; @@ -267,9 +275,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq) dev_to_hdlc(netdev)->attach = hostess_attach; dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; - netdev->open = hostess_open; - netdev->stop = hostess_close; - netdev->do_ioctl = hostess_ioctl; + netdev->netdev_ops = &hostess_ops; netdev->base_addr = iobase; netdev->irq = irq; diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 0dbd85b0162..3bf7d3f447d 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -622,7 +622,7 @@ static void hss_hdlc_rx_irq(void *pdev) printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); #endif qmgr_disable_irq(queue_ids[port->id].rx); - netif_rx_schedule(&port->napi); + napi_schedule(&port->napi); } static int hss_hdlc_poll(struct napi_struct *napi, int budget) @@ -649,15 +649,15 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget) if ((n = queue_get_desc(rxq, port, 0)) < 0) { #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll" - " netif_rx_complete\n", dev->name); + " napi_complete\n", dev->name); #endif - netif_rx_complete(napi); + napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_empty(rxq) && - netif_rx_reschedule(napi)) { + napi_reschedule(napi)) { #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll" - " netif_rx_reschedule succeeded\n", + " napi_reschedule succeeded\n", dev->name); #endif qmgr_disable_irq(rxq); @@ -1069,7 +1069,7 @@ static int hss_hdlc_open(struct net_device *dev) hss_start_hdlc(port); /* we may already have RX data, enables IRQ */ - netif_rx_schedule(&port->napi); + napi_schedule(&port->napi); return 0; err_unlock: @@ -1230,6 +1230,14 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) * initialization ****************************************************************************/ +static const struct net_device_ops hss_hdlc_ops = { + .ndo_open = hss_hdlc_open, + .ndo_stop = hss_hdlc_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = hss_hdlc_ioctl, +}; + static int __devinit hss_init_one(struct platform_device *pdev) { struct port *port; @@ -1254,9 +1262,7 @@ static int __devinit hss_init_one(struct platform_device *pdev) hdlc = dev_to_hdlc(dev); hdlc->attach = hss_hdlc_attach; hdlc->xmit = hss_hdlc_xmit; - dev->open = hss_hdlc_open; - dev->stop = hss_hdlc_close; - dev->do_ioctl = hss_hdlc_ioctl; + dev->netdev_ops = &hss_hdlc_ops; dev->tx_queue_len = 100; port->clock_type = CLOCK_EXT; port->clock_rate = 2048000; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 5b61b3eef45..da9dcf59de2 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -422,7 +422,7 @@ static int lapbeth_device_event(struct notifier_block *this, /* 
------------------------------------------------------------------------ */ static struct packet_type lapbeth_packet_type = { - .type = __constant_htons(ETH_P_DEC), + .type = cpu_to_be16(ETH_P_DEC), .func = lapbeth_rcv, }; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index feac3b99f8f..45b1822c962 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -806,6 +806,16 @@ static int lmc_attach(struct net_device *dev, unsigned short encoding, return -EINVAL; } +static const struct net_device_ops lmc_ops = { + .ndo_open = lmc_open, + .ndo_stop = lmc_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = lmc_ioctl, + .ndo_tx_timeout = lmc_driver_timeout, + .ndo_get_stats = lmc_get_stats, +}; + static int __devinit lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -849,11 +859,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, dev->type = ARPHRD_HDLC; dev_to_hdlc(dev)->xmit = lmc_start_xmit; dev_to_hdlc(dev)->attach = lmc_attach; - dev->open = lmc_open; - dev->stop = lmc_close; - dev->get_stats = lmc_get_stats; - dev->do_ioctl = lmc_ioctl; - dev->tx_timeout = lmc_driver_timeout; + dev->netdev_ops = &lmc_ops; dev->watchdog_timeo = HZ; /* 1 second */ dev->tx_queue_len = 100; sc->lmc_device = dev; @@ -1059,9 +1065,6 @@ static int lmc_open(struct net_device *dev) if ((err = lmc_proto_open(sc)) != 0) return err; - dev->do_ioctl = lmc_ioctl; - - netif_start_queue(dev); sc->extra_stats.tx_tbusy0++; diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 94b4c208b01..044a48175c4 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c @@ -51,30 +51,15 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ { lmc_trace(sc->lmc_device, "lmc_proto_attach in"); - switch(sc->if_type){ - case LMC_PPP: - { - struct net_device *dev = sc->lmc_device; - dev->do_ioctl = lmc_ioctl; - } - break; - case LMC_NET: - { + if (sc->if_type == LMC_NET) { struct net_device *dev = sc->lmc_device; /* * They set a few basics because they don't use HDLC */ dev->flags |= IFF_POINTOPOINT; - dev->hard_header_len = 0; dev->addr_len = 0; } - case LMC_RAW: /* Setup the task queue, maybe we should notify someone? 
*/ - { - } - default: - break; - } lmc_trace(sc->lmc_device, "lmc_proto_attach out"); } diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index 697715ae80f..83da596e205 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -324,7 +324,13 @@ static void n2_destroy_card(card_t *card) kfree(card); } - +static const struct net_device_ops n2_ops = { + .ndo_open = n2_open, + .ndo_stop = n2_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = n2_ioctl, +}; static int __init n2_run(unsigned long io, unsigned long irq, unsigned long winbase, long valid0, long valid1) @@ -460,9 +466,7 @@ static int __init n2_run(unsigned long io, unsigned long irq, dev->mem_start = winbase; dev->mem_end = winbase + USE_WINDOWSIZE - 1; dev->tx_queue_len = 50; - dev->do_ioctl = n2_ioctl; - dev->open = n2_open; - dev->stop = n2_close; + dev->netdev_ops = &n2_ops; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; port->settings.clock_type = CLOCK_EXT; diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index f247e5d9002..60ece54bdd9 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -287,7 +287,13 @@ static void pc300_pci_remove_one(struct pci_dev *pdev) kfree(card); } - +static const struct net_device_ops pc300_ops = { + .ndo_open = pc300_open, + .ndo_stop = pc300_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = pc300_ioctl, +}; static int __devinit pc300_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -448,9 +454,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; dev->tx_queue_len = 50; - dev->do_ioctl = pc300_ioctl; - dev->open = pc300_open; - dev->stop = pc300_close; + dev->netdev_ops = &pc300_ops; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; port->settings.clock_type = CLOCK_EXT; diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index 1104d3a692f..e035d8c57e1 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -265,7 +265,13 @@ static void pci200_pci_remove_one(struct pci_dev *pdev) kfree(card); } - +static const struct net_device_ops pci200_ops = { + .ndo_open = pci200_open, + .ndo_stop = pci200_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = pci200_ioctl, +}; static int __devinit pci200_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -395,9 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; dev->tx_queue_len = 50; - dev->do_ioctl = pci200_ioctl; - dev->open = pci200_open; - dev->stop = pci200_close; + dev->netdev_ops = &pci200_ops; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; port->settings.clock_type = CLOCK_EXT; diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 0941a26f6e3..23b26902745 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -169,6 +169,14 @@ static int sealevel_attach(struct net_device *dev, unsigned short encoding, return -EINVAL; } +static const struct net_device_ops sealevel_ops = { + .ndo_open = sealevel_open, + .ndo_stop = sealevel_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = sealevel_ioctl, +}; + static int slvl_setup(struct slvl_device *sv, int iobase, int irq) { struct net_device *dev = alloc_hdlcdev(sv); @@ -177,9 +185,7 
@@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq) dev_to_hdlc(dev)->attach = sealevel_attach; dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; - dev->open = sealevel_open; - dev->stop = sealevel_close; - dev->do_ioctl = sealevel_ioctl; + dev->netdev_ops = &sealevel_ops; dev->base_addr = iobase; dev->irq = irq; diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index 4bffb67ebca..887acb0dc80 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -547,6 +547,15 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev) #include "wanxlfw.inc" +static const struct net_device_ops wanxl_ops = { + .ndo_open = wanxl_open, + .ndo_stop = wanxl_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = wanxl_ioctl, + .ndo_get_stats = wanxl_get_stats, +}; + static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -777,12 +786,9 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, hdlc = dev_to_hdlc(dev); spin_lock_init(&port->lock); dev->tx_queue_len = 50; - dev->do_ioctl = wanxl_ioctl; - dev->open = wanxl_open; - dev->stop = wanxl_close; + dev->netdev_ops = &wanxl_ops; hdlc->attach = wanxl_attach; hdlc->xmit = wanxl_xmit; - dev->get_stats = wanxl_get_stats; port->card = card; port->node = i; get_status(port)->clocking = CLOCK_EXT; diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index 1d8271f34c3..ecd0cfaefdc 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -140,10 +140,10 @@ static const __le32 i2400m_ACK_BARKER[4] = { - __constant_cpu_to_le32(I2400M_ACK_BARKER), - __constant_cpu_to_le32(I2400M_ACK_BARKER), - __constant_cpu_to_le32(I2400M_ACK_BARKER), - __constant_cpu_to_le32(I2400M_ACK_BARKER) + cpu_to_le32(I2400M_ACK_BARKER), + cpu_to_le32(I2400M_ACK_BARKER), + cpu_to_le32(I2400M_ACK_BARKER), + cpu_to_le32(I2400M_ACK_BARKER) }; @@ -771,8 +771,8 @@ static int i2400m_dnload_init_nonsigned(struct i2400m *i2400m) { #define POKE(a, d) { \ - .address = __constant_cpu_to_le32(a), \ - .data = __constant_cpu_to_le32(d) \ + .address = cpu_to_le32(a), \ + .data = cpu_to_le32(d) \ } static const struct { __le32 address; diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h index 067c871cc22..236f19ea4c8 100644 --- a/drivers/net/wimax/i2400m/i2400m.h +++ b/drivers/net/wimax/i2400m/i2400m.h @@ -664,17 +664,17 @@ extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *); extern void i2400m_tx_msg_sent(struct i2400m *); static const __le32 i2400m_NBOOT_BARKER[4] = { - __constant_cpu_to_le32(I2400M_NBOOT_BARKER), - __constant_cpu_to_le32(I2400M_NBOOT_BARKER), - __constant_cpu_to_le32(I2400M_NBOOT_BARKER), - __constant_cpu_to_le32(I2400M_NBOOT_BARKER) + cpu_to_le32(I2400M_NBOOT_BARKER), + cpu_to_le32(I2400M_NBOOT_BARKER), + cpu_to_le32(I2400M_NBOOT_BARKER), + cpu_to_le32(I2400M_NBOOT_BARKER) }; static const __le32 i2400m_SBOOT_BARKER[4] = { - __constant_cpu_to_le32(I2400M_SBOOT_BARKER), - __constant_cpu_to_le32(I2400M_SBOOT_BARKER), - __constant_cpu_to_le32(I2400M_SBOOT_BARKER), - __constant_cpu_to_le32(I2400M_SBOOT_BARKER) + cpu_to_le32(I2400M_SBOOT_BARKER), + cpu_to_le32(I2400M_SBOOT_BARKER), + cpu_to_le32(I2400M_SBOOT_BARKER), + cpu_to_le32(I2400M_SBOOT_BARKER) }; diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index 63fe708e8a3..be8be4d0709 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -419,7 
+419,7 @@ void i2400m_rx_fake_eth_header(struct net_device *net_dev, memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest)); memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest)); - eth_hdr->h_proto = __constant_cpu_to_be16(ETH_P_IP); + eth_hdr->h_proto = cpu_to_be16(ETH_P_IP); } @@ -493,6 +493,14 @@ error_skb_realloc: i2400m, buf, buf_len); } +static const struct net_device_ops i2400m_netdev_ops = { + .ndo_open = i2400m_open, + .ndo_stop = i2400m_stop, + .ndo_start_xmit = i2400m_hard_start_xmit, + .ndo_tx_timeout = i2400m_tx_timeout, + .ndo_change_mtu = i2400m_change_mtu, +}; + /** * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data @@ -513,11 +521,7 @@ void i2400m_netdev_setup(struct net_device *net_dev) & (~IFF_BROADCAST /* i2400m is P2P */ & ~IFF_MULTICAST); net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; - net_dev->open = i2400m_open; - net_dev->stop = i2400m_stop; - net_dev->hard_start_xmit = i2400m_hard_start_xmit; - net_dev->change_mtu = i2400m_change_mtu; - net_dev->tx_timeout = i2400m_tx_timeout; + net_dev->netdev_ops = &i2400m_netdev_ops; d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); } EXPORT_SYMBOL_GPL(i2400m_netdev_setup); diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c index 1bfa283bbd8..123a5f8db6a 100644 --- a/drivers/net/wimax/i2400m/sdio.c +++ b/drivers/net/wimax/i2400m/sdio.c @@ -255,16 +255,16 @@ int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) container_of(i2400m, struct i2400ms, i2400m); struct device *dev = i2400m_dev(i2400m); static const __le32 i2400m_WARM_BOOT_BARKER[4] = { - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), }; static const __le32 i2400m_COLD_BOOT_BARKER[4] = { - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), + cpu_to_le32(I2400M_COLD_RESET_BARKER), + cpu_to_le32(I2400M_COLD_RESET_BARKER), + cpu_to_le32(I2400M_COLD_RESET_BARKER), + cpu_to_le32(I2400M_COLD_RESET_BARKER), }; if (rt == I2400M_RT_WARM) diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index c6d93465c7e..7c28610da6f 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -211,16 +211,16 @@ int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) container_of(i2400m, struct i2400mu, i2400m); struct device *dev = i2400m_dev(i2400m); static const __le32 i2400m_WARM_BOOT_BARKER[4] = { - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), - __constant_cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), + cpu_to_le32(I2400M_WARM_RESET_BARKER), }; static const __le32 i2400m_COLD_BOOT_BARKER[4] = { - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), - __constant_cpu_to_le32(I2400M_COLD_RESET_BARKER), + cpu_to_le32(I2400M_COLD_RESET_BARKER), + 
cpu_to_le32(I2400M_COLD_RESET_BARKER), + cpu_to_le32(I2400M_COLD_RESET_BARKER), + cpu_to_le32(I2400M_COLD_RESET_BARKER), }; d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c index ccaeb5c219d..d281b6e3862 100644 --- a/drivers/net/wireless/ath5k/debug.c +++ b/drivers/net/wireless/ath5k/debug.c @@ -165,7 +165,7 @@ static int reg_show(struct seq_file *seq, void *p) return 0; } -static struct seq_operations register_seq_ops = { +static const struct seq_operations register_seq_ops = { .start = reg_start, .next = reg_next, .stop = reg_stop, diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index ec4efd7ff3c..50e28a0cdfe 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -629,7 +629,7 @@ static ssize_t lbs_rdrf_write(struct file *file, res = -EFAULT; goto out_unlock; } - priv->rf_offset = simple_strtoul((char *)buf, NULL, 16); + priv->rf_offset = simple_strtoul(buf, NULL, 16); res = count; out_unlock: free_page(addr); @@ -680,12 +680,12 @@ out_unlock: } struct lbs_debugfs_files { - char *name; + const char *name; int perm; struct file_operations fops; }; -static struct lbs_debugfs_files debugfs_files[] = { +static const struct lbs_debugfs_files debugfs_files[] = { { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, { "getscantable", 0444, FOPS(lbs_getscantable, write_file_dummy), }, @@ -693,7 +693,7 @@ static struct lbs_debugfs_files debugfs_files[] = { lbs_sleepparams_write), }, }; -static struct lbs_debugfs_files debugfs_events_files[] = { +static const struct lbs_debugfs_files debugfs_events_files[] = { {"low_rssi", 0644, FOPS(lbs_lowrssi_read, lbs_lowrssi_write), }, {"low_snr", 0644, FOPS(lbs_lowsnr_read, @@ -708,7 +708,7 @@ static struct lbs_debugfs_files debugfs_events_files[] = { lbs_highsnr_write), }, }; -static struct lbs_debugfs_files debugfs_regs_files[] = { +static const struct lbs_debugfs_files debugfs_regs_files[] = { {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), }, {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), }, {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), }, @@ -735,7 +735,7 @@ void lbs_debugfs_remove(void) void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) { int i; - struct lbs_debugfs_files *files; + const struct lbs_debugfs_files *files; if (!lbs_dir) goto exit; @@ -938,7 +938,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, return (ssize_t)cnt; } -static struct file_operations lbs_debug_fops = { +static const struct file_operations lbs_debug_fops = { .owner = THIS_MODULE, .open = open_file_generic, .write = lbs_debugfs_write, diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index 7015f248055..d6bf8d2ef8e 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -1125,7 +1125,7 @@ static int strip_seq_show(struct seq_file *seq, void *v) } -static struct seq_operations strip_seq_ops = { +static const struct seq_operations strip_seq_ops = { .start = strip_seq_start, .next = strip_seq_next, .stop = strip_seq_stop, diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cd6184ee08e..9f102a6535c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -196,7 +196,7 @@ static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); - 
netif_rx_schedule(&np->napi); + napi_schedule(&np->napi); } static int netfront_tx_slot_available(struct netfront_info *np) @@ -328,7 +328,7 @@ static int xennet_open(struct net_device *dev) xennet_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) - netif_rx_schedule(&np->napi); + napi_schedule(&np->napi); } spin_unlock_bh(&np->rx_lock); @@ -979,7 +979,7 @@ err: RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do) - __netif_rx_complete(napi); + __napi_complete(napi); local_irq_restore(flags); } @@ -1317,7 +1317,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id) xennet_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) - netif_rx_schedule(&np->napi); + napi_schedule(&np->napi); } spin_unlock_irqrestore(&np->tx_lock, flags); diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c index 03a3f34e903..a12a7211c98 100644 --- a/drivers/net/xtsonic.c +++ b/drivers/net/xtsonic.c @@ -183,7 +183,7 @@ static int __init sonic_probe1(struct net_device *dev) if (lp->descriptors == NULL) { printk(KERN_ERR "%s: couldn't alloc DMA memory for " - " descriptors.\n", lp->device->bus_id); + " descriptors.\n", dev_name(lp->device)); goto out; } diff --git a/drivers/net/znet.c b/drivers/net/znet.c index f0b15c9347d..0a6992d8611 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c @@ -358,6 +358,17 @@ static void znet_set_multicast_list (struct net_device *dev) * multicast address configured isn't equal to IFF_ALLMULTI */ } +static const struct net_device_ops znet_netdev_ops = { + .ndo_open = znet_open, + .ndo_stop = znet_close, + .ndo_start_xmit = znet_send_packet, + .ndo_set_multicast_list = znet_set_multicast_list, + .ndo_tx_timeout = znet_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe BIOS area. We just scan for the signature, and pull the vital parameters out of the structure. */ @@ -440,11 +451,7 @@ static int __init znet_probe (void) znet->tx_end = znet->tx_start + znet->tx_buf_len; /* The ZNET-specific entries in the device structure. 
*/ - dev->open = &znet_open; - dev->hard_start_xmit = &znet_send_packet; - dev->stop = &znet_close; - dev->set_multicast_list = &znet_set_multicast_list; - dev->tx_timeout = znet_tx_timeout; + dev->netdev_ops = &znet_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; err = register_netdev(dev); if (err) diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index f5e618562c5..6669adf355b 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -60,6 +60,9 @@ * 1.25 Added Packing support * 1.5 */ + +#define KMSG_COMPONENT "claw" + #include <asm/ccwdev.h> #include <asm/ccwgroup.h> #include <asm/debug.h> @@ -94,7 +97,7 @@ CLAW uses the s390dbf file system see claw_trace and claw_setup */ - +static char version[] __initdata = "CLAW driver"; static char debug_buffer[255]; /** * Debug Facility Stuff @@ -206,20 +209,30 @@ static struct net_device_stats *claw_stats(struct net_device *dev); static int pages_to_order_of_mag(int num_of_pages); static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); /* sysfs Functions */ -static ssize_t claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t claw_hname_write(struct device *dev, struct device_attribute *attr, +static ssize_t claw_hname_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t claw_hname_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); -static ssize_t claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t claw_adname_write(struct device *dev, struct device_attribute *attr, +static ssize_t claw_adname_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t claw_adname_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); -static ssize_t claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t claw_apname_write(struct device *dev, struct device_attribute *attr, +static ssize_t claw_apname_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t claw_apname_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); -static ssize_t claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t claw_wbuff_write(struct device *dev, struct device_attribute *attr, +static ssize_t claw_wbuff_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t claw_wbuff_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); -static ssize_t claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t claw_rbuff_write(struct device *dev, struct device_attribute *attr, +static ssize_t claw_rbuff_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t claw_rbuff_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); static int claw_add_files(struct device *dev); static void claw_remove_files(struct device *dev); @@ -298,8 +311,8 @@ claw_probe(struct ccwgroup_device *cgdev) if (rc) { probe_error(cgdev); put_device(&cgdev->dev); - printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", - dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__); + dev_err(&cgdev->dev, "Creating the /proc files for a new" + " CLAW device failed\n"); CLAW_DBF_TEXT_(2, setup, "probex%d", rc); return rc; } @@ -496,7 +509,8 @@ claw_open(struct net_device 
*dev) ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || (((privptr->channel[READ].flag | privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { - printk(KERN_INFO "%s: remote side is not ready\n", dev->name); + dev_info(&privptr->channel[READ].cdev->dev, + "%s: remote side is not ready\n", dev->name); CLAW_DBF_TEXT(2, trace, "notrdy"); for ( i = 0; i < 2; i++) { @@ -582,10 +596,9 @@ claw_irq_handler(struct ccw_device *cdev, CLAW_DBF_TEXT(4, trace, "clawirq"); /* Bypass all 'unsolicited interrupts' */ if (!cdev->dev.driver_data) { - printk(KERN_WARNING "claw: unsolicited interrupt for device:" - "%s received c-%02x d-%02x\n", - dev_name(&cdev->dev), irb->scsw.cmd.cstat, - irb->scsw.cmd.dstat); + dev_warn(&cdev->dev, "An uninitialized CLAW device received an" + " IRQ, c-%02x d-%02x\n", + irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); CLAW_DBF_TEXT(2, trace, "badirq"); return; } @@ -597,8 +610,7 @@ claw_irq_handler(struct ccw_device *cdev, else if (privptr->channel[WRITE].cdev == cdev) p_ch = &privptr->channel[WRITE]; else { - printk(KERN_WARNING "claw: Can't determine channel for " - "interrupt, device %s\n", dev_name(&cdev->dev)); + dev_warn(&cdev->dev, "The device is not a CLAW device\n"); CLAW_DBF_TEXT(2, trace, "badchan"); return; } @@ -612,7 +624,8 @@ claw_irq_handler(struct ccw_device *cdev, /* Check for good subchannel return code, otherwise info message */ if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { - printk(KERN_INFO "%s: subchannel check for device: %04x -" + dev_info(&cdev->dev, + "%s: subchannel check for device: %04x -" " Sch Stat %02x Dev Stat %02x CPA - %04x\n", dev->name, p_ch->devno, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, @@ -651,7 +664,7 @@ claw_irq_handler(struct ccw_device *cdev, wake_up(&p_ch->wait); /* wake claw_open (READ)*/ } else if (p_ch->flag == CLAW_WRITE) { p_ch->claw_state = CLAW_START_WRITE; - /* send SYSTEM_VALIDATE */ + /* send SYSTEM_VALIDATE */ claw_strt_read(dev, LOCK_NO); claw_send_control(dev, SYSTEM_VALIDATE_REQUEST, @@ -659,10 +672,9 @@ claw_irq_handler(struct ccw_device *cdev, p_env->host_name, p_env->adapter_name); } else { - printk(KERN_WARNING "claw: unsolicited " - "interrupt for device:" - "%s received c-%02x d-%02x\n", - dev_name(&cdev->dev), + dev_warn(&cdev->dev, "The CLAW device received" + " an unexpected IRQ, " + "c-%02x d-%02x\n", irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); return; @@ -677,8 +689,8 @@ claw_irq_handler(struct ccw_device *cdev, (p_ch->irb->ecw[0] & 0x40) == 0x40 || (p_ch->irb->ecw[0]) == 0) { privptr->stats.rx_errors++; - printk(KERN_INFO "%s: Restart is " - "required after remote " + dev_info(&cdev->dev, + "%s: Restart is required after remote " "side recovers \n", dev->name); } @@ -713,11 +725,13 @@ claw_irq_handler(struct ccw_device *cdev, return; case CLAW_START_WRITE: if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - printk(KERN_INFO "%s: Unit Check Occured in " + dev_info(&cdev->dev, + "%s: Unit Check Occured in " "write channel\n", dev->name); clear_bit(0, (void *)&p_ch->IO_active); if (p_ch->irb->ecw[0] & 0x80) { - printk(KERN_INFO "%s: Resetting Event " + dev_info(&cdev->dev, + "%s: Resetting Event " "occurred:\n", dev->name); init_timer(&p_ch->timer); p_ch->timer.function = @@ -725,7 +739,8 @@ claw_irq_handler(struct ccw_device *cdev, p_ch->timer.data = (unsigned long)p_ch; p_ch->timer.expires = jiffies + 10*HZ; add_timer(&p_ch->timer); - printk(KERN_INFO "%s: write connection " + dev_info(&cdev->dev, + "%s: write connection " "restarting\n", dev->name); } CLAW_DBF_TEXT(4, trace, 
"rstrtwrt"); @@ -733,9 +748,10 @@ claw_irq_handler(struct ccw_device *cdev, } if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { clear_bit(0, (void *)&p_ch->IO_active); - printk(KERN_INFO "%s: Unit Exception " - "Occured in write channel\n", - dev->name); + dev_info(&cdev->dev, + "%s: Unit Exception " + "occurred in write channel\n", + dev->name); } if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || @@ -757,8 +773,9 @@ claw_irq_handler(struct ccw_device *cdev, CLAW_DBF_TEXT(4, trace, "StWtExit"); return; default: - printk(KERN_WARNING "%s: wrong selection code - irq " - "state=%d\n", dev->name, p_ch->claw_state); + dev_warn(&cdev->dev, + "The CLAW device for %s received an unexpected IRQ\n", + dev->name); CLAW_DBF_TEXT(2, trace, "badIRQ"); return; } @@ -910,8 +927,10 @@ claw_release(struct net_device *dev) if (((privptr->channel[READ].last_dstat | privptr->channel[WRITE].last_dstat) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { - printk(KERN_WARNING "%s: channel problems during close - " - "read: %02x - write: %02x\n", + dev_warn(&privptr->channel[READ].cdev->dev, + "Deactivating %s completed with incorrect" + " subchannel status " + "(read %02x, write %02x)\n", dev->name, privptr->channel[READ].last_dstat, privptr->channel[WRITE].last_dstat); @@ -1076,8 +1095,8 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, } if ( privptr-> p_read_active_first ==NULL ) { - privptr-> p_read_active_first= p_first; /* set new first */ - privptr-> p_read_active_last = p_last; /* set new last */ + privptr->p_read_active_first = p_first; /* set new first */ + privptr->p_read_active_last = p_last; /* set new last */ } else { @@ -1113,7 +1132,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, privptr->p_read_active_last->r_TIC_2.cda= (__u32)__pa(&p_first->read); } - /* chain in new set of blocks */ + /* chain in new set of blocks */ privptr->p_read_active_last->next = p_first; privptr->p_read_active_last=p_last; } /* end of if ( privptr-> p_read_active_first ==NULL) */ @@ -1135,21 +1154,18 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code) case -EBUSY: /* BUSY is a transient state no action needed */ break; case -ENODEV: - printk(KERN_EMERG "%s: Missing device called " - "for IO ENODEV\n", dev_name(&cdev->dev)); - break; - case -EIO: - printk(KERN_EMERG "%s: Status pending... 
EIO \n", - dev_name(&cdev->dev)); + dev_err(&cdev->dev, "The remote channel adapter is not" + " available\n"); break; case -EINVAL: - printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n", - dev_name(&cdev->dev)); + dev_err(&cdev->dev, + "The status of the remote channel adapter" + " is not valid\n"); break; default: - printk(KERN_EMERG "%s: Unknown error in " - "Do_IO %d\n", dev_name(&cdev->dev), - return_code); + dev_err(&cdev->dev, "The common device layer" + " returned error code %d\n", + return_code); } } CLAW_DBF_TEXT(4, trace, "ccwret"); @@ -1163,40 +1179,41 @@ static void ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) { struct net_device *ndev = p_ch->ndev; + struct device *dev = &p_ch->cdev->dev; CLAW_DBF_TEXT(4, trace, "unitchek"); - printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n", - ndev->name, sense); + dev_warn(dev, "The communication peer of %s disconnected\n", + ndev->name); if (sense & 0x40) { if (sense & 0x01) { - printk(KERN_WARNING "%s: Interface disconnect or " - "Selective reset " - "occurred (remote side)\n", ndev->name); - } - else { - printk(KERN_WARNING "%s: System reset occured" - " (remote side)\n", ndev->name); + dev_warn(dev, "The remote channel adapter for" + " %s has been reset\n", + ndev->name); } } else if (sense & 0x20) { if (sense & 0x04) { - printk(KERN_WARNING "%s: Data-streaming " - "timeout)\n", ndev->name); + dev_warn(dev, "A data streaming timeout occurred" + " for %s\n", + ndev->name); } else { - printk(KERN_WARNING "%s: Data-transfer parity" - " error\n", ndev->name); + dev_warn(dev, "A data transfer parity error occurred" + " for %s\n", + ndev->name); } } else if (sense & 0x10) { if (sense & 0x20) { - printk(KERN_WARNING "%s: Hardware malfunction " - "(remote side)\n", ndev->name); + dev_warn(dev, "The remote channel adapter for %s" + " is faulty\n", + ndev->name); } else { - printk(KERN_WARNING "%s: read-data parity error " - "(remote side)\n", ndev->name); + dev_warn(dev, "A read data parity error occurred" + " for %s\n", + ndev->name); } } @@ -1375,7 +1392,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) */ if (p_first_ccw!=NULL) { - /* setup ending ccw sequence for this segment */ + /* setup ending ccw sequence for this segment */ pEnd=privptr->p_end_ccw; if (pEnd->write1) { pEnd->write1=0x00; /* second end ccw is now active */ @@ -1697,10 +1714,11 @@ init_ccw_bk(struct net_device *dev) p_buf-> w_TIC_1.flags = 0; p_buf-> w_TIC_1.count = 0; - if (((unsigned long)p_buff+privptr->p_env->write_size) >= + if (((unsigned long)p_buff + + privptr->p_env->write_size) >= ((unsigned long)(p_buff+2* - (privptr->p_env->write_size) -1) & PAGE_MASK)) { - p_buff= p_buff+privptr->p_env->write_size; + (privptr->p_env->write_size) - 1) & PAGE_MASK)) { + p_buff = p_buff+privptr->p_env->write_size; } } } @@ -1840,15 +1858,16 @@ init_ccw_bk(struct net_device *dev) p_buf->header.opcode=0xff; p_buf->header.flag=CLAW_PENDING; - if (((unsigned long)p_buff+privptr->p_env->read_size) >= - ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1) - & PAGE_MASK) ) { + if (((unsigned long)p_buff+privptr->p_env->read_size) >= + ((unsigned long)(p_buff+2*(privptr->p_env->read_size) + -1) + & PAGE_MASK)) { p_buff= p_buff+privptr->p_env->read_size; } else { p_buff= (void *)((unsigned long) - (p_buff+2*(privptr->p_env->read_size) -1) + (p_buff+2*(privptr->p_env->read_size)-1) & PAGE_MASK) ; } } /* for read_buffers */ @@ -1856,24 +1875,28 @@ init_ccw_bk(struct net_device *dev) else { /* read Size >= PAGE_SIZE */ for (i=0 ; i< 
privptr->p_env->read_buffers ; i++) { p_buff = (void *)__get_free_pages(__GFP_DMA, - (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) ); + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perread)); if (p_buff==NULL) { free_pages((unsigned long)privptr->p_buff_ccw, - (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); + (int)pages_to_order_of_mag(privptr-> + p_buff_ccw_num)); /* free the write pages */ p_buf=privptr->p_buff_write; while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perwrite )); + free_pages( + (unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perwrite)); p_buf=p_buf->next; } /* free any read pages already alloc */ p_buf=privptr->p_buff_read; while (p_buf!=NULL) { - free_pages((unsigned long)p_buf->p_buffer, - (int)pages_to_order_of_mag( - privptr->p_buff_pages_perread )); + free_pages( + (unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perread)); p_buf=p_buf->next; } privptr->p_buff_ccw=NULL; @@ -2003,7 +2026,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) tdev = &privptr->channel[READ].cdev->dev; memcpy( &temp_host_name, p_env->host_name, 8); memcpy( &temp_ws_name, p_env->adapter_name , 8); - printk(KERN_INFO "%s: CLAW device %.8s: " + dev_info(tdev, "%s: CLAW device %.8s: " "Received Control Packet\n", dev->name, temp_ws_name); if (privptr->release_pend==1) { @@ -2022,32 +2045,30 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) if (p_ctlbk->version != CLAW_VERSION_ID) { claw_snd_sys_validate_rsp(dev, p_ctlbk, CLAW_RC_WRONG_VERSION); - printk("%s: %d is wrong version id. " - "Expected %d\n", - dev->name, p_ctlbk->version, - CLAW_VERSION_ID); + dev_warn(tdev, "The communication peer of %s" + " uses an incorrect API version %d\n", + dev->name, p_ctlbk->version); } p_sysval = (struct sysval *)&(p_ctlbk->data); - printk("%s: Recv Sys Validate Request: " - "Vers=%d,link_id=%d,Corr=%d,WS name=%." 
- "8s,Host name=%.8s\n", - dev->name, p_ctlbk->version, - p_ctlbk->linkid, - p_ctlbk->correlator, - p_sysval->WS_name, - p_sysval->host_name); + dev_info(tdev, "%s: Recv Sys Validate Request: " + "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s," + "Host name=%.8s\n", + dev->name, p_ctlbk->version, + p_ctlbk->linkid, + p_ctlbk->correlator, + p_sysval->WS_name, + p_sysval->host_name); if (memcmp(temp_host_name, p_sysval->host_name, 8)) { claw_snd_sys_validate_rsp(dev, p_ctlbk, CLAW_RC_NAME_MISMATCH); CLAW_DBF_TEXT(2, setup, "HSTBAD"); CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name); CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name); - printk(KERN_INFO "%s: Host name mismatch\n", - dev->name); - printk(KERN_INFO "%s: Received :%s: " - "expected :%s: \n", - dev->name, + dev_warn(tdev, + "Host name %s for %s does not match the" + " remote adapter name %s\n", p_sysval->host_name, + dev->name, temp_host_name); } if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) { @@ -2056,35 +2077,38 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) CLAW_DBF_TEXT(2, setup, "WSNBAD"); CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name); CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name); - printk(KERN_INFO "%s: WS name mismatch\n", - dev->name); - printk(KERN_INFO "%s: Received :%s: " - "expected :%s: \n", - dev->name, - p_sysval->WS_name, - temp_ws_name); + dev_warn(tdev, "Adapter name %s for %s does not match" + " the remote host name %s\n", + p_sysval->WS_name, + dev->name, + temp_ws_name); } if ((p_sysval->write_frame_size < p_env->write_size) && (p_env->packing == 0)) { claw_snd_sys_validate_rsp(dev, p_ctlbk, CLAW_RC_HOST_RCV_TOO_SMALL); - printk(KERN_INFO "%s: host write size is too " - "small\n", dev->name); + dev_warn(tdev, + "The local write buffer is smaller than the" + " remote read buffer\n"); CLAW_DBF_TEXT(2, setup, "wrtszbad"); } if ((p_sysval->read_frame_size < p_env->read_size) && (p_env->packing == 0)) { claw_snd_sys_validate_rsp(dev, p_ctlbk, CLAW_RC_HOST_RCV_TOO_SMALL); - printk(KERN_INFO "%s: host read size is too " - "small\n", dev->name); + dev_warn(tdev, + "The local read buffer is smaller than the" + " remote write buffer\n"); CLAW_DBF_TEXT(2, setup, "rdsizbad"); } claw_snd_sys_validate_rsp(dev, p_ctlbk, 0); - printk(KERN_INFO "%s: CLAW device %.8s: System validate " - "completed.\n", dev->name, temp_ws_name); - printk("%s: sys Validate Rsize:%d Wsize:%d\n", dev->name, - p_sysval->read_frame_size, p_sysval->write_frame_size); + dev_info(tdev, + "CLAW device %.8s: System validate" + " completed.\n", temp_ws_name); + dev_info(tdev, + "%s: sys Validate Rsize:%d Wsize:%d\n", + dev->name, p_sysval->read_frame_size, + p_sysval->write_frame_size); privptr->system_validate_comp = 1; if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) p_env->packing = PACKING_ASK; @@ -2092,8 +2116,10 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) break; case SYSTEM_VALIDATE_RESPONSE: p_sysval = (struct sysval *)&(p_ctlbk->data); - printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d," - "WS name=%.8s,Host name=%.8s\n", + dev_info(tdev, + "Settings for %s validated (version=%d, " + "remote device=%d, rc=%d, adapter name=%.8s, " + "host name=%.8s)\n", dev->name, p_ctlbk->version, p_ctlbk->correlator, @@ -2102,41 +2128,39 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) p_sysval->host_name); switch (p_ctlbk->rc) { case 0: - printk(KERN_INFO "%s: CLAW device " - "%.8s: System validate " - "completed.\n", - dev->name, temp_ws_name); + dev_info(tdev, "%s: CLAW 
device " + "%.8s: System validate completed.\n", + dev->name, temp_ws_name); if (privptr->system_validate_comp == 0) claw_strt_conn_req(dev); privptr->system_validate_comp = 1; break; case CLAW_RC_NAME_MISMATCH: - printk(KERN_INFO "%s: Sys Validate " - "Resp : Host, WS name is " - "mismatch\n", - dev->name); + dev_warn(tdev, "Validating %s failed because of" + " a host or adapter name mismatch\n", + dev->name); break; case CLAW_RC_WRONG_VERSION: - printk(KERN_INFO "%s: Sys Validate " - "Resp : Wrong version\n", + dev_warn(tdev, "Validating %s failed because of a" + " version conflict\n", dev->name); break; case CLAW_RC_HOST_RCV_TOO_SMALL: - printk(KERN_INFO "%s: Sys Validate " - "Resp : bad frame size\n", + dev_warn(tdev, "Validating %s failed because of a" + " frame size conflict\n", dev->name); break; default: - printk(KERN_INFO "%s: Sys Validate " - "error code=%d \n", - dev->name, p_ctlbk->rc); + dev_warn(tdev, "The communication peer of %s rejected" + " the connection\n", + dev->name); break; } break; case CONNECTION_REQUEST: p_connect = (struct conncmd *)&(p_ctlbk->data); - printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d," + dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d," "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", dev->name, p_ctlbk->version, @@ -2146,21 +2170,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) p_connect->WS_name); if (privptr->active_link_ID != 0) { claw_snd_disc(dev, p_ctlbk); - printk(KERN_INFO "%s: Conn Req error : " - "already logical link is active \n", + dev_info(tdev, "%s rejected a connection request" + " because it is already active\n", dev->name); } if (p_ctlbk->linkid != 1) { claw_snd_disc(dev, p_ctlbk); - printk(KERN_INFO "%s: Conn Req error : " - "req logical link id is not 1\n", + dev_info(tdev, "%s rejected a request to open multiple" + " connections\n", dev->name); } rc = find_link(dev, p_connect->host_name, p_connect->WS_name); if (rc != 0) { claw_snd_disc(dev, p_ctlbk); - printk(KERN_INFO "%s: Conn Resp error: " - "req appl name does not match\n", + dev_info(tdev, "%s rejected a connection request" + " because of a type mismatch\n", dev->name); } claw_send_control(dev, @@ -2172,7 +2196,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) p_env->packing = PACK_SEND; claw_snd_conn_req(dev, 0); } - printk(KERN_INFO "%s: CLAW device %.8s: Connection " + dev_info(tdev, "%s: CLAW device %.8s: Connection " "completed link_id=%d.\n", dev->name, temp_ws_name, p_ctlbk->linkid); @@ -2182,7 +2206,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) break; case CONNECTION_RESPONSE: p_connect = (struct conncmd *)&(p_ctlbk->data); - printk(KERN_INFO "%s: Revc Conn Resp: Vers=%d,link_id=%d," + dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d," "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", dev->name, p_ctlbk->version, @@ -2193,16 +2217,18 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) p_connect->WS_name); if (p_ctlbk->rc != 0) { - printk(KERN_INFO "%s: Conn Resp error: rc=%d \n", - dev->name, p_ctlbk->rc); + dev_warn(tdev, "The communication peer of %s rejected" + " a connection request\n", + dev->name); return 1; } rc = find_link(dev, p_connect->host_name, p_connect->WS_name); if (rc != 0) { claw_snd_disc(dev, p_ctlbk); - printk(KERN_INFO "%s: Conn Resp error: " - "req appl name does not match\n", + dev_warn(tdev, "The communication peer of %s" + " rejected a connection " + "request because of a type mismatch\n", dev->name); } /* should be until 
CONNECTION_CONFIRM */ @@ -2210,7 +2236,8 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) break; case CONNECTION_CONFIRM: p_connect = (struct conncmd *)&(p_ctlbk->data); - printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d," + dev_info(tdev, + "%s: Recv Conn Confirm:Vers=%d,link_id=%d," "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", dev->name, p_ctlbk->version, @@ -2221,21 +2248,21 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) if (p_ctlbk->linkid == -(privptr->active_link_ID)) { privptr->active_link_ID = p_ctlbk->linkid; if (p_env->packing > PACKING_ASK) { - printk(KERN_INFO "%s: Confirmed Now packing\n", - dev->name); + dev_info(tdev, + "%s: Confirmed Now packing\n", dev->name); p_env->packing = DO_PACKED; } p_ch = &privptr->channel[WRITE]; wake_up(&p_ch->wait); } else { - printk(KERN_INFO "%s: Conn confirm: " - "unexpected linkid=%d \n", + dev_warn(tdev, "Activating %s failed because of" + " an incorrect link ID=%d\n", dev->name, p_ctlbk->linkid); claw_snd_disc(dev, p_ctlbk); } break; case DISCONNECT: - printk(KERN_INFO "%s: Disconnect: " + dev_info(tdev, "%s: Disconnect: " "Vers=%d,link_id=%d,Corr=%d\n", dev->name, p_ctlbk->version, p_ctlbk->linkid, p_ctlbk->correlator); @@ -2247,12 +2274,13 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) privptr->active_link_ID = 0; break; case CLAW_ERROR: - printk(KERN_INFO "%s: CLAW ERROR detected\n", + dev_warn(tdev, "The communication peer of %s failed\n", dev->name); break; default: - printk(KERN_INFO "%s: Unexpected command code=%d \n", - dev->name, p_ctlbk->command); + dev_warn(tdev, "The communication peer of %s sent" + " an unknown command code\n", + dev->name); break; } @@ -2294,12 +2322,14 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link, memcpy(&p_sysval->host_name, local_name, 8); memcpy(&p_sysval->WS_name, remote_name, 8); if (privptr->p_env->packing > 0) { - p_sysval->read_frame_size=DEF_PACK_BUFSIZE; - p_sysval->write_frame_size=DEF_PACK_BUFSIZE; + p_sysval->read_frame_size = DEF_PACK_BUFSIZE; + p_sysval->write_frame_size = DEF_PACK_BUFSIZE; } else { /* how big is the biggest group of packets */ - p_sysval->read_frame_size=privptr->p_env->read_size; - p_sysval->write_frame_size=privptr->p_env->write_size; + p_sysval->read_frame_size = + privptr->p_env->read_size; + p_sysval->write_frame_size = + privptr->p_env->write_size; } memset(&p_sysval->reserved, 0x00, 4); break; @@ -2511,8 +2541,10 @@ unpack_read(struct net_device *dev ) mtc_this_frm=1; if (p_this_ccw->header.length!= privptr->p_env->read_size ) { - printk(KERN_INFO " %s: Invalid frame detected " - "length is %02x\n" , + dev_warn(p_dev, + "The communication peer of %s" + " sent a faulty" + " frame of length %02x\n", dev->name, p_this_ccw->header.length); } } @@ -2544,7 +2576,7 @@ unpack_next: goto NextFrame; p_packd = p_this_ccw->p_buffer+pack_off; p_packh = (struct clawph *) p_packd; - if ((p_packh->len == 0) || /* all done with this frame? */ + if ((p_packh->len == 0) || /* done with this frame? 
*/ (p_packh->flag != 0)) goto NextFrame; bytes_to_mov = p_packh->len; @@ -2594,9 +2626,9 @@ unpack_next: netif_rx(skb); } else { + dev_info(p_dev, "Allocating a buffer for" + " incoming data failed\n"); privptr->stats.rx_dropped++; - printk(KERN_WARNING "%s: %s() low on memory\n", - dev->name,__func__); } privptr->mtc_offset=0; privptr->mtc_logical_link=-1; @@ -2720,8 +2752,8 @@ claw_strt_out_IO( struct net_device *dev ) if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { parm = (unsigned long) p_ch; CLAW_DBF_TEXT(2, trace, "StWrtIO"); - rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm, - 0xff, 0); + rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm, + 0xff, 0); if (rc != 0) { ccw_check_return_code(p_ch->cdev, rc); } @@ -2816,22 +2848,26 @@ claw_free_netdevice(struct net_device * dev, int free_dev) * Initialize everything of the net device except the name and the * channel structs. */ +static const struct net_device_ops claw_netdev_ops = { + .ndo_open = claw_open, + .ndo_stop = claw_release, + .ndo_get_stats = claw_stats, + .ndo_start_xmit = claw_tx, + .ndo_change_mtu = claw_change_mtu, +}; + static void claw_init_netdevice(struct net_device * dev) { CLAW_DBF_TEXT(2, setup, "init_dev"); CLAW_DBF_TEXT_(2, setup, "%s", dev->name); dev->mtu = CLAW_DEFAULT_MTU_SIZE; - dev->hard_start_xmit = claw_tx; - dev->open = claw_open; - dev->stop = claw_release; - dev->get_stats = claw_stats; - dev->change_mtu = claw_change_mtu; dev->hard_header_len = 0; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = 1300; dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->netdev_ops = &claw_netdev_ops; CLAW_DBF_TEXT(2, setup, "initok"); return; } @@ -2880,8 +2916,8 @@ claw_new_device(struct ccwgroup_device *cgdev) int ret; struct ccw_dev_id dev_id; - printk(KERN_INFO "claw: add for %s\n", - dev_name(&cgdev->cdev[READ]->dev)); + dev_info(&cgdev->dev, "add for %s\n", + dev_name(&cgdev->cdev[READ]->dev)); CLAW_DBF_TEXT(2, setup, "new_dev"); privptr = cgdev->dev.driver_data; cgdev->cdev[READ]->dev.driver_data = privptr; @@ -2897,29 +2933,28 @@ claw_new_device(struct ccwgroup_device *cgdev) if (ret == 0) ret = add_channel(cgdev->cdev[1],1,privptr); if (ret != 0) { - printk(KERN_WARNING - "add channel failed with ret = %d\n", ret); + dev_warn(&cgdev->dev, "Creating a CLAW group device" + " failed with error code %d\n", ret); goto out; } ret = ccw_device_set_online(cgdev->cdev[READ]); if (ret != 0) { - printk(KERN_WARNING - "claw: ccw_device_set_online %s READ failed " - "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev), - ret); + dev_warn(&cgdev->dev, + "Setting the read subchannel online" + " failed with error code %d\n", ret); goto out; } ret = ccw_device_set_online(cgdev->cdev[WRITE]); if (ret != 0) { - printk(KERN_WARNING - "claw: ccw_device_set_online %s WRITE failed " - "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev), - ret); + dev_warn(&cgdev->dev, + "Setting the write subchannel online " + "failed with error code %d\n", ret); goto out; } dev = alloc_netdev(0,"claw%d",claw_init_netdevice); if (!dev) { - printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__); + dev_warn(&cgdev->dev, + "Activating the CLAW device failed\n"); goto out; } dev->ml_priv = privptr; @@ -2947,13 +2982,13 @@ claw_new_device(struct ccwgroup_device *cgdev) privptr->channel[WRITE].ndev = dev; privptr->p_env->ndev = dev; - printk(KERN_INFO "%s:readsize=%d writesize=%d " + dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", dev->name, 
p_env->read_size, p_env->write_size, p_env->read_buffers, p_env->write_buffers, p_env->devno[READ], p_env->devno[WRITE]); - printk(KERN_INFO "%s:host_name:%.8s, adapter_name " + dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " ":%.8s api_type: %.8s\n", dev->name, p_env->host_name, p_env->adapter_name , p_env->api_type); @@ -2997,8 +3032,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) ndev = priv->channel[READ].ndev; if (ndev) { /* Close the device */ - printk(KERN_INFO - "%s: shuting down \n",ndev->name); + dev_info(&cgdev->dev, "%s: shutting down \n", + ndev->name); if (ndev->flags & IFF_RUNNING) ret = claw_release(ndev); ndev->flags &=~IFF_RUNNING; @@ -3023,8 +3058,7 @@ claw_remove_device(struct ccwgroup_device *cgdev) CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); priv = cgdev->dev.driver_data; BUG_ON(!priv); - printk(KERN_INFO "claw: %s() called %s will be removed.\n", - __func__, dev_name(&cgdev->cdev[0]->dev)); + dev_info(&cgdev->dev, " will be removed.\n"); if (cgdev->state == CCWGROUP_ONLINE) claw_shutdown_device(cgdev); claw_remove_files(&cgdev->dev); @@ -3063,7 +3097,8 @@ claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf) } static ssize_t -claw_hname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +claw_hname_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct claw_privbk *priv; struct claw_env * p_env; @@ -3100,7 +3135,8 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf) } static ssize_t -claw_adname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +claw_adname_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct claw_privbk *priv; struct claw_env * p_env; @@ -3138,7 +3174,8 @@ claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf) } static ssize_t -claw_apname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +claw_apname_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct claw_privbk *priv; struct claw_env * p_env; @@ -3185,7 +3222,8 @@ claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf) } static ssize_t -claw_wbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +claw_wbuff_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct claw_privbk *priv; struct claw_env * p_env; @@ -3226,7 +3264,8 @@ claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf) } static ssize_t -claw_rbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +claw_rbuff_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct claw_privbk *priv; struct claw_env *p_env; @@ -3289,7 +3328,7 @@ claw_cleanup(void) { unregister_cu3088_discipline(&claw_group_driver); claw_unregister_debug_facility(); - printk(KERN_INFO "claw: Driver unloaded\n"); + pr_info("Driver unloaded\n"); } @@ -3303,12 +3342,12 @@ static int __init claw_init(void) { int ret = 0; - printk(KERN_INFO "claw: starting driver\n"); + pr_info("Loading %s\n", version); ret = claw_register_debug_facility(); if (ret) { - printk(KERN_WARNING "claw: %s() debug_register failed %d\n", - __func__,ret); + pr_err("Registering with the S/390 debug feature" + " failed with error code %d\n", ret); 
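The claw hunks above replace raw printk() calls with the s390 message conventions: conditions tied to a specific channel or group device are reported through dev_err()/dev_warn()/dev_info() on that device, while driver-global events use the pr_*() helpers, which pick up the "claw" prefix from KMSG_COMPONENT. A minimal sketch of that convention follows; the pr_fmt definition and the claw_report_example() helper are illustrative assumptions, not part of the patch.

#define KMSG_COMPONENT "claw"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt	/* assumed; gives pr_*() the "claw: " prefix */

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical helper: per-device failure via dev_err(), driver-wide note via pr_info(). */
static void claw_report_example(struct device *dev, int rc)
{
	if (rc)
		dev_err(dev, "Setting the read subchannel online"
			" failed with error code %d\n", rc);
	else
		pr_info("Driver loaded\n");
}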
return ret; } CLAW_DBF_TEXT(2, setup, "init_mod"); @@ -3316,8 +3355,8 @@ claw_init(void) if (ret) { CLAW_DBF_TEXT(2, setup, "init_bad"); claw_unregister_debug_facility(); - printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n", - __func__,ret); + pr_err("Registering with the cu3088 device driver failed " + "with error code %d\n", ret); } return ret; } diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 2678573bece..8f2a888d0a0 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1099,12 +1099,24 @@ static void ctcm_free_netdevice(struct net_device *dev) struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); +static const struct net_device_ops ctcm_netdev_ops = { + .ndo_open = ctcm_open, + .ndo_stop = ctcm_close, + .ndo_get_stats = ctcm_stats, + .ndo_change_mtu = ctcm_change_mtu, + .ndo_start_xmit = ctcm_tx, +}; + +static const struct net_device_ops ctcm_mpc_netdev_ops = { + .ndo_open = ctcm_open, + .ndo_stop = ctcm_close, + .ndo_get_stats = ctcm_stats, + .ndo_change_mtu = ctcm_change_mtu, + .ndo_start_xmit = ctcmpc_tx, +}; + void static ctcm_dev_setup(struct net_device *dev) { - dev->open = ctcm_open; - dev->stop = ctcm_close; - dev->get_stats = ctcm_stats; - dev->change_mtu = ctcm_change_mtu; dev->type = ARPHRD_SLIP; dev->tx_queue_len = 100; dev->flags = IFF_POINTOPOINT | IFF_NOARP; @@ -1157,12 +1169,12 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) dev->mtu = MPC_BUFSIZE_DEFAULT - TH_HEADER_LENGTH - PDU_HEADER_LENGTH; - dev->hard_start_xmit = ctcmpc_tx; + dev->netdev_ops = &ctcm_mpc_netdev_ops; dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; priv->buffer_size = MPC_BUFSIZE_DEFAULT; } else { dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; - dev->hard_start_xmit = ctcm_tx; + dev->netdev_ops = &ctcm_netdev_ops; dev->hard_header_len = LL_HEADER_LENGTH + 2; } diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 49c3bfa1afd..083f787d260 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -2101,6 +2101,20 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev) /** * lcs_new_device will be called by setting the group device online. 
*/ +static const struct net_device_ops lcs_netdev_ops = { + .ndo_open = lcs_open_device, + .ndo_stop = lcs_stop_device, + .ndo_get_stats = lcs_getstats, + .ndo_start_xmit = lcs_start_xmit, +}; + +static const struct net_device_ops lcs_mc_netdev_ops = { + .ndo_open = lcs_open_device, + .ndo_stop = lcs_stop_device, + .ndo_get_stats = lcs_getstats, + .ndo_start_xmit = lcs_start_xmit, + .ndo_set_multicast_list = lcs_set_multicast_list, +}; static int lcs_new_device(struct ccwgroup_device *ccwgdev) @@ -2168,14 +2182,11 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) goto out; card->dev = dev; card->dev->ml_priv = card; - card->dev->open = lcs_open_device; - card->dev->stop = lcs_stop_device; - card->dev->hard_start_xmit = lcs_start_xmit; - card->dev->get_stats = lcs_getstats; + card->dev->netdev_ops = &lcs_netdev_ops; memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); #ifdef CONFIG_IP_MULTICAST if (!lcs_check_multicast_support(card)) - card->dev->set_multicast_list = lcs_set_multicast_list; + card->dev->netdev_ops = &lcs_mc_netdev_ops; #endif netdev_out: lcs_set_allowed_threads(card,0xffffffff); diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 930e2fc2a01..1ba4509435f 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1876,20 +1876,24 @@ static void netiucv_free_netdevice(struct net_device *dev) /** * Initialize a net device. (Called from kernel in alloc_netdev()) */ +static const struct net_device_ops netiucv_netdev_ops = { + .ndo_open = netiucv_open, + .ndo_stop = netiucv_close, + .ndo_get_stats = netiucv_stats, + .ndo_start_xmit = netiucv_tx, + .ndo_change_mtu = netiucv_change_mtu, +}; + static void netiucv_setup_netdevice(struct net_device *dev) { dev->mtu = NETIUCV_MTU_DEFAULT; - dev->hard_start_xmit = netiucv_tx; - dev->open = netiucv_open; - dev->stop = netiucv_close; - dev->get_stats = netiucv_stats; - dev->change_mtu = netiucv_change_mtu; dev->destructor = netiucv_free_netdevice; dev->hard_header_len = NETIUCV_HDRLEN; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->netdev_ops = &netiucv_netdev_ops; } /** diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h index 5c7c4d95c49..f675807cc48 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h +++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h @@ -13,6 +13,8 @@ #ifndef __CXGB3I_ULP2_DDP_H__ #define __CXGB3I_ULP2_DDP_H__ +#include <linux/vmalloc.h> + /** * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity * diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index a1916078fd0..cd4bcb6989c 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h @@ -235,8 +235,6 @@ struct Outgoing { struct arcnet_local { - struct net_device_stats stats; - uint8_t config, /* current value of CONFIG register */ timeout, /* Extended timeout for COM20020 */ backplane, /* Backplane flag for COM20020 */ @@ -335,7 +333,12 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); void arcnet_unregister_proto(struct ArcProto *proto); irqreturn_t arcnet_interrupt(int irq, void *dev_id); -struct net_device *alloc_arcdev(char *name); +struct net_device *alloc_arcdev(const char *name); + +int arcnet_open(struct net_device *dev); +int arcnet_close(struct net_device *dev); +int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); +void arcnet_timeout(struct net_device *dev); #endif /* __KERNEL__ */ #endif /* _LINUX_ARCDEVICE_H */ diff 
--git a/include/linux/com20020.h b/include/linux/com20020.h index ac6d9a43e08..5dcfb944b6c 100644 --- a/include/linux/com20020.h +++ b/include/linux/com20020.h @@ -29,6 +29,7 @@ int com20020_check(struct net_device *dev); int com20020_found(struct net_device *dev, int shared); +extern const struct net_device_ops com20020_netdev_ops; /* The number of low I/O ports used by the card. */ #define ARCNET_TOTAL_SIZE 8 diff --git a/include/linux/connector.h b/include/linux/connector.h index 34f2789d9b9..fc65d219d88 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -109,6 +109,12 @@ struct cn_queue_dev { unsigned char name[CN_CBQ_NAMELEN]; struct workqueue_struct *cn_queue; + /* Sent to kevent to create cn_queue only when needed */ + struct work_struct wq_creation; + /* Tell if the wq_creation job is pending/completed */ + atomic_t wq_requested; + /* Wait for cn_queue to be created */ + wait_queue_head_t wq_created; struct list_head queue_list; spinlock_t queue_lock; @@ -164,6 +170,8 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t); int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); +int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work); + struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); void cn_queue_free_dev(struct cn_queue_dev *dev); diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 61734e27abb..7434a8353e2 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -355,46 +355,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) return __dccp_hdr_len(dccp_hdr(skb)); } - -/* initial values for each feature */ -#define DCCPF_INITIAL_SEQUENCE_WINDOW 100 -#define DCCPF_INITIAL_ACK_RATIO 2 -#define DCCPF_INITIAL_CCID DCCPC_CCID2 -/* FIXME: for now we're default to 1 but it should really be 0 */ -#define DCCPF_INITIAL_SEND_NDP_COUNT 1 - -/** - * struct dccp_minisock - Minimal DCCP connection representation - * - * Will be used to pass the state from dccp_request_sock to dccp_sock. 
- * - * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2) - * @dccpms_pending - List of features being negotiated - * @dccpms_conf - - */ -struct dccp_minisock { - __u64 dccpms_sequence_window; - struct list_head dccpms_pending; - struct list_head dccpms_conf; -}; - -struct dccp_opt_conf { - __u8 *dccpoc_val; - __u8 dccpoc_len; -}; - -struct dccp_opt_pend { - struct list_head dccpop_node; - __u8 dccpop_type; - __u8 dccpop_feat; - __u8 *dccpop_val; - __u8 dccpop_len; - int dccpop_conf; - struct dccp_opt_conf *dccpop_sc; -}; - -extern void dccp_minisock_init(struct dccp_minisock *dmsk); - /** * struct dccp_request_sock - represent DCCP-specific connection request * @dreq_inet_rsk: structure inherited from @@ -483,13 +443,14 @@ struct dccp_ackvec; * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo * @dccps_l_ack_ratio - feature-local Ack Ratio * @dccps_r_ack_ratio - feature-remote Ack Ratio + * @dccps_l_seq_win - local Sequence Window (influences ack number validity) + * @dccps_r_seq_win - remote Sequence Window (influences seq number validity) * @dccps_pcslen - sender partial checksum coverage (via sockopt) * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) * @dccps_ndp_count - number of Non Data Packets since last data packet * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) - * @dccps_minisock - associated minisock (accessed via dccp_msk) * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) * @dccps_hc_rx_ackvec - rx half connection ack vector * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) @@ -523,12 +484,13 @@ struct dccp_sock { __u32 dccps_timestamp_time; __u16 dccps_l_ack_ratio; __u16 dccps_r_ack_ratio; + __u64 dccps_l_seq_win:48; + __u64 dccps_r_seq_win:48; __u8 dccps_pcslen:4; __u8 dccps_pcrlen:4; __u8 dccps_send_ndp_count:1; __u64 dccps_ndp_count:48; unsigned long dccps_rate_last; - struct dccp_minisock dccps_minisock; struct list_head dccps_featneg; struct dccp_ackvec *dccps_hc_rx_ackvec; struct ccid *dccps_hc_rx_ccid; @@ -546,11 +508,6 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk) return (struct dccp_sock *)sk; } -static inline struct dccp_minisock *dccp_msk(const struct sock *sk) -{ - return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock; -} - static inline const char *dccp_role(const struct sock *sk) { switch (dccp_sk(sk)->dccps_role) { diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index fd47a151665..6a6e701f163 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -38,6 +38,7 @@ struct hdlc_proto { int (*ioctl)(struct net_device *dev, struct ifreq *ifr); __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); int (*netif_rx)(struct sk_buff *skb); + int (*xmit)(struct sk_buff *skb, struct net_device *dev); struct module *module; struct hdlc_proto *next; /* next protocol in the list */ }; @@ -102,6 +103,10 @@ static __inline__ void debug_frame(const struct sk_buff *skb) int hdlc_open(struct net_device *dev); /* Must be called by hardware driver when HDLC device is being closed */ void hdlc_close(struct net_device *dev); +/* May be used by hardware driver */ +int hdlc_change_mtu(struct net_device *dev, int new_mtu); +/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ +int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); 
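Per the comments added to hdlc.h above, hdlc_change_mtu() and hdlc_start_xmit() are meant to be wired into a hardware driver's net_device_ops, with hdlc_open()/hdlc_close() still called from the driver's own open/stop handlers. A minimal sketch of such a driver, assuming hypothetical examplehdlc_* callbacks and eliding the hardware start/stop steps:

#include <linux/hdlc.h>
#include <linux/netdevice.h>

static int examplehdlc_open(struct net_device *dev)
{
	/* a real driver would start its hardware here, then ... */
	return hdlc_open(dev);		/* ... bring up the attached HDLC protocol */
}

static int examplehdlc_close(struct net_device *dev)
{
	hdlc_close(dev);		/* shut down the attached HDLC protocol ... */
	/* ... and stop the hardware here */
	return 0;
}

static const struct net_device_ops examplehdlc_netdev_ops = {
	.ndo_open	= examplehdlc_open,
	.ndo_stop	= examplehdlc_close,
	.ndo_change_mtu	= hdlc_change_mtu,	/* generic helper declared above */
	.ndo_start_xmit	= hdlc_start_xmit,	/* generic HDLC transmit entry point */
};

The new xmit member added to struct hdlc_proto presumably lets a protocol such as PPP take over transmission, with hdlc_start_xmit() routing the skb either to the protocol or on to the hardware driver.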
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, size_t size); diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h index bf6302f6b5f..0821bac62b8 100644 --- a/include/linux/hdlcdrv.h +++ b/include/linux/hdlcdrv.h @@ -241,7 +241,6 @@ struct hdlcdrv_state { struct hdlcdrv_bitbuffer bitbuf_hdlc; #endif /* HDLCDRV_DEBUG */ - struct net_device_stats stats; int ptt_keyed; /* queued skb for transmission */ diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h index 1c7a0dd5536..06695b74d40 100644 --- a/include/linux/ibmtr.h +++ b/include/linux/ibmtr.h @@ -207,7 +207,7 @@ struct tok_info { unsigned short exsap_station_id; unsigned short global_int_enable; struct sk_buff *current_skb; - struct net_device_stats tr_stats; + unsigned char auto_speedsave; open_state open_status, sap_status; enum {MANUAL, AUTOMATIC} open_mode; diff --git a/include/linux/if.h b/include/linux/if.h index 2a6e29620a9..1108f3e099e 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -66,6 +66,7 @@ #define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ #define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ #define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ +#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 06fcdb45106..acef2a770b6 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -108,6 +108,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) +#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) struct in_ifaddr { diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 8a455694d68..0d45b4e8d36 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -193,6 +193,9 @@ struct vif_device struct mfc_cache { struct mfc_cache *next; /* Next entry on cache line */ +#ifdef CONFIG_NET_NS + struct net *mfc_net; +#endif __be32 mfc_mcastgrp; /* Group the entry belongs to */ __be32 mfc_origin; /* Source of packet */ vifi_t mfc_parent; /* Source interface */ @@ -215,6 +218,18 @@ struct mfc_cache } mfc_un; }; +static inline +struct net *mfc_net(const struct mfc_cache *mfc) +{ + return read_pnet(&mfc->mfc_net); +} + +static inline +void mfc_net_set(struct mfc_cache *mfc, struct net *net) +{ + write_pnet(&mfc->mfc_net, hold_net(net)); +} + #define MFC_STATIC 1 #define MFC_NOTIFY 2 @@ -241,7 +256,8 @@ struct mfc_cache #ifdef __KERNEL__ struct rtmsg; -extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); +extern int ipmr_get_route(struct net *net, struct sk_buff *skb, + struct rtmsg *rtm, int nowait); #endif #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ec54785d34f..7a5057fbb7c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -984,6 +984,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, void netif_napi_del(struct napi_struct *napi); struct napi_gro_cb { + /* This indicates where we are processing relative to skb->data. */ + int data_offset; + /* This is non-zero if the packet may be of the same flow. 
*/ int same_flow; @@ -1087,6 +1090,29 @@ extern int dev_restart(struct net_device *dev); #ifdef CONFIG_NETPOLL_TRAP extern int netpoll_trap(void); #endif +extern void *skb_gro_header(struct sk_buff *skb, unsigned int hlen); +extern int skb_gro_receive(struct sk_buff **head, + struct sk_buff *skb); + +static inline unsigned int skb_gro_offset(const struct sk_buff *skb) +{ + return NAPI_GRO_CB(skb)->data_offset; +} + +static inline unsigned int skb_gro_len(const struct sk_buff *skb) +{ + return skb->len - NAPI_GRO_CB(skb)->data_offset; +} + +static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) +{ + NAPI_GRO_CB(skb)->data_offset += len; +} + +static inline void skb_gro_reset_offset(struct sk_buff *skb) +{ + NAPI_GRO_CB(skb)->data_offset = 0; +} static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, @@ -1375,12 +1401,15 @@ extern int netif_receive_skb(struct sk_buff *skb); extern void napi_gro_flush(struct napi_struct *napi); extern int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb); +extern int napi_skb_finish(int ret, struct sk_buff *skb); extern int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); extern void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb); extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, struct napi_gro_fraginfo *info); +extern int napi_frags_finish(struct napi_struct *napi, + struct sk_buff *skb, int ret); extern int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info); extern void netif_nit_deliver(struct sk_buff *skb); @@ -1574,56 +1603,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) return (1 << debug_value) - 1; } -/* Test if receive needs to be scheduled but only if up */ -static inline int netif_rx_schedule_prep(struct napi_struct *napi) -{ - return napi_schedule_prep(napi); -} - -/* Add interface to tail of rx poll list. This assumes that _prep has - * already been called and returned 1. - */ -static inline void __netif_rx_schedule(struct napi_struct *napi) -{ - __napi_schedule(napi); -} - -/* Try to reschedule poll. Called by irq handler. */ - -static inline void netif_rx_schedule(struct napi_struct *napi) -{ - if (netif_rx_schedule_prep(napi)) - __netif_rx_schedule(napi); -} - -/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ -static inline int netif_rx_reschedule(struct napi_struct *napi) -{ - if (napi_schedule_prep(napi)) { - __netif_rx_schedule(napi); - return 1; - } - return 0; -} - -/* same as netif_rx_complete, except that local_irq_save(flags) - * has already been issued - */ -static inline void __netif_rx_complete(struct napi_struct *napi) -{ - __napi_complete(napi); -} - -/* Remove interface from poll list: it must be in the poll list - * on current cpu. This primitive is called by dev->poll(), when - * it completes the work. The device cannot be out of poll list at this - * moment, it is BUG(). 
- */ -static inline void netif_rx_complete(struct napi_struct *napi) -{ - napi_complete(napi); -} - static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) { spin_lock(&txq->_xmit_lock); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 52a9fe08451..2370184e365 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2213,6 +2213,8 @@ #define PCI_VENDOR_ID_TOPSPIN 0x1867 +#define PCI_VENDOR_ID_SILAN 0x1904 + #define PCI_VENDOR_ID_TDI 0x192E #define PCI_DEVICE_ID_TDI_EHCI 0x0101 diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index a942892d6df..9d64bdf1477 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h @@ -22,6 +22,7 @@ #include <linux/list.h> #include <linux/skbuff.h> #include <linux/poll.h> +#include <net/net_namespace.h> struct ppp_channel; @@ -56,6 +57,9 @@ extern void ppp_input(struct ppp_channel *, struct sk_buff *); that we may have missed a packet. */ extern void ppp_input_error(struct ppp_channel *, int code); +/* Attach a channel to a given PPP unit in specified net. */ +extern int ppp_register_net_channel(struct net *, struct ppp_channel *); + /* Attach a channel to a given PPP unit. */ extern int ppp_register_channel(struct ppp_channel *); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index cf2cb50f77d..08670d01747 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1687,8 +1687,6 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); -extern int skb_gro_receive(struct sk_buff **head, - struct sk_buff *skb); static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) @@ -1904,6 +1902,21 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu to->queue_mapping = from->queue_mapping; } +static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) +{ + skb->queue_mapping = rx_queue + 1; +} + +static inline u16 skb_get_rx_queue(struct sk_buff *skb) +{ + return skb->queue_mapping - 1; +} + +static inline bool skb_rx_queue_recorded(struct sk_buff *skb) +{ + return (skb->queue_mapping != 0); +} + #ifdef CONFIG_XFRM static inline struct sec_path *skb_sec_path(struct sk_buff *skb) { diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h index 1cbf0313add..b32725075d7 100644 --- a/include/linux/smsc911x.h +++ b/include/linux/smsc911x.h @@ -43,5 +43,8 @@ struct smsc911x_platform_config { /* Constants for flags */ #define SMSC911X_USE_16BIT (BIT(0)) #define SMSC911X_USE_32BIT (BIT(1)) +#define SMSC911X_FORCE_INTERNAL_PHY (BIT(2)) +#define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3)) +#define SMSC911X_SAVE_MAC_ADDRESS (BIT(4)) #endif /* __LINUX_SMSC911X_H__ */ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 39d471d1163..e76d3b22a46 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -490,6 +490,7 @@ enum NET_IPV4_CONF_ARP_IGNORE=19, NET_IPV4_CONF_PROMOTE_SECONDARIES=20, NET_IPV4_CONF_ARP_ACCEPT=21, + NET_IPV4_CONF_ARP_NOTIFY=22, __NET_IPV4_CONF_MAX }; diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 3efa86c3ecb..d8e362d52fd 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -22,11 +22,16 @@ #define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ #define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ #define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. 
*/ +#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ + +#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ struct virtio_net_config { /* The config defining mac address (if VIRTIO_NET_F_MAC) */ __u8 mac[6]; + /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ + __u16 status; } __attribute__((packed)); /* This is the first element of the scatter-gather list. If you don't diff --git a/include/net/atmclip.h b/include/net/atmclip.h index b5a51a7bb36..467c531b8a7 100644 --- a/include/net/atmclip.h +++ b/include/net/atmclip.h @@ -50,7 +50,6 @@ struct atmarp_entry { struct clip_priv { int number; /* for convenience ... */ spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ - struct net_device_stats stats; struct net_device *next; /* next CLIP interface */ }; diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index d0a043153cc..a44e2248b2e 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -82,6 +82,7 @@ struct inet_bind_bucket { #endif unsigned short port; signed short fastreuse; + int num_owners; struct hlist_node node; struct hlist_head owners; }; @@ -133,7 +134,7 @@ struct inet_hashinfo { struct inet_bind_hashbucket *bhash; unsigned int bhash_size; - /* Note : 4 bytes padding on 64 bit arches */ + /* 4 bytes hole on 64 bit */ struct kmem_cache *bind_bucket_cachep; @@ -150,6 +151,7 @@ struct inet_hashinfo { struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] ____cacheline_aligned_in_smp; + atomic_t bsockets; }; static inline struct inet_ehash_bucket *inet_ehash_bucket( diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 977f482d97a..2eb3814d625 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -54,5 +54,18 @@ struct netns_ipv4 { struct timer_list rt_secret_timer; atomic_t rt_genid; + +#ifdef CONFIG_IP_MROUTE + struct sock *mroute_sk; + struct mfc_cache **mfc_cache_array; + struct vif_device *vif_table; + int maxvif; + atomic_t cache_resolve_queue_len; + int mroute_do_assert; + int mroute_do_pim; +#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) + int mroute_reg_vif_num; +#endif +#endif }; #endif diff --git a/include/net/netrom.h b/include/net/netrom.h index f06852bba62..15696b1fd30 100644 --- a/include/net/netrom.h +++ b/include/net/netrom.h @@ -59,10 +59,6 @@ enum { #define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */ #define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */ -struct nr_private { - struct net_device_stats stats; -}; - struct nr_sock { struct sock sock; ax25_address user_addr, source_addr, dest_addr; diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index 057b0a8a288..d43f71b5ec0 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h @@ -105,7 +105,6 @@ void phonet_proto_unregister(int protocol, struct phonet_protocol *pp); int phonet_sysctl_init(void); void phonet_sysctl_exit(void); -void phonet_netlink_register(void); int isi_register(void); void isi_unregister(void); diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index aa1c59a1d33..5054dc5ea2c 100644 --- a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h @@ -28,7 +28,7 @@ struct phonet_device_list { spinlock_t lock; }; -extern struct phonet_device_list pndevs; +struct phonet_device_list *phonet_device_list(struct net *net); struct phonet_device { struct list_head list; @@ -36,8 +36,9 @@ struct phonet_device { DECLARE_BITMAP(addrs, 64); }; -void phonet_device_init(void); +int 
phonet_device_init(void); void phonet_device_exit(void); +int phonet_netlink_register(void); struct net_device *phonet_device_get(struct net *net); int phonet_address_add(struct net_device *dev, u8 addr); diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 4082f39f507..e37fe3129c1 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -85,6 +85,7 @@ extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab); extern void qdisc_put_rtab(struct qdisc_rate_table *tab); extern void qdisc_put_stab(struct qdisc_size_table *tab); +extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc); extern void __qdisc_run(struct Qdisc *q); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index f8c47429044..3d78a4d2246 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -42,9 +42,10 @@ struct Qdisc int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); struct sk_buff * (*dequeue)(struct Qdisc *dev); unsigned flags; -#define TCQ_F_BUILTIN 1 -#define TCQ_F_THROTTLED 2 -#define TCQ_F_INGRESS 4 +#define TCQ_F_BUILTIN 1 +#define TCQ_F_THROTTLED 2 +#define TCQ_F_INGRESS 4 +#define TCQ_F_WARN_NONWC (1 << 16) int padded; struct Qdisc_ops *ops; struct qdisc_size_table *stab; diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index fafeb48f27c..b38423ca711 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c @@ -219,6 +219,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = { { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, + { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" }, {} }; diff --git a/net/802/psnap.c b/net/802/psnap.c index 70980baeb68..6ed711748f2 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -51,7 +51,7 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, int rc = 1; struct datalink_proto *proto; static struct packet_type snap_packet_type = { - .type = __constant_htons(ETH_P_SNAP), + .type = cpu_to_be16(ETH_P_SNAP), }; if (unlikely(!pskb_may_pull(skb, 5))) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 41e8f65bd3f..4163ea65bf4 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -52,7 +52,7 @@ static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>"; static const char vlan_buggyright[] = "David S. 
Miller <davem@redhat.com>"; static struct packet_type vlan_packet_type = { - .type = __constant_htons(ETH_P_8021Q), + .type = cpu_to_be16(ETH_P_8021Q), .func = vlan_skb_recv, /* VLAN receive method */ }; diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index e9db889d622..378fa69d625 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -98,22 +98,9 @@ drop: int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, unsigned int vlan_tci, struct sk_buff *skb) { - int err = NET_RX_SUCCESS; + skb_gro_reset_offset(skb); - switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { - case -1: - return netif_receive_skb(skb); - - case 2: - err = NET_RX_DROP; - /* fall through */ - - case 1: - kfree_skb(skb); - break; - } - - return err; + return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb); } EXPORT_SYMBOL(vlan_gro_receive); @@ -121,27 +108,11 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, unsigned int vlan_tci, struct napi_gro_fraginfo *info) { struct sk_buff *skb = napi_fraginfo_skb(napi, info); - int err = NET_RX_DROP; if (!skb) - goto out; - - err = NET_RX_SUCCESS; - - switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { - case -1: - return netif_receive_skb(skb); - - case 2: - err = NET_RX_DROP; - /* fall through */ - - case 1: - napi_reuse_skb(napi, skb); - break; - } + return NET_RX_DROP; -out: - return err; + return napi_frags_finish(napi, skb, + vlan_gro_common(napi, grp, vlan_tci, skb)); } EXPORT_SYMBOL(vlan_gro_frags); diff --git a/net/Kconfig b/net/Kconfig index cdb8fdef6c4..a12bae0e3fe 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -185,6 +185,7 @@ source "net/x25/Kconfig" source "net/lapb/Kconfig" source "net/econet/Kconfig" source "net/wanrouter/Kconfig" +source "net/phonet/Kconfig" source "net/sched/Kconfig" source "net/dcb/Kconfig" @@ -229,7 +230,6 @@ source "net/can/Kconfig" source "net/irda/Kconfig" source "net/bluetooth/Kconfig" source "net/rxrpc/Kconfig" -source "net/phonet/Kconfig" config FIB_RULES bool diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 5abce07fb50..510a6782da8 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1861,12 +1861,12 @@ static struct notifier_block ddp_notifier = { }; static struct packet_type ltalk_packet_type = { - .type = __constant_htons(ETH_P_LOCALTALK), + .type = cpu_to_be16(ETH_P_LOCALTALK), .func = ltalk_rcv, }; static struct packet_type ppptalk_packet_type = { - .type = __constant_htons(ETH_P_PPPTALK), + .type = cpu_to_be16(ETH_P_PPPTALK), .func = atalk_rcv, }; diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c index d856a62ab50..72277d70c98 100644 --- a/net/appletalk/dev.c +++ b/net/appletalk/dev.c @@ -9,22 +9,20 @@ #include <linux/if_arp.h> #include <linux/if_ltalk.h> +#ifdef CONFIG_COMPAT_NET_DEV_OPS static int ltalk_change_mtu(struct net_device *dev, int mtu) { return -EINVAL; } - -static int ltalk_mac_addr(struct net_device *dev, void *addr) -{ - return -EINVAL; -} +#endif static void ltalk_setup(struct net_device *dev) { /* Fill in the fields of the device structure with localtalk-generic values. 
*/ +#ifdef CONFIG_COMPAT_NET_DEV_OPS dev->change_mtu = ltalk_change_mtu; - dev->set_mac_address = ltalk_mac_addr; +#endif dev->type = ARPHRD_LOCALTLK; dev->hard_header_len = LTALK_HLEN; diff --git a/net/atm/br2684.c b/net/atm/br2684.c index ea9438fc685..334fcd4a4ea 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -83,7 +83,6 @@ struct br2684_dev { struct list_head br2684_devs; int number; struct list_head brvccs; /* one device <=> one vcc (before xmas) */ - struct net_device_stats stats; int mac_was_set; enum br2684_payload payload; }; @@ -148,9 +147,10 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s) * the way for multiple vcc's per itf. Returns true if we can send, * otherwise false */ -static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, +static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev, struct br2684_vcc *brvcc) { + struct br2684_dev *brdev = BRPRIV(dev); struct atm_vcc *atmvcc; int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2; @@ -211,8 +211,8 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, } atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); ATM_SKB(skb)->atm_options = atmvcc->atm_options; - brdev->stats.tx_packets++; - brdev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; atmvcc->send(atmvcc, skb); return 1; } @@ -233,14 +233,14 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) brvcc = pick_outgoing_vcc(skb, brdev); if (brvcc == NULL) { pr_debug("no vcc attached to dev %s\n", dev->name); - brdev->stats.tx_errors++; - brdev->stats.tx_carrier_errors++; + dev->stats.tx_errors++; + dev->stats.tx_carrier_errors++; /* netif_stop_queue(dev); */ dev_kfree_skb(skb); read_unlock(&devs_lock); return 0; } - if (!br2684_xmit_vcc(skb, brdev, brvcc)) { + if (!br2684_xmit_vcc(skb, dev, brvcc)) { /* * We should probably use netif_*_queue() here, but that * involves added complication. We need to walk before @@ -248,27 +248,20 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) * * Don't free here! this pointer might be no longer valid! */ - brdev->stats.tx_errors++; - brdev->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + dev->stats.tx_fifo_errors++; } read_unlock(&devs_lock); return 0; } -static struct net_device_stats *br2684_get_stats(struct net_device *dev) -{ - pr_debug("br2684_get_stats\n"); - return &BRPRIV(dev)->stats; -} - /* * We remember when the MAC gets set, so we don't override it later with * the ESI of the ATM card of the first VC */ -static int (*my_eth_mac_addr) (struct net_device *, void *); static int br2684_mac_addr(struct net_device *dev, void *p) { - int err = my_eth_mac_addr(dev, p); + int err = eth_mac_addr(dev, p); if (!err) BRPRIV(dev)->mac_was_set = 1; return err; @@ -430,17 +423,17 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) /* sigh, interface is down? 
*/ if (unlikely(!(net_dev->flags & IFF_UP))) goto dropped; - brdev->stats.rx_packets++; - brdev->stats.rx_bytes += skb->len; + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(skb); return; dropped: - brdev->stats.rx_dropped++; + net_dev->stats.rx_dropped++; goto free_skb; error: - brdev->stats.rx_errors++; + net_dev->stats.rx_errors++; free_skb: dev_kfree_skb(skb); return; @@ -531,8 +524,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) skb->next = skb->prev = NULL; br2684_push(atmvcc, skb); - BRPRIV(skb->dev)->stats.rx_bytes -= skb->len; - BRPRIV(skb->dev)->stats.rx_packets--; + skb->dev->stats.rx_bytes -= skb->len; + skb->dev->stats.rx_packets--; skb = next; } @@ -544,17 +537,20 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) return err; } +static const struct net_device_ops br2684_netdev_ops = { + .ndo_start_xmit = br2684_start_xmit, + .ndo_set_mac_address = br2684_mac_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + static void br2684_setup(struct net_device *netdev) { struct br2684_dev *brdev = BRPRIV(netdev); ether_setup(netdev); - brdev->net_dev = netdev; - my_eth_mac_addr = netdev->set_mac_address; - netdev->set_mac_address = br2684_mac_addr; - netdev->hard_start_xmit = br2684_start_xmit; - netdev->get_stats = br2684_get_stats; + netdev->netdev_ops = &br2684_netdev_ops; INIT_LIST_HEAD(&brdev->brvccs); } @@ -565,10 +561,8 @@ static void br2684_setup_routed(struct net_device *netdev) brdev->net_dev = netdev; netdev->hard_header_len = 0; - my_eth_mac_addr = netdev->set_mac_address; - netdev->set_mac_address = br2684_mac_addr; - netdev->hard_start_xmit = br2684_start_xmit; - netdev->get_stats = br2684_get_stats; + + netdev->netdev_ops = &br2684_netdev_ops; netdev->addr_len = 0; netdev->mtu = 1500; netdev->type = ARPHRD_PPP; diff --git a/net/atm/clip.c b/net/atm/clip.c index 2d33a83be79..da42fd06b61 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -214,15 +214,15 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) skb->protocol = ((__be16 *) skb->data)[3]; skb_pull(skb, RFC1483LLC_LEN); if (skb->protocol == htons(ETH_P_ARP)) { - PRIV(skb->dev)->stats.rx_packets++; - PRIV(skb->dev)->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + skb->dev->stats.rx_bytes += skb->len; clip_arp_rcv(skb); return; } } clip_vcc->last_use = jiffies; - PRIV(skb->dev)->stats.rx_packets++; - PRIV(skb->dev)->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + skb->dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(skb); } @@ -372,7 +372,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!skb->dst) { printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n"); dev_kfree_skb(skb); - clip_priv->stats.tx_dropped++; + dev->stats.tx_dropped++; return 0; } if (!skb->dst->neighbour) { @@ -380,13 +380,13 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) skb->dst->neighbour = clip_find_neighbour(skb->dst, 1); if (!skb->dst->neighbour) { dev_kfree_skb(skb); /* lost that one */ - clip_priv->stats.tx_dropped++; + dev->stats.tx_dropped++; return 0; } #endif printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); dev_kfree_skb(skb); - clip_priv->stats.tx_dropped++; + dev->stats.tx_dropped++; return 0; } entry = NEIGH2ENTRY(skb->dst->neighbour); @@ -400,7 +400,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device 
*dev) skb_queue_tail(&entry->neigh->arp_queue, skb); else { dev_kfree_skb(skb); - clip_priv->stats.tx_dropped++; + dev->stats.tx_dropped++; } return 0; } @@ -423,8 +423,8 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); return 0; } - clip_priv->stats.tx_packets++; - clip_priv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; vcc->send(vcc, skb); if (atm_may_send(vcc, 0)) { entry->vccs->xoff = 0; @@ -443,11 +443,6 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *clip_get_stats(struct net_device *dev) -{ - return &PRIV(dev)->stats; -} - static int clip_mkip(struct atm_vcc *vcc, int timeout) { struct clip_vcc *clip_vcc; @@ -501,8 +496,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) skb_get(skb); clip_push(vcc, skb); - PRIV(skb->dev)->stats.rx_packets--; - PRIV(skb->dev)->stats.rx_bytes -= len; + skb->dev->stats.rx_packets--; + skb->dev->stats.rx_bytes -= len; kfree_skb(skb); } @@ -561,7 +556,6 @@ static void clip_setup(struct net_device *dev) { dev->hard_start_xmit = clip_start_xmit; /* sg_xmit ... */ - dev->get_stats = clip_get_stats; dev->type = ARPHRD_ATM; dev->hard_header_len = RFC1483LLC_LEN; dev->mtu = RFC1626_MTU; diff --git a/net/atm/lec.c b/net/atm/lec.c index e5e301550e8..c0cba9a037e 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -62,7 +62,6 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 }; static int lec_open(struct net_device *dev); static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev); static int lec_close(struct net_device *dev); -static struct net_device_stats *lec_get_stats(struct net_device *dev); static void lec_init(struct net_device *dev); static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, const unsigned char *mac_addr); @@ -218,28 +217,28 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) static int lec_open(struct net_device *dev) { - struct lec_priv *priv = netdev_priv(dev); - netif_start_queue(dev); - memset(&priv->stats, 0, sizeof(struct net_device_stats)); + memset(&dev->stats, 0, sizeof(struct net_device_stats)); return 0; } -static __inline__ void -lec_send(struct atm_vcc *vcc, struct sk_buff *skb, struct lec_priv *priv) +static void +lec_send(struct atm_vcc *vcc, struct sk_buff *skb) { + struct net_device *dev = skb->dev; + ATM_SKB(skb)->vcc = vcc; ATM_SKB(skb)->atm_options = vcc->atm_options; atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); if (vcc->send(vcc, skb) < 0) { - priv->stats.tx_dropped++; + dev->stats.tx_dropped++; return; } - priv->stats.tx_packets++; - priv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; } static void lec_tx_timeout(struct net_device *dev) @@ -270,7 +269,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) pr_debug("lec_start_xmit called\n"); if (!priv->lecd) { printk("%s:No lecd attached\n", dev->name); - priv->stats.tx_errors++; + dev->stats.tx_errors++; netif_stop_queue(dev); return -EUNATCH; } @@ -345,7 +344,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) GFP_ATOMIC); dev_kfree_skb(skb); if (skb2 == NULL) { - priv->stats.tx_dropped++; + dev->stats.tx_dropped++; return 0; } skb = skb2; @@ -380,7 +379,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) ("%s:lec_start_xmit: tx queue full or no arp entry, 
dropping, ", dev->name); pr_debug("MAC address %pM\n", lec_h->h_dest); - priv->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); } goto out; @@ -392,10 +391,10 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { pr_debug("lec.c: emptying tx queue, "); pr_debug("MAC address %pM\n", lec_h->h_dest); - lec_send(vcc, skb2, priv); + lec_send(vcc, skb2); } - lec_send(vcc, skb, priv); + lec_send(vcc, skb); if (!atm_may_send(vcc, 0)) { struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); @@ -427,15 +426,6 @@ static int lec_close(struct net_device *dev) return 0; } -/* - * Get the current statistics. - * This may be called with the card open or closed. - */ -static struct net_device_stats *lec_get_stats(struct net_device *dev) -{ - return &((struct lec_priv *)netdev_priv(dev))->stats; -} - static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; @@ -677,17 +667,19 @@ static void lec_set_multicast_list(struct net_device *dev) return; } +static const struct net_device_ops lec_netdev_ops = { + .ndo_open = lec_open, + .ndo_stop = lec_close, + .ndo_start_xmit = lec_start_xmit, + .ndo_change_mtu = lec_change_mtu, + .ndo_tx_timeout = lec_tx_timeout, + .ndo_set_multicast_list = lec_set_multicast_list, +}; + + static void lec_init(struct net_device *dev) { - dev->change_mtu = lec_change_mtu; - dev->open = lec_open; - dev->stop = lec_close; - dev->hard_start_xmit = lec_start_xmit; - dev->tx_timeout = lec_tx_timeout; - - dev->get_stats = lec_get_stats; - dev->set_multicast_list = lec_set_multicast_list; - dev->do_ioctl = NULL; + dev->netdev_ops = &lec_netdev_ops; printk("%s: Initialized!\n", dev->name); } @@ -810,8 +802,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) else #endif skb->protocol = eth_type_trans(skb, dev); - priv->stats.rx_packets++; - priv->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(skb); } @@ -1887,7 +1879,7 @@ restart: lec_arp_hold(entry); spin_unlock_irqrestore(&priv->lec_arp_lock, flags); while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) - lec_send(vcc, skb, entry->priv); + lec_send(vcc, skb); entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); @@ -2305,7 +2297,7 @@ restart: lec_arp_hold(entry); spin_unlock_irqrestore(&priv->lec_arp_lock, flags); while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) - lec_send(vcc, skb, entry->priv); + lec_send(vcc, skb); entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); diff --git a/net/atm/lec.h b/net/atm/lec.h index 0d376682c1a..9d14d196cc1 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h @@ -69,7 +69,6 @@ struct lane2_ops { #define LEC_ARP_TABLE_SIZE 16 struct lec_priv { - struct net_device_stats stats; unsigned short lecid; /* Lecid of this client */ struct hlist_head lec_arp_empty_ones; /* Used for storing VCC's that don't have a MAC address attached yet */ diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 00d9e5e1315..d127fd3ba5c 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1986,7 +1986,7 @@ static const struct proto_ops ax25_proto_ops = { * Called by socket.c on kernel start up */ static struct packet_type ax25_packet_type = { - .type = __constant_htons(ETH_P_AX25), + .type = cpu_to_be16(ETH_P_AX25), .dev = NULL, /* All devices */ .func = ax25_kiss_rcv, }; diff --git a/net/bridge/br_netfilter.c 
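
The br2684, CLIP and LANE hunks above all drop per-device callback assignments (hard_start_xmit, get_stats, set_mac_address, ...) in favour of one shared const net_device_ops table. A stand-alone sketch of the same pattern with made-up names, showing why a single read-only table per driver replaces many writable pointers per device; this is illustration only, not the kernel structures:

#include <stdio.h>

struct demo_dev;

struct demo_ops {
	int  (*open)(struct demo_dev *dev);
	void (*xmit)(struct demo_dev *dev, const char *pkt);
};

struct demo_dev {
	const char *name;
	const struct demo_ops *ops;	/* one pointer instead of N callback fields */
};

static int demo_open(struct demo_dev *dev)
{
	printf("%s: up\n", dev->name);
	return 0;
}

static void demo_xmit(struct demo_dev *dev, const char *pkt)
{
	printf("%s: tx %s\n", dev->name, pkt);
}

/* Shared and const: every device instance points at the same table. */
static const struct demo_ops demo_netdev_ops = {
	.open = demo_open,
	.xmit = demo_xmit,
};

int main(void)
{
	struct demo_dev d = { .name = "demo0", .ops = &demo_netdev_ops };

	d.ops->open(&d);
	d.ops->xmit(&d, "hello");
	return 0;
}
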
b/net/bridge/br_netfilter.c index cf754ace0b7..3953ac4214c 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -107,7 +107,7 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu) static struct dst_ops fake_dst_ops = { .family = AF_INET, - .protocol = __constant_htons(ETH_P_IP), + .protocol = cpu_to_be16(ETH_P_IP), .update_pmtu = fake_update_pmtu, .entries = ATOMIC_INIT(0), }; diff --git a/net/can/af_can.c b/net/can/af_can.c index fa417ca6cbe..d90e8dd975f 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -828,7 +828,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, */ static struct packet_type can_packet __read_mostly = { - .type = __constant_htons(ETH_P_CAN), + .type = cpu_to_be16(ETH_P_CAN), .dev = NULL, .func = can_rcv, }; diff --git a/net/core/dev.c b/net/core/dev.c index 5379b0c1190..220f52a1001 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -135,6 +135,14 @@ /* This should be increased if a protocol with a bigger head is added. */ #define GRO_MAX_HEAD (MAX_HEADER + 128) +enum { + GRO_MERGED, + GRO_MERGED_FREE, + GRO_HELD, + GRO_NORMAL, + GRO_DROP, +}; + /* * The list of packet types we will receive (as opposed to discard) * and the routines to invoke. @@ -207,6 +215,13 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)]; } +static inline void *skb_gro_mac_header(struct sk_buff *skb) +{ + return skb_mac_header(skb) < skb->data ? skb_mac_header(skb) : + page_address(skb_shinfo(skb)->frags[0].page) + + skb_shinfo(skb)->frags[0].page_offset; +} + /* Device list insertion */ static int list_netdevice(struct net_device *dev) { @@ -1708,56 +1723,26 @@ out_kfree_skb: return 0; } -static u32 simple_tx_hashrnd; -static int simple_tx_hashrnd_initialized = 0; +static u32 skb_tx_hashrnd; +static int skb_tx_hashrnd_initialized = 0; -static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) +static u16 skb_tx_hash(struct net_device *dev, struct sk_buff *skb) { - u32 addr1, addr2, ports; - u32 hash, ihl; - u8 ip_proto = 0; + u32 hash; - if (unlikely(!simple_tx_hashrnd_initialized)) { - get_random_bytes(&simple_tx_hashrnd, 4); - simple_tx_hashrnd_initialized = 1; + if (unlikely(!skb_tx_hashrnd_initialized)) { + get_random_bytes(&skb_tx_hashrnd, 4); + skb_tx_hashrnd_initialized = 1; } - switch (skb->protocol) { - case htons(ETH_P_IP): - if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) - ip_proto = ip_hdr(skb)->protocol; - addr1 = ip_hdr(skb)->saddr; - addr2 = ip_hdr(skb)->daddr; - ihl = ip_hdr(skb)->ihl; - break; - case htons(ETH_P_IPV6): - ip_proto = ipv6_hdr(skb)->nexthdr; - addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; - addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; - ihl = (40 >> 2); - break; - default: - return 0; - } - - - switch (ip_proto) { - case IPPROTO_TCP: - case IPPROTO_UDP: - case IPPROTO_DCCP: - case IPPROTO_ESP: - case IPPROTO_AH: - case IPPROTO_SCTP: - case IPPROTO_UDPLITE: - ports = *((u32 *) (skb_network_header(skb) + (ihl * 4))); - break; - - default: - ports = 0; - break; - } + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + } else if (skb->sk && skb->sk->sk_hash) { + hash = skb->sk->sk_hash; + } else + hash = skb->protocol; - hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd); + hash = jhash_1word(hash, skb_tx_hashrnd); return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); } @@ -1771,7 +1756,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, if 
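
The new skb_tx_hash() above hashes the recorded RX queue, the socket hash or the protocol with jhash_1word() and then scales the 32-bit result onto the device's real_num_tx_queues with a multiply-and-shift instead of a modulo. A stand-alone sketch, not part of the patch, of just that scaling step:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash onto [0, nqueues) without a divide:
 * (hash / 2^32) * nqueues, kept entirely in integer arithmetic. */
static uint16_t pick_queue(uint32_t hash, uint16_t nqueues)
{
	return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
}

int main(void)
{
	uint32_t hashes[] = { 0x00000000u, 0x40000000u, 0x80000000u, 0xffffffffu };
	unsigned i;

	for (i = 0; i < 4; i++)
		printf("hash %#10x -> queue %u of 8\n",
		       (unsigned)hashes[i], (unsigned)pick_queue(hashes[i], 8));
	return 0;
}
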
(ops->ndo_select_queue) queue_index = ops->ndo_select_queue(dev, skb); else if (dev->real_num_tx_queues > 1) - queue_index = simple_tx_hash(dev, skb); + queue_index = skb_tx_hash(dev, skb); skb_set_queue_mapping(skb, queue_index); return netdev_get_tx_queue(dev, queue_index); @@ -2372,7 +2357,6 @@ static int napi_gro_complete(struct sk_buff *skb) out: skb_shinfo(skb)->gso_size = 0; - __skb_push(skb, -skb_network_offset(skb)); return netif_receive_skb(skb); } @@ -2390,6 +2374,25 @@ void napi_gro_flush(struct napi_struct *napi) } EXPORT_SYMBOL(napi_gro_flush); +void *skb_gro_header(struct sk_buff *skb, unsigned int hlen) +{ + unsigned int offset = skb_gro_offset(skb); + + hlen += offset; + if (hlen <= skb_headlen(skb)) + return skb->data + offset; + + if (unlikely(!skb_shinfo(skb)->nr_frags || + skb_shinfo(skb)->frags[0].size <= + hlen - skb_headlen(skb) || + PageHighMem(skb_shinfo(skb)->frags[0].page))) + return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL; + + return page_address(skb_shinfo(skb)->frags[0].page) + + skb_shinfo(skb)->frags[0].page_offset + offset; +} +EXPORT_SYMBOL(skb_gro_header); + int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff **pp = NULL; @@ -2399,7 +2402,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) int count = 0; int same_flow; int mac_len; - int free; + int ret; if (!(skb->dev->features & NETIF_F_GRO)) goto normal; @@ -2410,11 +2413,13 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { struct sk_buff *p; + void *mac; if (ptype->type != type || ptype->dev || !ptype->gro_receive) continue; - skb_reset_network_header(skb); + skb_set_network_header(skb, skb_gro_offset(skb)); + mac = skb_gro_mac_header(skb); mac_len = skb->network_header - skb->mac_header; skb->mac_len = mac_len; NAPI_GRO_CB(skb)->same_flow = 0; @@ -2428,8 +2433,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) continue; if (p->mac_len != mac_len || - memcmp(skb_mac_header(p), skb_mac_header(skb), - mac_len)) + memcmp(skb_mac_header(p), mac, mac_len)) NAPI_GRO_CB(p)->same_flow = 0; } @@ -2442,7 +2446,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) goto normal; same_flow = NAPI_GRO_CB(skb)->same_flow; - free = NAPI_GRO_CB(skb)->free; + ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; if (pp) { struct sk_buff *nskb = *pp; @@ -2456,21 +2460,28 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) if (same_flow) goto ok; - if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) { - __skb_push(skb, -skb_network_offset(skb)); + if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) goto normal; - } NAPI_GRO_CB(skb)->count = 1; - skb_shinfo(skb)->gso_size = skb->len; + skb_shinfo(skb)->gso_size = skb_gro_len(skb); skb->next = napi->gro_list; napi->gro_list = skb; + ret = GRO_HELD; + +pull: + if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) { + if (napi->gro_list == skb) + napi->gro_list = skb->next; + ret = GRO_DROP; + } ok: - return free; + return ret; normal: - return -1; + ret = GRO_NORMAL; + goto pull; } EXPORT_SYMBOL(dev_gro_receive); @@ -2486,18 +2497,32 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) return dev_gro_receive(napi, skb); } -int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +int napi_skb_finish(int ret, struct sk_buff *skb) { - switch (__napi_gro_receive(napi, skb)) { - case -1: + int err = NET_RX_SUCCESS; + + switch (ret) { + case GRO_NORMAL: return netif_receive_skb(skb); - case 1: + case GRO_DROP: + err = NET_RX_DROP; + /* fall through */ + + case GRO_MERGED_FREE: kfree_skb(skb); break; } - return NET_RX_SUCCESS; + return err; +} +EXPORT_SYMBOL(napi_skb_finish); + +int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +{ + skb_gro_reset_offset(skb); + + return napi_skb_finish(__napi_gro_receive(napi, skb), skb); } EXPORT_SYMBOL(napi_gro_receive); @@ -2515,6 +2540,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, { struct net_device *dev = napi->dev; struct sk_buff *skb = napi->skb; + struct ethhdr *eth; + skb_frag_t *frag; + int i; napi->skb = NULL; @@ -2527,20 +2555,36 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, } BUG_ON(info->nr_frags > MAX_SKB_FRAGS); + frag = &info->frags[info->nr_frags - 1]; + + for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) { + skb_fill_page_desc(skb, i, frag->page, frag->page_offset, + frag->size); + frag++; + } skb_shinfo(skb)->nr_frags = info->nr_frags; - memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags)); skb->data_len = info->len; skb->len += info->len; skb->truesize += info->len; - if (!pskb_may_pull(skb, ETH_HLEN)) { + skb_reset_mac_header(skb); + skb_gro_reset_offset(skb); + + eth = skb_gro_header(skb, sizeof(*eth)); + if (!eth) { napi_reuse_skb(napi, skb); skb = NULL; goto out; } - skb->protocol = eth_type_trans(skb, dev); + skb_gro_pull(skb, sizeof(*eth)); + + /* + * This works because the only protocols we care about don't require + * special handling. We'll fix it up properly at the end. 
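
napi_skb_finish() and napi_frags_finish() above centralise what used to be open-coded switch statements in the VLAN and core receive paths: dev_gro_receive() now returns one of the GRO_* codes added at the top of the file and the finish helper decides the skb's fate. A stand-alone sketch of that dispatch, with stand-in behaviour instead of real skb handling:

#include <stdio.h>

enum gro_result { GRO_MERGED, GRO_MERGED_FREE, GRO_HELD, GRO_NORMAL, GRO_DROP };

/* What a finish helper does for each verdict from the GRO engine. */
static const char *finish(enum gro_result ret)
{
	switch (ret) {
	case GRO_NORMAL:      return "pass to netif_receive_skb()";
	case GRO_HELD:        return "held on napi->gro_list for a later flush";
	case GRO_MERGED:      return "merged into a held packet, keep this skb";
	case GRO_MERGED_FREE: return "merged, free this skb";
	case GRO_DROP:        return "free and report NET_RX_DROP";
	}
	return "?";
}

int main(void)
{
	int r;

	for (r = GRO_MERGED; r <= GRO_DROP; r++)
		printf("%d: %s\n", r, finish((enum gro_result)r));
	return 0;
}
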
+ */ + skb->protocol = eth->h_proto; skb->ip_summed = info->ip_summed; skb->csum = info->csum; @@ -2550,29 +2594,43 @@ out: } EXPORT_SYMBOL(napi_fraginfo_skb); -int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) +int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret) { - struct sk_buff *skb = napi_fraginfo_skb(napi, info); - int err = NET_RX_DROP; + int err = NET_RX_SUCCESS; - if (!skb) - goto out; + switch (ret) { + case GRO_NORMAL: + case GRO_HELD: + skb->protocol = eth_type_trans(skb, napi->dev); - err = NET_RX_SUCCESS; + if (ret == GRO_NORMAL) + return netif_receive_skb(skb); - switch (__napi_gro_receive(napi, skb)) { - case -1: - return netif_receive_skb(skb); + skb_gro_pull(skb, -ETH_HLEN); + break; - case 0: - goto out; - } + case GRO_DROP: + err = NET_RX_DROP; + /* fall through */ - napi_reuse_skb(napi, skb); + case GRO_MERGED_FREE: + napi_reuse_skb(napi, skb); + break; + } -out: return err; } +EXPORT_SYMBOL(napi_frags_finish); + +int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) +{ + struct sk_buff *skb = napi_fraginfo_skb(napi, info); + + if (!skb) + return NET_RX_DROP; + + return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); +} EXPORT_SYMBOL(napi_gro_frags); static int process_backlog(struct napi_struct *napi, int quota) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index da74b844f4e..e55d1ef5690 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1333,14 +1333,39 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) put_page(spd->pages[i]); } -static inline struct page *linear_to_page(struct page *page, unsigned int len, - unsigned int offset) -{ - struct page *p = alloc_pages(GFP_KERNEL, 0); +static inline struct page *linear_to_page(struct page *page, unsigned int *len, + unsigned int *offset, + struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct page *p = sk->sk_sndmsg_page; + unsigned int off; + + if (!p) { +new_page: + p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0); + if (!p) + return NULL; - if (!p) - return NULL; - memcpy(page_address(p) + offset, page_address(page) + offset, len); + off = sk->sk_sndmsg_off = 0; + /* hold one ref to this page until it's full */ + } else { + unsigned int mlen; + + off = sk->sk_sndmsg_off; + mlen = PAGE_SIZE - off; + if (mlen < 64 && mlen < *len) { + put_page(p); + goto new_page; + } + + *len = min_t(unsigned int, *len, mlen); + } + + memcpy(page_address(p) + off, page_address(page) + *offset, *len); + sk->sk_sndmsg_off += *len; + *offset = off; + get_page(p); return p; } @@ -1349,21 +1374,21 @@ static inline struct page *linear_to_page(struct page *page, unsigned int len, * Fill page/offset/length into spd, if it can hold more pages. 
*/ static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, - unsigned int len, unsigned int offset, + unsigned int *len, unsigned int offset, struct sk_buff *skb, int linear) { if (unlikely(spd->nr_pages == PIPE_BUFFERS)) return 1; if (linear) { - page = linear_to_page(page, len, offset); + page = linear_to_page(page, len, &offset, skb); if (!page) return 1; } else get_page(page); spd->pages[spd->nr_pages] = page; - spd->partial[spd->nr_pages].len = len; + spd->partial[spd->nr_pages].len = *len; spd->partial[spd->nr_pages].offset = offset; spd->nr_pages++; @@ -1405,7 +1430,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff, /* the linear region may spread across several pages */ flen = min_t(unsigned int, flen, PAGE_SIZE - poff); - if (spd_fill_page(spd, page, flen, poff, skb, linear)) + if (spd_fill_page(spd, page, &flen, poff, skb, linear)) return 1; __segment_seek(&page, &poff, &plen, flen); @@ -2585,17 +2610,23 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) struct sk_buff *p = *head; struct sk_buff *nskb; unsigned int headroom; - unsigned int hlen = p->data - skb_mac_header(p); - unsigned int len = skb->len; + unsigned int len = skb_gro_len(skb); - if (hlen + p->len + len >= 65536) + if (p->len + len >= 65536) return -E2BIG; if (skb_shinfo(p)->frag_list) goto merge; - else if (!skb_headlen(p) && !skb_headlen(skb) && - skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags < - MAX_SKB_FRAGS) { + else if (skb_headlen(skb) <= skb_gro_offset(skb)) { + if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags > + MAX_SKB_FRAGS) + return -E2BIG; + + skb_shinfo(skb)->frags[0].page_offset += + skb_gro_offset(skb) - skb_headlen(skb); + skb_shinfo(skb)->frags[0].size -= + skb_gro_offset(skb) - skb_headlen(skb); + memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); @@ -2612,7 +2643,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) } headroom = skb_headroom(p); - nskb = netdev_alloc_skb(p->dev, headroom); + nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); if (unlikely(!nskb)) return -ENOMEM; @@ -2620,12 +2651,15 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) nskb->mac_len = p->mac_len; skb_reserve(nskb, headroom); + __skb_put(nskb, skb_gro_offset(p)); - skb_set_mac_header(nskb, -hlen); + skb_set_mac_header(nskb, skb_mac_header(p) - p->data); skb_set_network_header(nskb, skb_network_offset(p)); skb_set_transport_header(nskb, skb_transport_offset(p)); - memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen); + __skb_pull(p, skb_gro_offset(p)); + memcpy(skb_mac_header(nskb), skb_mac_header(p), + p->data - skb_mac_header(p)); *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); skb_shinfo(nskb)->frag_list = p; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index f2230fc168e..08a569ff02d 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -42,9 +42,11 @@ extern int dccp_debug; #define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) +#define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) #else #define dccp_pr_debug(format, a...) #define dccp_pr_debug_cat(format, a...) +#define dccp_debug(format, a...) 
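
linear_to_page() above stops allocating a fresh page for every linear chunk spliced out of an skb and instead packs chunks into the socket's sk_sndmsg_page, opening a new page only when the remaining space is too small to be useful. A stand-alone sketch of that packing policy; the socket and the page refcounting are elided and the names are stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

static char *sndmsg_page;		/* stand-in for sk->sk_sndmsg_page */
static unsigned int sndmsg_off;		/* stand-in for sk->sk_sndmsg_off  */

/* Copy *len bytes into the shared page; may shrink *len, returns the offset used. */
static int copy_to_shared_page(const void *buf, unsigned int *len)
{
	unsigned int room = sndmsg_page ? PAGE_SIZE - sndmsg_off : 0;

	if (*len > PAGE_SIZE)
		*len = PAGE_SIZE;

	/* Open a fresh page only when the current one is missing or nearly full. */
	if (!sndmsg_page || (room < 64 && room < *len)) {
		sndmsg_page = malloc(PAGE_SIZE);	/* old page's refcount elided here */
		if (!sndmsg_page)
			return -1;
		sndmsg_off = 0;
	} else if (*len > room) {
		*len = room;		/* caller loops for the remainder */
	}

	memcpy(sndmsg_page + sndmsg_off, buf, *len);
	sndmsg_off += *len;
	return (int)(sndmsg_off - *len);
}

int main(void)
{
	char data[100] = { 0 };
	unsigned int len = sizeof(data);

	printf("first chunk at offset %d\n", copy_to_shared_page(data, &len));
	len = sizeof(data);
	printf("second chunk at offset %d\n", copy_to_shared_page(data, &len));
	return 0;
}
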
#endif extern struct inet_hashinfo dccp_hashinfo; @@ -95,9 +97,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); extern int sysctl_dccp_request_retries; extern int sysctl_dccp_retries1; extern int sysctl_dccp_retries2; -extern int sysctl_dccp_feat_sequence_window; -extern int sysctl_dccp_feat_rx_ccid; -extern int sysctl_dccp_feat_tx_ccid; extern int sysctl_dccp_tx_qlen; extern int sysctl_dccp_sync_ratelimit; @@ -409,23 +408,21 @@ static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack, static inline void dccp_update_gsr(struct sock *sk, u64 seq) { struct dccp_sock *dp = dccp_sk(sk); - const struct dccp_minisock *dmsk = dccp_msk(sk); dp->dccps_gsr = seq; - dccp_set_seqno(&dp->dccps_swl, - dp->dccps_gsr + 1 - (dmsk->dccpms_sequence_window / 4)); - dccp_set_seqno(&dp->dccps_swh, - dp->dccps_gsr + (3 * dmsk->dccpms_sequence_window) / 4); + /* Sequence validity window depends on remote Sequence Window (7.5.1) */ + dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4); + dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4); } static inline void dccp_update_gss(struct sock *sk, u64 seq) { struct dccp_sock *dp = dccp_sk(sk); - dp->dccps_awh = dp->dccps_gss = seq; - dccp_set_seqno(&dp->dccps_awl, - (dp->dccps_gss - - dccp_msk(sk)->dccpms_sequence_window + 1)); + dp->dccps_gss = seq; + /* Ack validity window depends on local Sequence Window value (7.5.1) */ + dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win); + dp->dccps_awh = dp->dccps_gss; } static inline int dccp_ack_pending(const struct sock *sk) diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 4152308958a..b04160a2eea 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -25,6 +25,11 @@ #include "ccid.h" #include "feat.h" +/* feature-specific sysctls - initialised to the defaults from RFC 4340, 6.4 */ +unsigned long sysctl_dccp_sequence_window __read_mostly = 100; +int sysctl_dccp_rx_ccid __read_mostly = 2, + sysctl_dccp_tx_ccid __read_mostly = 2; + /* * Feature activation handlers. * @@ -51,8 +56,17 @@ static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) static int dccp_hdlr_seq_win(struct sock *sk, u64 seq_win, bool rx) { - if (!rx) - dccp_msk(sk)->dccpms_sequence_window = seq_win; + struct dccp_sock *dp = dccp_sk(sk); + + if (rx) { + dp->dccps_r_seq_win = seq_win; + /* propagate changes to update SWL/SWH */ + dccp_update_gsr(sk, dp->dccps_gsr); + } else { + dp->dccps_l_seq_win = seq_win; + /* propagate changes to update AWL */ + dccp_update_gss(sk, dp->dccps_gss); + } return 0; } @@ -194,6 +208,100 @@ static int dccp_feat_default_value(u8 feat_num) return idx < 0 ? 0 : dccp_feat_table[idx].default_value; } +/* + * Debugging and verbose-printing section + */ +static const char *dccp_feat_fname(const u8 feat) +{ + static const char *feature_names[] = { + [DCCPF_RESERVED] = "Reserved", + [DCCPF_CCID] = "CCID", + [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", + [DCCPF_SEQUENCE_WINDOW] = "Sequence Window", + [DCCPF_ECN_INCAPABLE] = "ECN Incapable", + [DCCPF_ACK_RATIO] = "Ack Ratio", + [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector", + [DCCPF_SEND_NDP_COUNT] = "Send NDP Count", + [DCCPF_MIN_CSUM_COVER] = "Min. 
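
dccp_update_gsr() and dccp_update_gss() above now derive the sequence and acknowledgment validity windows (SWL/SWH, AWL/AWH) from the negotiated remote and local Sequence Window values using 48-bit modular arithmetic, per RFC 4340, 7.5.1. A stand-alone sketch of that arithmetic; ADD48/SUB48 below are local stand-ins for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define SEQ48_MASK ((UINT64_C(1) << 48) - 1)
#define ADD48(a, b) (((a) + (b)) & SEQ48_MASK)	/* add modulo 2^48 */
#define SUB48(a, b) (((a) - (b)) & SEQ48_MASK)	/* subtract modulo 2^48 */

int main(void)
{
	uint64_t gsr = 1000;		/* greatest seqno received        */
	uint64_t gss = 5000;		/* greatest seqno sent            */
	uint64_t r_seq_win = 100;	/* remote Sequence Window feature */
	uint64_t l_seq_win = 100;	/* local Sequence Window feature  */

	/* Sequence validity window depends on the remote window (7.5.1). */
	uint64_t swl = SUB48(ADD48(gsr, 1), r_seq_win / 4);
	uint64_t swh = ADD48(gsr, (3 * r_seq_win) / 4);
	/* Ack validity window depends on the local window (7.5.1). */
	uint64_t awl = SUB48(ADD48(gss, 1), l_seq_win);
	uint64_t awh = gss;

	printf("SWL=%llu SWH=%llu AWL=%llu AWH=%llu\n",
	       (unsigned long long)swl, (unsigned long long)swh,
	       (unsigned long long)awl, (unsigned long long)awh);
	return 0;
}
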
Csum Coverage", + [DCCPF_DATA_CHECKSUM] = "Send Data Checksum", + }; + if (feat > DCCPF_DATA_CHECKSUM && feat < DCCPF_MIN_CCID_SPECIFIC) + return feature_names[DCCPF_RESERVED]; + + if (feat == DCCPF_SEND_LEV_RATE) + return "Send Loss Event Rate"; + if (feat >= DCCPF_MIN_CCID_SPECIFIC) + return "CCID-specific"; + + return feature_names[feat]; +} + +static const char *dccp_feat_sname[] = { "DEFAULT", "INITIALISING", "CHANGING", + "UNSTABLE", "STABLE" }; + +#ifdef CONFIG_IP_DCCP_DEBUG +static const char *dccp_feat_oname(const u8 opt) +{ + switch (opt) { + case DCCPO_CHANGE_L: return "Change_L"; + case DCCPO_CONFIRM_L: return "Confirm_L"; + case DCCPO_CHANGE_R: return "Change_R"; + case DCCPO_CONFIRM_R: return "Confirm_R"; + } + return NULL; +} + +static void dccp_feat_printval(u8 feat_num, dccp_feat_val const *val) +{ + u8 i, type = dccp_feat_type(feat_num); + + if (val == NULL || (type == FEAT_SP && val->sp.vec == NULL)) + dccp_pr_debug_cat("(NULL)"); + else if (type == FEAT_SP) + for (i = 0; i < val->sp.len; i++) + dccp_pr_debug_cat("%s%u", i ? " " : "", val->sp.vec[i]); + else if (type == FEAT_NN) + dccp_pr_debug_cat("%llu", (unsigned long long)val->nn); + else + dccp_pr_debug_cat("unknown type %u", type); +} + +static void dccp_feat_printvals(u8 feat_num, u8 *list, u8 len) +{ + u8 type = dccp_feat_type(feat_num); + dccp_feat_val fval = { .sp.vec = list, .sp.len = len }; + + if (type == FEAT_NN) + fval.nn = dccp_decode_value_var(list, len); + dccp_feat_printval(feat_num, &fval); +} + +static void dccp_feat_print_entry(struct dccp_feat_entry const *entry) +{ + dccp_debug(" * %s %s = ", entry->is_local ? "local" : "remote", + dccp_feat_fname(entry->feat_num)); + dccp_feat_printval(entry->feat_num, &entry->val); + dccp_pr_debug_cat(", state=%s %s\n", dccp_feat_sname[entry->state], + entry->needs_confirm ? "(Confirm pending)" : ""); +} + +#define dccp_feat_print_opt(opt, feat, val, len, mandatory) do { \ + dccp_pr_debug("%s(%s, ", dccp_feat_oname(opt), dccp_feat_fname(feat));\ + dccp_feat_printvals(feat, val, len); \ + dccp_pr_debug_cat(") %s\n", mandatory ? "!" : ""); } while (0) + +#define dccp_feat_print_fnlist(fn_list) { \ + const struct dccp_feat_entry *___entry; \ + \ + dccp_pr_debug("List Dump:\n"); \ + list_for_each_entry(___entry, fn_list, node) \ + dccp_feat_print_entry(___entry); \ +} +#else /* ! CONFIG_IP_DCCP_DEBUG */ +#define dccp_feat_print_opt(opt, feat, val, len, mandatory) +#define dccp_feat_print_fnlist(fn_list) +#endif + static int __dccp_feat_activate(struct sock *sk, const int idx, const bool is_local, dccp_feat_val const *fval) { @@ -226,6 +334,10 @@ static int __dccp_feat_activate(struct sock *sk, const int idx, /* Location is RX if this is a local-RX or remote-TX feature */ rx = (is_local == (dccp_feat_table[idx].rxtx == FEAT_AT_RX)); + dccp_debug(" -> activating %s %s, %sval=%llu\n", rx ? "RX" : "TX", + dccp_feat_fname(dccp_feat_table[idx].feat_num), + fval ? 
"" : "default ", (unsigned long long)val); + return dccp_feat_table[idx].activation_hdlr(sk, val, rx); } @@ -530,6 +642,7 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq, return -1; } } + dccp_feat_print_opt(opt, pos->feat_num, ptr, len, 0); if (dccp_insert_fn_opt(skb, opt, pos->feat_num, ptr, len, rpt)) return -1; @@ -783,6 +896,7 @@ int dccp_feat_finalise_settings(struct dccp_sock *dp) while (i--) if (ccids[i] > 0 && dccp_feat_propagate_ccid(fn, ccids[i], i)) return -1; + dccp_feat_print_fnlist(fn); return 0; } @@ -901,6 +1015,8 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt, if (len == 0 || type == FEAT_UNKNOWN) /* 6.1 and 6.6.8 */ goto unknown_feature_or_value; + dccp_feat_print_opt(opt, feat, val, len, is_mandatory); + /* * Negotiation of NN features: Change R is invalid, so there is no * simultaneous negotiation; hence we do not look up in the list. @@ -1006,6 +1122,8 @@ static u8 dccp_feat_confirm_recv(struct list_head *fn, u8 is_mandatory, u8 opt, const bool local = (opt == DCCPO_CONFIRM_R); struct dccp_feat_entry *entry = dccp_feat_list_lookup(fn, feat, local); + dccp_feat_print_opt(opt, feat, val, len, is_mandatory); + if (entry == NULL) { /* nothing queued: ignore or handle error */ if (is_mandatory && type == FEAT_UNKNOWN) return DCCP_RESET_CODE_MANDATORY_ERROR; @@ -1115,23 +1233,70 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, return 0; /* ignore FN options in all other states */ } +/** + * dccp_feat_init - Seed feature negotiation with host-specific defaults + * This initialises global defaults, depending on the value of the sysctls. + * These can later be overridden by registering changes via setsockopt calls. + * The last link in the chain is finalise_settings, to make sure that between + * here and the start of actual feature negotiation no inconsistencies enter. + * + * All features not appearing below use either defaults or are otherwise + * later adjusted through dccp_feat_finalise_settings(). + */ int dccp_feat_init(struct sock *sk) { - struct dccp_sock *dp = dccp_sk(sk); - struct dccp_minisock *dmsk = dccp_msk(sk); + struct list_head *fn = &dccp_sk(sk)->dccps_featneg; + u8 on = 1, off = 0; int rc; + struct { + u8 *val; + u8 len; + } tx, rx; + + /* Non-negotiable (NN) features */ + rc = __feat_register_nn(fn, DCCPF_SEQUENCE_WINDOW, 0, + sysctl_dccp_sequence_window); + if (rc) + return rc; + + /* Server-priority (SP) features */ + + /* Advertise that short seqnos are not supported (7.6.1) */ + rc = __feat_register_sp(fn, DCCPF_SHORT_SEQNOS, true, true, &off, 1); + if (rc) + return rc; - INIT_LIST_HEAD(&dmsk->dccpms_pending); /* XXX no longer used */ - INIT_LIST_HEAD(&dmsk->dccpms_conf); /* XXX no longer used */ + /* RFC 4340 12.1: "If a DCCP is not ECN capable, ..." */ + rc = __feat_register_sp(fn, DCCPF_ECN_INCAPABLE, true, true, &on, 1); + if (rc) + return rc; + + /* + * We advertise the available list of CCIDs and reorder according to + * preferences, to avoid failure resulting from negotiating different + * singleton values (which always leads to failure). + * These settings can still (later) be overridden via sockopts. 
+ */ + if (ccid_get_builtin_ccids(&tx.val, &tx.len) || + ccid_get_builtin_ccids(&rx.val, &rx.len)) + return -ENOBUFS; + + if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || + !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) + goto free_ccid_lists; + + rc = __feat_register_sp(fn, DCCPF_CCID, true, false, tx.val, tx.len); + if (rc) + goto free_ccid_lists; + + rc = __feat_register_sp(fn, DCCPF_CCID, false, false, rx.val, rx.len); - /* Ack ratio */ - rc = __feat_register_nn(&dp->dccps_featneg, DCCPF_ACK_RATIO, 0, - dp->dccps_l_ack_ratio); +free_ccid_lists: + kfree(tx.val); + kfree(rx.val); return rc; } -EXPORT_SYMBOL_GPL(dccp_feat_init); - int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list) { struct dccp_sock *dp = dccp_sk(sk); @@ -1156,9 +1321,10 @@ int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list) goto activation_failed; } if (cur->state != FEAT_STABLE) { - DCCP_CRIT("Negotiation of %s %u failed in state %u", + DCCP_CRIT("Negotiation of %s %s failed in state %s", cur->is_local ? "local" : "remote", - cur->feat_num, cur->state); + dccp_feat_fname(cur->feat_num), + dccp_feat_sname[cur->state]); goto activation_failed; } fvals[idx][cur->is_local] = &cur->val; @@ -1199,43 +1365,3 @@ activation_failed: dp->dccps_hc_rx_ackvec = NULL; return -1; } - -#ifdef CONFIG_IP_DCCP_DEBUG -const char *dccp_feat_typename(const u8 type) -{ - switch(type) { - case DCCPO_CHANGE_L: return("ChangeL"); - case DCCPO_CONFIRM_L: return("ConfirmL"); - case DCCPO_CHANGE_R: return("ChangeR"); - case DCCPO_CONFIRM_R: return("ConfirmR"); - /* the following case must not appear in feature negotation */ - default: dccp_pr_debug("unknown type %d [BUG!]\n", type); - } - return NULL; -} - -const char *dccp_feat_name(const u8 feat) -{ - static const char *feature_names[] = { - [DCCPF_RESERVED] = "Reserved", - [DCCPF_CCID] = "CCID", - [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", - [DCCPF_SEQUENCE_WINDOW] = "Sequence Window", - [DCCPF_ECN_INCAPABLE] = "ECN Incapable", - [DCCPF_ACK_RATIO] = "Ack Ratio", - [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector", - [DCCPF_SEND_NDP_COUNT] = "Send NDP Count", - [DCCPF_MIN_CSUM_COVER] = "Min. 
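
dccp_feat_init() above seeds CCID negotiation by fetching the built-in CCID list and moving the sysctl-preferred value to the front before registering the list as a server-priority feature. A stand-alone sketch of that reordering step; the kernel helper's exact semantics may differ slightly, this is only the general idea:

#include <stdint.h>
#include <stdio.h>

/* Move 'preferred' to the front of vec if present; return 1 if found. */
static int prefer(uint8_t preferred, uint8_t *vec, uint8_t len)
{
	uint8_t i, found = 0;

	for (i = 0; i < len; i++)
		if (vec[i] == preferred)
			found = 1;
	if (!found)
		return 0;

	/* Find the preferred entry from the back ... */
	for (i = len - 1; i > 0 && vec[i] != preferred; i--)
		;
	/* ... shift everything before it right by one ... */
	for (; i > 0; i--)
		vec[i] = vec[i - 1];
	vec[0] = preferred;		/* ... and put it in front. */
	return 1;
}

int main(void)
{
	uint8_t ccids[] = { 2, 3 };	/* e.g. built-in CCID-2 and CCID-3 */

	if (prefer(3, ccids, 2))
		printf("preference list: %d %d\n", ccids[0], ccids[1]);
	return 0;
}
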
Csum Coverage", - [DCCPF_DATA_CHECKSUM] = "Send Data Checksum", - }; - if (feat > DCCPF_DATA_CHECKSUM && feat < DCCPF_MIN_CCID_SPECIFIC) - return feature_names[DCCPF_RESERVED]; - - if (feat == DCCPF_SEND_LEV_RATE) - return "Send Loss Event Rate"; - if (feat >= DCCPF_MIN_CCID_SPECIFIC) - return "CCID-specific"; - - return feature_names[feat]; -} -#endif /* CONFIG_IP_DCCP_DEBUG */ diff --git a/net/dccp/feat.h b/net/dccp/feat.h index 9b46e2a7866..f96721619de 100644 --- a/net/dccp/feat.h +++ b/net/dccp/feat.h @@ -100,26 +100,21 @@ struct ccid_dependency { u8 val; }; -#ifdef CONFIG_IP_DCCP_DEBUG -extern const char *dccp_feat_typename(const u8 type); -extern const char *dccp_feat_name(const u8 feat); - -static inline void dccp_feat_debug(const u8 type, const u8 feat, const u8 val) -{ - dccp_pr_debug("%s(%s (%d), %d)\n", dccp_feat_typename(type), - dccp_feat_name(feat), feat, val); -} -#else -#define dccp_feat_debug(type, feat, val) -#endif /* CONFIG_IP_DCCP_DEBUG */ +/* + * Sysctls to seed defaults for feature negotiation + */ +extern unsigned long sysctl_dccp_sequence_window; +extern int sysctl_dccp_rx_ccid; +extern int sysctl_dccp_tx_ccid; +extern int dccp_feat_init(struct sock *sk); +extern void dccp_feat_initialise_sysctls(void); extern int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local, u8 const *list, u8 len); extern int dccp_feat_register_nn(struct sock *sk, u8 feat, u64 val); extern int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *, u8 mand, u8 opt, u8 feat, u8 *val, u8 len); extern int dccp_feat_clone_list(struct list_head const *, struct list_head *); -extern int dccp_feat_init(struct sock *sk); /* * Encoding variable-length options and their maximum length. diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 6821ae33dd3..5ca49cec95f 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -42,11 +42,6 @@ struct inet_timewait_death_row dccp_death_row = { EXPORT_SYMBOL_GPL(dccp_death_row); -void dccp_minisock_init(struct dccp_minisock *dmsk) -{ - dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window; -} - void dccp_time_wait(struct sock *sk, int state, int timeo) { struct inet_timewait_sock *tw = NULL; @@ -110,7 +105,6 @@ struct sock *dccp_create_openreq_child(struct sock *sk, struct dccp_request_sock *dreq = dccp_rsk(req); struct inet_connection_sock *newicsk = inet_csk(newsk); struct dccp_sock *newdp = dccp_sk(newsk); - struct dccp_minisock *newdmsk = dccp_msk(newsk); newdp->dccps_role = DCCP_ROLE_SERVER; newdp->dccps_hc_rx_ackvec = NULL; @@ -128,10 +122,6 @@ struct sock *dccp_create_openreq_child(struct sock *sk, * Initialize S.GAR := S.ISS * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies */ - - /* See dccp_v4_conn_request */ - newdmsk->dccpms_sequence_window = req->rcv_wnd; - newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss; dccp_update_gss(newsk, dreq->dreq_iss); @@ -290,7 +280,6 @@ int dccp_reqsk_init(struct request_sock *req, inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; inet_rsk(req)->loc_port = dccp_hdr(skb)->dccph_dport; inet_rsk(req)->acked = 0; - req->rcv_wnd = sysctl_dccp_feat_sequence_window; dreq->dreq_timestamp_echo = 0; /* inherit feature negotiation options from listening socket */ diff --git a/net/dccp/options.c b/net/dccp/options.c index 7b1165c21f5..1b08cae9c65 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -23,10 +23,6 @@ #include "dccp.h" #include "feat.h" -int sysctl_dccp_feat_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW; -int sysctl_dccp_feat_rx_ccid = 
DCCPF_INITIAL_CCID; -int sysctl_dccp_feat_tx_ccid = DCCPF_INITIAL_CCID; - u64 dccp_decode_value_var(const u8 *bf, const u8 len) { u64 value = 0; @@ -502,10 +498,6 @@ int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, *to++ = *val; if (len) memcpy(to, val, len); - - dccp_pr_debug("%s(%s (%d), ...), length %d\n", - dccp_feat_typename(type), - dccp_feat_name(feat), feat, len); return 0; } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 945b4d5d23b..314a1b5c033 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -174,8 +174,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) struct dccp_sock *dp = dccp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); - dccp_minisock_init(&dp->dccps_minisock); - icsk->icsk_rto = DCCP_TIMEOUT_INIT; icsk->icsk_syn_retries = sysctl_dccp_request_retries; sk->sk_state = DCCP_CLOSED; diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 018e210875e..a5a1856234e 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c @@ -18,55 +18,72 @@ #error This file should not be compiled without CONFIG_SYSCTL defined #endif +/* Boundary values */ +static int zero = 0, + u8_max = 0xFF; +static unsigned long seqw_min = 32; + static struct ctl_table dccp_default_table[] = { { .procname = "seq_window", - .data = &sysctl_dccp_feat_sequence_window, - .maxlen = sizeof(sysctl_dccp_feat_sequence_window), + .data = &sysctl_dccp_sequence_window, + .maxlen = sizeof(sysctl_dccp_sequence_window), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax, + .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */ }, { .procname = "rx_ccid", - .data = &sysctl_dccp_feat_rx_ccid, - .maxlen = sizeof(sysctl_dccp_feat_rx_ccid), + .data = &sysctl_dccp_rx_ccid, + .maxlen = sizeof(sysctl_dccp_rx_ccid), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &u8_max, /* RFC 4340, 10. */ }, { .procname = "tx_ccid", - .data = &sysctl_dccp_feat_tx_ccid, - .maxlen = sizeof(sysctl_dccp_feat_tx_ccid), + .data = &sysctl_dccp_tx_ccid, + .maxlen = sizeof(sysctl_dccp_tx_ccid), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &u8_max, /* RFC 4340, 10. 
*/ }, { .procname = "request_retries", .data = &sysctl_dccp_request_retries, .maxlen = sizeof(sysctl_dccp_request_retries), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &u8_max, }, { .procname = "retries1", .data = &sysctl_dccp_retries1, .maxlen = sizeof(sysctl_dccp_retries1), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &u8_max, }, { .procname = "retries2", .data = &sysctl_dccp_retries2, .maxlen = sizeof(sysctl_dccp_retries2), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &u8_max, }, { .procname = "tx_qlen", .data = &sysctl_dccp_tx_qlen, .maxlen = sizeof(sysctl_dccp_tx_qlen), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, { .procname = "sync_ratelimit", diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index cf0e1849929..12bf7d4c16c 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -2113,7 +2113,7 @@ static struct notifier_block dn_dev_notifier = { extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static struct packet_type dn_dix_packet_type = { - .type = __constant_htons(ETH_P_DNA_RT), + .type = cpu_to_be16(ETH_P_DNA_RT), .dev = NULL, /* All devices */ .func = dn_route_rcv, }; diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index c754670b7fc..5130dee0b38 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -124,7 +124,7 @@ int decnet_dst_gc_interval = 2; static struct dst_ops dn_dst_ops = { .family = PF_DECnet, - .protocol = __constant_htons(ETH_P_DNA_RT), + .protocol = cpu_to_be16(ETH_P_DNA_RT), .gc_thresh = 128, .gc = dn_dst_gc, .check = dn_dst_check, diff --git a/net/dsa/mv88e6123_61_65.c b/net/dsa/mv88e6123_61_65.c index ec8c6a0482d..10031872221 100644 --- a/net/dsa/mv88e6123_61_65.c +++ b/net/dsa/mv88e6123_61_65.c @@ -394,7 +394,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) } static struct dsa_switch_driver mv88e6123_61_65_switch_driver = { - .tag_protocol = __constant_htons(ETH_P_EDSA), + .tag_protocol = cpu_to_be16(ETH_P_EDSA), .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6123_61_65_probe, .setup = mv88e6123_61_65_setup, diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c index 374d46a0126..70fae2444cb 100644 --- a/net/dsa/mv88e6131.c +++ b/net/dsa/mv88e6131.c @@ -353,7 +353,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds) } static struct dsa_switch_driver mv88e6131_switch_driver = { - .tag_protocol = __constant_htons(ETH_P_DSA), + .tag_protocol = cpu_to_be16(ETH_P_DSA), .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6131_probe, .setup = mv88e6131_setup, diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index f99a019b939..63e532a69fd 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -176,7 +176,7 @@ out: } static struct packet_type dsa_packet_type = { - .type = __constant_htons(ETH_P_DSA), + .type = cpu_to_be16(ETH_P_DSA), .func = dsa_rcv, }; diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 328ec957f78..6197f9a7ef4 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -195,7 +195,7 @@ out: } static struct packet_type edsa_packet_type = { - .type = __constant_htons(ETH_P_EDSA), + .type = cpu_to_be16(ETH_P_EDSA), .func = edsa_rcv, }; diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 
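
The sysctl table above swaps the unbounded proc_dointvec handlers for proc_dointvec_minmax/proc_doulongvec_minmax, with extra1/extra2 pointing at the allowed bounds (CCIDs in [0, 255], seq_window at least 32, and so on). A stand-alone sketch, not part of the patch, of what such a bounded write amounts to:

#include <stdio.h>

/* Reject the write unless val lies within [*min, *max]; NULL bound = unbounded. */
static int write_bounded(int *var, int val, const int *min, const int *max)
{
	if ((min && val < *min) || (max && val > *max))
		return -1;		/* the kernel handler would return -EINVAL */
	*var = val;
	return 0;
}

int main(void)
{
	int zero = 0, u8_max = 0xFF;
	int tx_ccid = 2;

	printf("write 3   -> %d (tx_ccid=%d)\n",
	       write_bounded(&tx_ccid, 3, &zero, &u8_max), tx_ccid);
	printf("write 300 -> %d (tx_ccid=%d)\n",
	       write_bounded(&tx_ccid, 300, &zero, &u8_max), tx_ccid);
	return 0;
}
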
b59132878ad..d7e7f424ff0 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -112,7 +112,7 @@ out: } static struct packet_type trailer_packet_type = { - .type = __constant_htons(ETH_P_TRAILER), + .type = cpu_to_be16(ETH_P_TRAILER), .func = trailer_rcv, }; diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 8789d2bb1b0..7bf35582f65 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -1103,7 +1103,7 @@ drop: } static struct packet_type econet_packet_type = { - .type = __constant_htons(ETH_P_ECONET), + .type = cpu_to_be16(ETH_P_ECONET), .func = econet_rcv, }; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 743f5542d65..c79087719df 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -369,7 +369,6 @@ lookup_protocol: sock_init_data(sock, sk); sk->sk_destruct = inet_sock_destruct; - sk->sk_family = PF_INET; sk->sk_protocol = protocol; sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; @@ -1253,10 +1252,10 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, int proto; int id; - if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) + iph = skb_gro_header(skb, sizeof(*iph)); + if (unlikely(!iph)) goto out; - iph = ip_hdr(skb); proto = iph->protocol & (MAX_INET_PROTOS - 1); rcu_read_lock(); @@ -1270,7 +1269,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) goto out_unlock; - flush = ntohs(iph->tot_len) != skb->len || + flush = ntohs(iph->tot_len) != skb_gro_len(skb) || iph->frag_off != htons(IP_DF); id = ntohs(iph->id); @@ -1298,8 +1297,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, } NAPI_GRO_CB(skb)->flush |= flush; - __skb_pull(skb, sizeof(*iph)); - skb_reset_transport_header(skb); + skb_gro_pull(skb, sizeof(*iph)); + skb_set_transport_header(skb, skb_gro_offset(skb)); pp = ops->gro_receive(head, skb); @@ -1501,7 +1500,7 @@ static int ipv4_proc_init(void); */ static struct packet_type ip_packet_type = { - .type = __constant_htons(ETH_P_IP), + .type = cpu_to_be16(ETH_P_IP), .func = ip_rcv, .gso_send_check = inet_gso_send_check, .gso_segment = inet_gso_segment, diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 29a74c01d8d..3f6b7354699 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1226,7 +1226,7 @@ void arp_ifdown(struct net_device *dev) */ static struct packet_type arp_packet_type = { - .type = __constant_htons(ETH_P_ARP), + .type = cpu_to_be16(ETH_P_ARP), .func = arp_rcv, }; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 309997edc8a..d519a6a6672 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1075,6 +1075,14 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, } } ip_mc_up(in_dev); + /* fall through */ + case NETDEV_CHANGEADDR: + if (IN_DEV_ARP_NOTIFY(in_dev)) + arp_send(ARPOP_REQUEST, ETH_P_ARP, + in_dev->ifa_list->ifa_address, + dev, + in_dev->ifa_list->ifa_address, + NULL, dev->dev_addr, NULL); break; case NETDEV_DOWN: ip_mc_down(in_dev); @@ -1439,6 +1447,7 @@ static struct devinet_sysctl_table { DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"), DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), + DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f26ab38680d..22cd19ee44e 100644 --- a/net/ipv4/inet_connection_sock.c +++ 
b/net/ipv4/inet_connection_sock.c @@ -93,24 +93,40 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) struct inet_bind_hashbucket *head; struct hlist_node *node; struct inet_bind_bucket *tb; - int ret; + int ret, attempts = 5; struct net *net = sock_net(sk); + int smallest_size = -1, smallest_rover; local_bh_disable(); if (!snum) { int remaining, rover, low, high; +again: inet_get_local_port_range(&low, &high); remaining = (high - low) + 1; - rover = net_random() % remaining + low; + smallest_rover = rover = net_random() % remaining + low; + smallest_size = -1; do { head = &hashinfo->bhash[inet_bhashfn(net, rover, hashinfo->bhash_size)]; spin_lock(&head->lock); inet_bind_bucket_for_each(tb, node, &head->chain) - if (ib_net(tb) == net && tb->port == rover) + if (ib_net(tb) == net && tb->port == rover) { + if (tb->fastreuse > 0 && + sk->sk_reuse && + sk->sk_state != TCP_LISTEN && + (tb->num_owners < smallest_size || smallest_size == -1)) { + smallest_size = tb->num_owners; + smallest_rover = rover; + if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { + spin_unlock(&head->lock); + snum = smallest_rover; + goto have_snum; + } + } goto next; + } break; next: spin_unlock(&head->lock); @@ -125,14 +141,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) * the top level, not from the 'break;' statement. */ ret = 1; - if (remaining <= 0) + if (remaining <= 0) { + if (smallest_size != -1) { + snum = smallest_rover; + goto have_snum; + } goto fail; - + } /* OK, here is the one we will use. HEAD is * non-NULL and we hold it's mutex. */ snum = rover; } else { +have_snum: head = &hashinfo->bhash[inet_bhashfn(net, snum, hashinfo->bhash_size)]; spin_lock(&head->lock); @@ -145,12 +166,19 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) tb_found: if (!hlist_empty(&tb->owners)) { if (tb->fastreuse > 0 && - sk->sk_reuse && sk->sk_state != TCP_LISTEN) { + sk->sk_reuse && sk->sk_state != TCP_LISTEN && + smallest_size == -1) { goto success; } else { ret = 1; - if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) + if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { + if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && + smallest_size != -1 && --attempts >= 0) { + spin_unlock(&head->lock); + goto again; + } goto fail_unlock; + } } } tb_not_found: diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 6a1045da48d..625cc5f64c9 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -38,6 +38,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, write_pnet(&tb->ib_net, hold_net(net)); tb->port = snum; tb->fastreuse = 0; + tb->num_owners = 0; INIT_HLIST_HEAD(&tb->owners); hlist_add_head(&tb->node, &head->chain); } @@ -59,8 +60,13 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, const unsigned short snum) { + struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; + + atomic_inc(&hashinfo->bsockets); + inet_sk(sk)->num = snum; sk_add_bind_node(sk, &tb->owners); + tb->num_owners++; inet_csk(sk)->icsk_bind_hash = tb; } @@ -75,9 +81,12 @@ static void __inet_put_port(struct sock *sk) struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; struct inet_bind_bucket *tb; + atomic_dec(&hashinfo->bsockets); + spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; __sk_del_bind_node(sk); + tb->num_owners--; inet_csk(sk)->icsk_bind_hash = NULL; inet_sk(sk)->num = 0; 
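
The inet_csk_get_port() changes above let a bind() with SO_REUSEADDR fall back to sharing an already-bound local port when the ephemeral range is exhausted, preferring the bucket with the fewest owners (tracked by the new tb->num_owners). A stand-alone sketch of that selection over stand-in data instead of the bind hash:

#include <stdio.h>

struct bucket {
	int port;
	int num_owners;		/* sockets already bound to this port */
	int fastreuse;		/* all owners set SO_REUSEADDR and none listen */
};

/* Pick the least-crowded reusable port, or -1 if none qualifies. */
static int pick_port(const struct bucket *b, int n)
{
	int smallest_size = -1, smallest_port = -1, i;

	for (i = 0; i < n; i++) {
		if (b[i].fastreuse > 0 &&
		    (smallest_size == -1 || b[i].num_owners < smallest_size)) {
			smallest_size = b[i].num_owners;
			smallest_port = b[i].port;
		}
	}
	return smallest_port;
}

int main(void)
{
	struct bucket buckets[] = {
		{ 50000, 3, 1 },
		{ 50001, 1, 1 },
		{ 50002, 7, 0 },	/* not marked reusable */
	};

	printf("reuse port %d\n", pick_port(buckets, 3));
	return 0;
}
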
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); @@ -444,9 +453,9 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, */ inet_bind_bucket_for_each(tb, node, &head->chain) { if (ib_net(tb) == net && tb->port == port) { - WARN_ON(hlist_empty(&tb->owners)); if (tb->fastreuse >= 0) goto next_port; + WARN_ON(hlist_empty(&tb->owners)); if (!check_established(death_row, sk, port, &tw)) goto ok; @@ -523,6 +532,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h) { int i; + atomic_set(&h->bsockets, 0); for (i = 0; i < INET_LHTABLE_SIZE; i++) { spin_lock_init(&h->listening_hash[i].lock); INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 0101521f366..07a188afb3a 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -164,67 +164,124 @@ static DEFINE_RWLOCK(ipgre_lock); /* Given src, dst and key, find appropriate for input tunnel. */ -static struct ip_tunnel * ipgre_tunnel_lookup(struct net *net, +static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, __be32 remote, __be32 local, __be32 key, __be16 gre_proto) { + struct net *net = dev_net(dev); + int link = dev->ifindex; unsigned h0 = HASH(remote); unsigned h1 = HASH(key); - struct ip_tunnel *t; - struct ip_tunnel *t2 = NULL; + struct ip_tunnel *t, *cand = NULL; struct ipgre_net *ign = net_generic(net, ipgre_net_id); int dev_type = (gre_proto == htons(ETH_P_TEB)) ? ARPHRD_ETHER : ARPHRD_IPGRE; + int score, cand_score = 4; for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) { - if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) { - if (t->parms.i_key == key && t->dev->flags & IFF_UP) { - if (t->dev->type == dev_type) - return t; - if (t->dev->type == ARPHRD_IPGRE && !t2) - t2 = t; - } + if (local != t->parms.iph.saddr || + remote != t->parms.iph.daddr || + key != t->parms.i_key || + !(t->dev->flags & IFF_UP)) + continue; + + if (t->dev->type != ARPHRD_IPGRE && + t->dev->type != dev_type) + continue; + + score = 0; + if (t->parms.link != link) + score |= 1; + if (t->dev->type != dev_type) + score |= 2; + if (score == 0) + return t; + + if (score < cand_score) { + cand = t; + cand_score = score; } } for (t = ign->tunnels_r[h0^h1]; t; t = t->next) { - if (remote == t->parms.iph.daddr) { - if (t->parms.i_key == key && t->dev->flags & IFF_UP) { - if (t->dev->type == dev_type) - return t; - if (t->dev->type == ARPHRD_IPGRE && !t2) - t2 = t; - } + if (remote != t->parms.iph.daddr || + key != t->parms.i_key || + !(t->dev->flags & IFF_UP)) + continue; + + if (t->dev->type != ARPHRD_IPGRE && + t->dev->type != dev_type) + continue; + + score = 0; + if (t->parms.link != link) + score |= 1; + if (t->dev->type != dev_type) + score |= 2; + if (score == 0) + return t; + + if (score < cand_score) { + cand = t; + cand_score = score; } } for (t = ign->tunnels_l[h1]; t; t = t->next) { - if (local == t->parms.iph.saddr || - (local == t->parms.iph.daddr && - ipv4_is_multicast(local))) { - if (t->parms.i_key == key && t->dev->flags & IFF_UP) { - if (t->dev->type == dev_type) - return t; - if (t->dev->type == ARPHRD_IPGRE && !t2) - t2 = t; - } + if ((local != t->parms.iph.saddr && + (local != t->parms.iph.daddr || + !ipv4_is_multicast(local))) || + key != t->parms.i_key || + !(t->dev->flags & IFF_UP)) + continue; + + if (t->dev->type != ARPHRD_IPGRE && + t->dev->type != dev_type) + continue; + + score = 0; + if (t->parms.link != link) + score |= 1; + if (t->dev->type != dev_type) + score |= 2; + if (score == 0) + return t; + + if (score < cand_score) { + cand = 
t; + cand_score = score; } } for (t = ign->tunnels_wc[h1]; t; t = t->next) { - if (t->parms.i_key == key && t->dev->flags & IFF_UP) { - if (t->dev->type == dev_type) - return t; - if (t->dev->type == ARPHRD_IPGRE && !t2) - t2 = t; + if (t->parms.i_key != key || + !(t->dev->flags & IFF_UP)) + continue; + + if (t->dev->type != ARPHRD_IPGRE && + t->dev->type != dev_type) + continue; + + score = 0; + if (t->parms.link != link) + score |= 1; + if (t->dev->type != dev_type) + score |= 2; + if (score == 0) + return t; + + if (score < cand_score) { + cand = t; + cand_score = score; } } - if (t2) - return t2; + if (cand != NULL) + return cand; - if (ign->fb_tunnel_dev->flags&IFF_UP) + if (ign->fb_tunnel_dev->flags & IFF_UP) return netdev_priv(ign->fb_tunnel_dev); + return NULL; } @@ -284,6 +341,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net, __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; __be32 key = parms->i_key; + int link = parms->link; struct ip_tunnel *t, **tp; struct ipgre_net *ign = net_generic(net, ipgre_net_id); @@ -291,6 +349,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net, if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && key == t->parms.i_key && + link == t->parms.link && type == t->dev->type) break; @@ -421,7 +480,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) } read_lock(&ipgre_lock); - t = ipgre_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr, + t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, flags & GRE_KEY ? *(((__be32 *)p) + (grehlen / 4) - 1) : 0, p[1]); @@ -518,7 +577,7 @@ static int ipgre_rcv(struct sk_buff *skb) gre_proto = *(__be16 *)(h + 2); read_lock(&ipgre_lock); - if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), + if ((tunnel = ipgre_tunnel_lookup(skb->dev, iph->saddr, iph->daddr, key, gre_proto))) { struct net_device_stats *stats = &tunnel->dev->stats; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index d722013c1ca..90d22ae0a41 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -100,8 +100,8 @@ #define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers - '3' from resolv.h */ -#define NONE __constant_htonl(INADDR_NONE) -#define ANY __constant_htonl(INADDR_ANY) +#define NONE cpu_to_be32(INADDR_NONE) +#define ANY cpu_to_be32(INADDR_ANY) /* * Public IP configuration @@ -406,7 +406,7 @@ static int __init ic_defaults(void) static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); static struct packet_type rarp_packet_type __initdata = { - .type = __constant_htons(ETH_P_RARP), + .type = cpu_to_be16(ETH_P_RARP), .func = ic_rarp_recv, }; @@ -568,7 +568,7 @@ struct bootp_pkt { /* BOOTP packet format */ static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); static struct packet_type bootp_packet_type __initdata = { - .type = __constant_htons(ETH_P_IP), + .type = cpu_to_be16(ETH_P_IP), .func = ic_bootp_recv, }; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 14666449dc1..21a6dc710f2 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -67,9 +67,6 @@ #define CONFIG_IP_PIMSM 1 #endif -static struct sock *mroute_socket; - - /* Big lock, protecting vif table, mrt cache and mroute socket state. Note that the changes are semaphored via rtnl_lock. 
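The rewritten ipgre_tunnel_lookup() above drops the old single fallback pointer (t2) in favor of a two-bit score: bit 0 is set when the tunnel is bound to a different link than the receiving device, bit 1 when the tunnel's device type is only the generic ARPHRD_IPGRE rather than an exact match for the requested type. A score of 0 is an exact hit and returns at once; otherwise the lowest-scoring candidate wins, so a link mismatch is a smaller penalty than a type mismatch. Below is a minimal, self-contained sketch of that preference ordering only; the struct and names are hypothetical stand-ins, not the kernel's ip_tunnel, and the pre-filtering of candidates is simplified away.

#include <stdio.h>

/* Hypothetical stand-ins for the fields the lookup compares. */
struct tunnel {
	const char *name;
	int link;	/* ifindex the tunnel is bound to */
	int type;	/* device type, simplified to an int */
};

/* Same ranking idea as in ip_gre.c: exact match returns immediately,
 * otherwise keep the candidate with the lowest score. */
static struct tunnel *pick_best(struct tunnel *t, int n, int link, int type)
{
	struct tunnel *cand = NULL;
	int cand_score = 4;	/* worse than any real score (0..3) */
	int i;

	for (i = 0; i < n; i++) {
		int score = 0;

		if (t[i].link != link)
			score |= 1;
		if (t[i].type != type)
			score |= 2;
		if (score == 0)
			return &t[i];
		if (score < cand_score) {
			cand = &t[i];
			cand_score = score;
		}
	}
	return cand;
}

int main(void)
{
	struct tunnel tab[] = {
		{ "gre1", 2, 1 },	/* right type, wrong link */
		{ "gre2", 3, 1 },	/* exact match for link 3 */
	};
	struct tunnel *best = pick_best(tab, 2, 3, 1);

	printf("best: %s\n", best ? best->name : "none");
	return 0;
}

Keeping the ranking as small integers makes the "wrong link beats wrong type" preference explicit and cheap to compare in each of the four hash-chain walks.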
*/ @@ -80,18 +77,9 @@ static DEFINE_RWLOCK(mrt_lock); * Multicast router control variables */ -static struct vif_device vif_table[MAXVIFS]; /* Devices */ -static int maxvif; - -#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL) - -static int mroute_do_assert; /* Set in PIM assert */ -static int mroute_do_pim; - -static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */ +#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ -static atomic_t cache_resolve_queue_len; /* Size of unresolved */ /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); @@ -107,7 +95,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); -static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); +static int ipmr_cache_report(struct net *net, + struct sk_buff *pkt, vifi_t vifi, int assert); static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); #ifdef CONFIG_IP_PIMSM_V2 @@ -120,9 +109,11 @@ static struct timer_list ipmr_expire_timer; static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) { + struct net *net = dev_net(dev); + dev_close(dev); - dev = __dev_get_by_name(&init_net, "tunl0"); + dev = __dev_get_by_name(net, "tunl0"); if (dev) { const struct net_device_ops *ops = dev->netdev_ops; struct ifreq ifr; @@ -148,11 +139,11 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) } static -struct net_device *ipmr_new_tunnel(struct vifctl *v) +struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) { struct net_device *dev; - dev = __dev_get_by_name(&init_net, "tunl0"); + dev = __dev_get_by_name(net, "tunl0"); if (dev) { const struct net_device_ops *ops = dev->netdev_ops; @@ -181,7 +172,8 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v) dev = NULL; - if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) { + if (err == 0 && + (dev = __dev_get_by_name(net, p.name)) != NULL) { dev->flags |= IFF_MULTICAST; in_dev = __in_dev_get_rtnl(dev); @@ -209,14 +201,15 @@ failure: #ifdef CONFIG_IP_PIMSM -static int reg_vif_num = -1; - static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { + struct net *net = dev_net(dev); + read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; - ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); + ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num, + IGMPMSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return 0; @@ -283,16 +276,16 @@ failure: * @notify: Set to 1, if the caller is a notifier_call */ -static int vif_delete(int vifi, int notify) +static int vif_delete(struct net *net, int vifi, int notify) { struct vif_device *v; struct net_device *dev; struct in_device *in_dev; - if (vifi < 0 || vifi >= maxvif) + if (vifi < 0 || vifi >= net->ipv4.maxvif) return -EADDRNOTAVAIL; - v = &vif_table[vifi]; + v = &net->ipv4.vif_table[vifi]; write_lock_bh(&mrt_lock); dev = v->dev; @@ -304,17 +297,17 @@ static int vif_delete(int vifi, int notify) } #ifdef CONFIG_IP_PIMSM - if (vifi == reg_vif_num) - reg_vif_num = -1; + if (vifi == net->ipv4.mroute_reg_vif_num) + net->ipv4.mroute_reg_vif_num = -1; #endif - if (vifi+1 == maxvif) { + if (vifi+1 == net->ipv4.maxvif) { int tmp; for (tmp=vifi-1; tmp>=0; tmp--) { - if (VIF_EXISTS(tmp)) + if (VIF_EXISTS(net, tmp)) 
break; } - maxvif = tmp+1; + net->ipv4.maxvif = tmp+1; } write_unlock_bh(&mrt_lock); @@ -333,6 +326,12 @@ static int vif_delete(int vifi, int notify) return 0; } +static inline void ipmr_cache_free(struct mfc_cache *c) +{ + release_net(mfc_net(c)); + kmem_cache_free(mrt_cachep, c); +} + /* Destroy an unresolved cache entry, killing queued skbs and reporting error to netlink readers. */ @@ -341,8 +340,9 @@ static void ipmr_destroy_unres(struct mfc_cache *c) { struct sk_buff *skb; struct nlmsgerr *e; + struct net *net = mfc_net(c); - atomic_dec(&cache_resolve_queue_len); + atomic_dec(&net->ipv4.cache_resolve_queue_len); while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { @@ -354,12 +354,12 @@ static void ipmr_destroy_unres(struct mfc_cache *c) e->error = -ETIMEDOUT; memset(&e->msg, 0, sizeof(e->msg)); - rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid); + rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else kfree_skb(skb); } - kmem_cache_free(mrt_cachep, c); + ipmr_cache_free(c); } @@ -376,7 +376,7 @@ static void ipmr_expire_process(unsigned long dummy) return; } - if (atomic_read(&cache_resolve_queue_len) == 0) + if (mfc_unres_queue == NULL) goto out; now = jiffies; @@ -397,7 +397,7 @@ static void ipmr_expire_process(unsigned long dummy) ipmr_destroy_unres(c); } - if (atomic_read(&cache_resolve_queue_len)) + if (mfc_unres_queue != NULL) mod_timer(&ipmr_expire_timer, jiffies + expires); out: @@ -409,13 +409,15 @@ out: static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) { int vifi; + struct net *net = mfc_net(cache); cache->mfc_un.res.minvif = MAXVIFS; cache->mfc_un.res.maxvif = 0; memset(cache->mfc_un.res.ttls, 255, MAXVIFS); - for (vifi=0; vifi<maxvif; vifi++) { - if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) { + for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) { + if (VIF_EXISTS(net, vifi) && + ttls[vifi] && ttls[vifi] < 255) { cache->mfc_un.res.ttls[vifi] = ttls[vifi]; if (cache->mfc_un.res.minvif > vifi) cache->mfc_un.res.minvif = vifi; @@ -425,16 +427,16 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) } } -static int vif_add(struct vifctl *vifc, int mrtsock) +static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) { int vifi = vifc->vifc_vifi; - struct vif_device *v = &vif_table[vifi]; + struct vif_device *v = &net->ipv4.vif_table[vifi]; struct net_device *dev; struct in_device *in_dev; int err; /* Is vif busy ? 
*/ - if (VIF_EXISTS(vifi)) + if (VIF_EXISTS(net, vifi)) return -EADDRINUSE; switch (vifc->vifc_flags) { @@ -444,7 +446,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) * Special Purpose VIF in PIM * All the packets will be sent to the daemon */ - if (reg_vif_num >= 0) + if (net->ipv4.mroute_reg_vif_num >= 0) return -EADDRINUSE; dev = ipmr_reg_vif(); if (!dev) @@ -458,7 +460,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) break; #endif case VIFF_TUNNEL: - dev = ipmr_new_tunnel(vifc); + dev = ipmr_new_tunnel(net, vifc); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); @@ -469,7 +471,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) } break; case 0: - dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr); + dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); if (!dev) return -EADDRNOTAVAIL; err = dev_set_allmulti(dev, 1); @@ -510,20 +512,22 @@ static int vif_add(struct vifctl *vifc, int mrtsock) v->dev = dev; #ifdef CONFIG_IP_PIMSM if (v->flags&VIFF_REGISTER) - reg_vif_num = vifi; + net->ipv4.mroute_reg_vif_num = vifi; #endif - if (vifi+1 > maxvif) - maxvif = vifi+1; + if (vifi+1 > net->ipv4.maxvif) + net->ipv4.maxvif = vifi+1; write_unlock_bh(&mrt_lock); return 0; } -static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) +static struct mfc_cache *ipmr_cache_find(struct net *net, + __be32 origin, + __be32 mcastgrp) { int line = MFC_HASH(mcastgrp, origin); struct mfc_cache *c; - for (c=mfc_cache_array[line]; c; c = c->next) { + for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) break; } @@ -533,22 +537,24 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp) /* * Allocate a multicast cache entry */ -static struct mfc_cache *ipmr_cache_alloc(void) +static struct mfc_cache *ipmr_cache_alloc(struct net *net) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); if (c == NULL) return NULL; c->mfc_un.res.minvif = MAXVIFS; + mfc_net_set(c, net); return c; } -static struct mfc_cache *ipmr_cache_alloc_unres(void) +static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); if (c == NULL) return NULL; skb_queue_head_init(&c->mfc_un.unres.unresolved); c->mfc_un.unres.expires = jiffies + 10*HZ; + mfc_net_set(c, net); return c; } @@ -581,7 +587,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) memset(&e->msg, 0, sizeof(e->msg)); } - rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid); + rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid); } else ip_mr_forward(skb, c, 0); } @@ -594,7 +600,8 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) * Called under mrt_lock. 
*/ -static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) +static int ipmr_cache_report(struct net *net, + struct sk_buff *pkt, vifi_t vifi, int assert) { struct sk_buff *skb; const int ihl = ip_hdrlen(pkt); @@ -626,7 +633,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); msg->im_msgtype = IGMPMSG_WHOLEPKT; msg->im_mbz = 0; - msg->im_vif = reg_vif_num; + msg->im_vif = net->ipv4.mroute_reg_vif_num; ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + sizeof(struct iphdr)); @@ -658,7 +665,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) skb->transport_header = skb->network_header; } - if (mroute_socket == NULL) { + if (net->ipv4.mroute_sk == NULL) { kfree_skb(skb); return -EINVAL; } @@ -666,7 +673,8 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) /* * Deliver to mrouted */ - if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) { + ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb); + if (ret < 0) { if (net_ratelimit()) printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); kfree_skb(skb); @@ -680,7 +688,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) */ static int -ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) +ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) { int err; struct mfc_cache *c; @@ -688,7 +696,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) spin_lock_bh(&mfc_unres_lock); for (c=mfc_unres_queue; c; c=c->next) { - if (c->mfc_mcastgrp == iph->daddr && + if (net_eq(mfc_net(c), net) && + c->mfc_mcastgrp == iph->daddr && c->mfc_origin == iph->saddr) break; } @@ -698,8 +707,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) * Create a new entry if allowable */ - if (atomic_read(&cache_resolve_queue_len) >= 10 || - (c=ipmr_cache_alloc_unres())==NULL) { + if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || + (c = ipmr_cache_alloc_unres(net)) == NULL) { spin_unlock_bh(&mfc_unres_lock); kfree_skb(skb); @@ -716,18 +725,19 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) /* * Reflect first query at mrouted. 
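The ipmr changes in this file follow the usual namespace conversion: former file-scope globals (vif_table, maxvif, mfc_cache_array, cache_resolve_queue_len, mroute_sk, mroute_reg_vif_num, the assert/pim flags) become per-net state reached through struct net, while the one list that stays global, mfc_unres_queue, is instead filtered with net_eq(mfc_net(c), net) so each namespace only sees its own unresolved entries. A small stand-alone sketch of that "shared list, owner-tagged entries" shape, with an integer id standing in for struct net and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Entries live on one global queue but remember their owner,
 * mirroring the net_eq(mfc_net(c), net) checks added in ipmr.c. */
struct entry {
	int owner;
	int key;
	struct entry *next;
};

static struct entry *queue;

static struct entry *find(int owner, int key)
{
	struct entry *e;

	for (e = queue; e; e = e->next)
		if (e->owner == owner && e->key == key)
			return e;
	return NULL;
}

static void push(int owner, int key)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->owner = owner;
	e->key = key;
	e->next = queue;
	queue = e;
}

int main(void)
{
	push(1, 42);		/* entry owned by namespace 1 */
	push(2, 42);		/* same key, different namespace */

	printf("%s\n", find(2, 42) ? "found in ns 2" : "missing");
	printf("%s\n", find(3, 42) ? "found in ns 3" : "missing");
	return 0;
}

The trade-off is the same as in the patch: no per-namespace queue head to allocate, at the cost of walking entries that belong to other owners.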
*/ - if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) { + err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE); + if (err < 0) { /* If the report failed throw the cache entry out - Brad Parker */ spin_unlock_bh(&mfc_unres_lock); - kmem_cache_free(mrt_cachep, c); + ipmr_cache_free(c); kfree_skb(skb); return err; } - atomic_inc(&cache_resolve_queue_len); + atomic_inc(&net->ipv4.cache_resolve_queue_len); c->next = mfc_unres_queue; mfc_unres_queue = c; @@ -753,35 +763,37 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) * MFC cache manipulation by user space mroute daemon */ -static int ipmr_mfc_delete(struct mfcctl *mfc) +static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) { int line; struct mfc_cache *c, **cp; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { + for (cp = &net->ipv4.mfc_cache_array[line]; + (c = *cp) != NULL; cp = &c->next) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { write_lock_bh(&mrt_lock); *cp = c->next; write_unlock_bh(&mrt_lock); - kmem_cache_free(mrt_cachep, c); + ipmr_cache_free(c); return 0; } } return -ENOENT; } -static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) +static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) { int line; struct mfc_cache *uc, *c, **cp; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) { + for (cp = &net->ipv4.mfc_cache_array[line]; + (c = *cp) != NULL; cp = &c->next) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) break; @@ -800,7 +812,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) return -EINVAL; - c = ipmr_cache_alloc(); + c = ipmr_cache_alloc(net); if (c == NULL) return -ENOMEM; @@ -812,8 +824,8 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) c->mfc_flags |= MFC_STATIC; write_lock_bh(&mrt_lock); - c->next = mfc_cache_array[line]; - mfc_cache_array[line] = c; + c->next = net->ipv4.mfc_cache_array[line]; + net->ipv4.mfc_cache_array[line] = c; write_unlock_bh(&mrt_lock); /* @@ -823,19 +835,21 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) spin_lock_bh(&mfc_unres_lock); for (cp = &mfc_unres_queue; (uc=*cp) != NULL; cp = &uc->next) { - if (uc->mfc_origin == c->mfc_origin && + if (net_eq(mfc_net(uc), net) && + uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { *cp = uc->next; - if (atomic_dec_and_test(&cache_resolve_queue_len)) - del_timer(&ipmr_expire_timer); + atomic_dec(&net->ipv4.cache_resolve_queue_len); break; } } + if (mfc_unres_queue == NULL) + del_timer(&ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); if (uc) { ipmr_cache_resolve(uc, c); - kmem_cache_free(mrt_cachep, uc); + ipmr_cache_free(uc); } return 0; } @@ -844,16 +858,16 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) * Close the multicast socket, and clear the vif tables etc */ -static void mroute_clean_tables(struct sock *sk) +static void mroute_clean_tables(struct net *net) { int i; /* * Shut down all active vif entries */ - for (i=0; i<maxvif; i++) { - if (!(vif_table[i].flags&VIFF_STATIC)) - vif_delete(i, 0); + for (i = 0; i < net->ipv4.maxvif; i++) { + if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) + vif_delete(net, i, 0); } /* @@ -862,7 +876,7 @@ static void mroute_clean_tables(struct sock *sk) for (i=0; 
i<MFC_LINES; i++) { struct mfc_cache *c, **cp; - cp = &mfc_cache_array[i]; + cp = &net->ipv4.mfc_cache_array[i]; while ((c = *cp) != NULL) { if (c->mfc_flags&MFC_STATIC) { cp = &c->next; @@ -872,22 +886,23 @@ static void mroute_clean_tables(struct sock *sk) *cp = c->next; write_unlock_bh(&mrt_lock); - kmem_cache_free(mrt_cachep, c); + ipmr_cache_free(c); } } - if (atomic_read(&cache_resolve_queue_len) != 0) { - struct mfc_cache *c; + if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { + struct mfc_cache *c, **cp; spin_lock_bh(&mfc_unres_lock); - while (mfc_unres_queue != NULL) { - c = mfc_unres_queue; - mfc_unres_queue = c->next; - spin_unlock_bh(&mfc_unres_lock); + cp = &mfc_unres_queue; + while ((c = *cp) != NULL) { + if (!net_eq(mfc_net(c), net)) { + cp = &c->next; + continue; + } + *cp = c->next; ipmr_destroy_unres(c); - - spin_lock_bh(&mfc_unres_lock); } spin_unlock_bh(&mfc_unres_lock); } @@ -895,15 +910,17 @@ static void mroute_clean_tables(struct sock *sk) static void mrtsock_destruct(struct sock *sk) { + struct net *net = sock_net(sk); + rtnl_lock(); - if (sk == mroute_socket) { - IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--; + if (sk == net->ipv4.mroute_sk) { + IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; write_lock_bh(&mrt_lock); - mroute_socket = NULL; + net->ipv4.mroute_sk = NULL; write_unlock_bh(&mrt_lock); - mroute_clean_tables(sk); + mroute_clean_tables(net); } rtnl_unlock(); } @@ -920,9 +937,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int int ret; struct vifctl vif; struct mfcctl mfc; + struct net *net = sock_net(sk); if (optname != MRT_INIT) { - if (sk != mroute_socket && !capable(CAP_NET_ADMIN)) + if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN)) return -EACCES; } @@ -935,7 +953,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int return -ENOPROTOOPT; rtnl_lock(); - if (mroute_socket) { + if (net->ipv4.mroute_sk) { rtnl_unlock(); return -EADDRINUSE; } @@ -943,15 +961,15 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int ret = ip_ra_control(sk, 1, mrtsock_destruct); if (ret == 0) { write_lock_bh(&mrt_lock); - mroute_socket = sk; + net->ipv4.mroute_sk = sk; write_unlock_bh(&mrt_lock); - IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++; + IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; } rtnl_unlock(); return ret; case MRT_DONE: - if (sk != mroute_socket) + if (sk != net->ipv4.mroute_sk) return -EACCES; return ip_ra_control(sk, 0, NULL); case MRT_ADD_VIF: @@ -964,9 +982,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int return -ENFILE; rtnl_lock(); if (optname == MRT_ADD_VIF) { - ret = vif_add(&vif, sk==mroute_socket); + ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); } else { - ret = vif_delete(vif.vifc_vifi, 0); + ret = vif_delete(net, vif.vifc_vifi, 0); } rtnl_unlock(); return ret; @@ -983,9 +1001,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int return -EFAULT; rtnl_lock(); if (optname == MRT_DEL_MFC) - ret = ipmr_mfc_delete(&mfc); + ret = ipmr_mfc_delete(net, &mfc); else - ret = ipmr_mfc_add(&mfc, sk==mroute_socket); + ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk); rtnl_unlock(); return ret; /* @@ -996,7 +1014,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int int v; if (get_user(v,(int __user *)optval)) return -EFAULT; - mroute_do_assert=(v)?1:0; + net->ipv4.mroute_do_assert = (v) ? 
1 : 0; return 0; } #ifdef CONFIG_IP_PIMSM @@ -1010,11 +1028,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int rtnl_lock(); ret = 0; - if (v != mroute_do_pim) { - mroute_do_pim = v; - mroute_do_assert = v; + if (v != net->ipv4.mroute_do_pim) { + net->ipv4.mroute_do_pim = v; + net->ipv4.mroute_do_assert = v; #ifdef CONFIG_IP_PIMSM_V2 - if (mroute_do_pim) + if (net->ipv4.mroute_do_pim) ret = inet_add_protocol(&pim_protocol, IPPROTO_PIM); else @@ -1045,6 +1063,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int { int olr; int val; + struct net *net = sock_net(sk); if (optname != MRT_VERSION && #ifdef CONFIG_IP_PIMSM @@ -1066,10 +1085,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int val = 0x0305; #ifdef CONFIG_IP_PIMSM else if (optname == MRT_PIM) - val = mroute_do_pim; + val = net->ipv4.mroute_do_pim; #endif else - val = mroute_do_assert; + val = net->ipv4.mroute_do_assert; if (copy_to_user(optval, &val, olr)) return -EFAULT; return 0; @@ -1085,16 +1104,17 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) struct sioc_vif_req vr; struct vif_device *vif; struct mfc_cache *c; + struct net *net = sock_net(sk); switch (cmd) { case SIOCGETVIFCNT: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; - if (vr.vifi >= maxvif) + if (vr.vifi >= net->ipv4.maxvif) return -EINVAL; read_lock(&mrt_lock); - vif=&vif_table[vr.vifi]; - if (VIF_EXISTS(vr.vifi)) { + vif = &net->ipv4.vif_table[vr.vifi]; + if (VIF_EXISTS(net, vr.vifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; @@ -1112,7 +1132,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) return -EFAULT; read_lock(&mrt_lock); - c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr); + c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; @@ -1134,18 +1154,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; + struct net *net = dev_net(dev); struct vif_device *v; int ct; - if (!net_eq(dev_net(dev), &init_net)) + if (!net_eq(dev_net(dev), net)) return NOTIFY_DONE; if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; - v=&vif_table[0]; - for (ct=0; ct<maxvif; ct++,v++) { + v = &net->ipv4.vif_table[0]; + for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { if (v->dev == dev) - vif_delete(ct, 1); + vif_delete(net, ct, 1); } return NOTIFY_DONE; } @@ -1205,8 +1226,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) { + struct net *net = mfc_net(c); const struct iphdr *iph = ip_hdr(skb); - struct vif_device *vif = &vif_table[vifi]; + struct vif_device *vif = &net->ipv4.vif_table[vifi]; struct net_device *dev; struct rtable *rt; int encap = 0; @@ -1220,7 +1242,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; - ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); + ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT); kfree_skb(skb); return; } @@ -1233,7 +1255,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) .saddr = vif->local, .tos = RT_TOS(iph->tos) } }, .proto = IPPROTO_IPIP }; - if (ip_route_output_key(&init_net, &rt, &fl)) + if 
(ip_route_output_key(net, &rt, &fl)) goto out_free; encap = sizeof(struct iphdr); } else { @@ -1242,7 +1264,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) { .daddr = iph->daddr, .tos = RT_TOS(iph->tos) } }, .proto = IPPROTO_IPIP }; - if (ip_route_output_key(&init_net, &rt, &fl)) + if (ip_route_output_key(net, &rt, &fl)) goto out_free; } @@ -1306,9 +1328,10 @@ out_free: static int ipmr_find_vif(struct net_device *dev) { + struct net *net = dev_net(dev); int ct; - for (ct=maxvif-1; ct>=0; ct--) { - if (vif_table[ct].dev == dev) + for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) { + if (net->ipv4.vif_table[ct].dev == dev) break; } return ct; @@ -1320,6 +1343,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local { int psend = -1; int vif, ct; + struct net *net = mfc_net(cache); vif = cache->mfc_parent; cache->mfc_un.res.pkt++; @@ -1328,7 +1352,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local /* * Wrong interface: drop packet and (maybe) send PIM assert. */ - if (vif_table[vif].dev != skb->dev) { + if (net->ipv4.vif_table[vif].dev != skb->dev) { int true_vifi; if (skb->rtable->fl.iif == 0) { @@ -1349,23 +1373,24 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local cache->mfc_un.res.wrong_if++; true_vifi = ipmr_find_vif(skb->dev); - if (true_vifi >= 0 && mroute_do_assert && + if (true_vifi >= 0 && net->ipv4.mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, so that we cannot check that packet arrived on an oif. It is bad, but otherwise we would need to move pretty large chunk of pimd to kernel. Ough... --ANK */ - (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && + (net->ipv4.mroute_do_pim || + cache->mfc_un.res.ttls[true_vifi] < 255) && time_after(jiffies, cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { cache->mfc_un.res.last_assert = jiffies; - ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF); + ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF); } goto dont_forward; } - vif_table[vif].pkt_in++; - vif_table[vif].bytes_in += skb->len; + net->ipv4.vif_table[vif].pkt_in++; + net->ipv4.vif_table[vif].bytes_in += skb->len; /* * Forward the frame @@ -1405,6 +1430,7 @@ dont_forward: int ip_mr_input(struct sk_buff *skb) { struct mfc_cache *cache; + struct net *net = dev_net(skb->dev); int local = skb->rtable->rt_flags&RTCF_LOCAL; /* Packet is looped back after forward, it should not be @@ -1425,9 +1451,9 @@ int ip_mr_input(struct sk_buff *skb) that we can forward NO IGMP messages. 
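The wrong-interface branch of ip_mr_forward() above keeps its existing throttle: a PIM assert is reported at most once per MFC_ASSERT_THRESH, compared with time_after() against last_assert so that jiffies wraparound stays harmless. A minimal userspace sketch of that pattern, with the threshold constant and names purely hypothetical:

#include <stdio.h>

/* Wrap-safe "a is after b" in the style of the kernel's time_after():
 * valid for unsigned tick counters that may overflow. */
#define ticks_after(a, b)	((long)((b) - (a)) < 0)

#define ASSERT_THRESH	300	/* hypothetical: minimum ticks between reports */

struct assert_state {
	unsigned long last_report;
};

/* Returns 1 when a report may be sent, mirroring the last_assert
 * throttle in ip_mr_forward(). */
static int may_report(struct assert_state *st, unsigned long now)
{
	if (!ticks_after(now, st->last_report + ASSERT_THRESH))
		return 0;
	st->last_report = now;
	return 1;
}

int main(void)
{
	struct assert_state st = { .last_report = 0 };

	printf("%d %d %d\n",
	       may_report(&st, 301),	/* past the threshold */
	       may_report(&st, 400),	/* still throttled */
	       may_report(&st, 700));	/* threshold elapsed again */
	return 0;
}

Compiled and run, this prints 1 0 1: the second call still falls inside the threshold window, so no report is sent.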
*/ read_lock(&mrt_lock); - if (mroute_socket) { + if (net->ipv4.mroute_sk) { nf_reset(skb); - raw_rcv(mroute_socket, skb); + raw_rcv(net->ipv4.mroute_sk, skb); read_unlock(&mrt_lock); return 0; } @@ -1436,7 +1462,7 @@ int ip_mr_input(struct sk_buff *skb) } read_lock(&mrt_lock); - cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); + cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); /* * No usable cache entry @@ -1456,7 +1482,7 @@ int ip_mr_input(struct sk_buff *skb) vif = ipmr_find_vif(skb->dev); if (vif >= 0) { - int err = ipmr_cache_unresolved(vif, skb); + int err = ipmr_cache_unresolved(net, vif, skb); read_unlock(&mrt_lock); return err; @@ -1487,6 +1513,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) { struct net_device *reg_dev = NULL; struct iphdr *encap; + struct net *net = dev_net(skb->dev); encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); /* @@ -1501,8 +1528,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) return 1; read_lock(&mrt_lock); - if (reg_vif_num >= 0) - reg_dev = vif_table[reg_vif_num].dev; + if (net->ipv4.mroute_reg_vif_num >= 0) + reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev; if (reg_dev) dev_hold(reg_dev); read_unlock(&mrt_lock); @@ -1537,13 +1564,14 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) int pim_rcv_v1(struct sk_buff * skb) { struct igmphdr *pim; + struct net *net = dev_net(skb->dev); if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; pim = igmp_hdr(skb); - if (!mroute_do_pim || + if (!net->ipv4.mroute_do_pim || pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) goto drop; @@ -1583,7 +1611,8 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; - struct net_device *dev = vif_table[c->mfc_parent].dev; + struct net *net = mfc_net(c); + struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev; u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; @@ -1599,7 +1628,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); nhp->rtnh_flags = 0; nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; - nhp->rtnh_ifindex = vif_table[ct].dev->ifindex; + nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex; nhp->rtnh_len = sizeof(*nhp); } } @@ -1613,14 +1642,15 @@ rtattr_failure: return -EMSGSIZE; } -int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) +int ipmr_get_route(struct net *net, + struct sk_buff *skb, struct rtmsg *rtm, int nowait) { int err; struct mfc_cache *cache; struct rtable *rt = skb->rtable; read_lock(&mrt_lock); - cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); + cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); if (cache == NULL) { struct sk_buff *skb2; @@ -1651,7 +1681,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) iph->saddr = rt->rt_src; iph->daddr = rt->rt_dst; iph->version = 0; - err = ipmr_cache_unresolved(vif, skb2); + err = ipmr_cache_unresolved(net, vif, skb2); read_unlock(&mrt_lock); return err; } @@ -1668,17 +1698,19 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif */ struct ipmr_vif_iter { + struct seq_net_private p; int ct; }; -static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter, +static struct vif_device *ipmr_vif_seq_idx(struct net *net, + struct ipmr_vif_iter *iter, 
loff_t pos) { - for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) { - if (!VIF_EXISTS(iter->ct)) + for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) { + if (!VIF_EXISTS(net, iter->ct)) continue; if (pos-- == 0) - return &vif_table[iter->ct]; + return &net->ipv4.vif_table[iter->ct]; } return NULL; } @@ -1686,23 +1718,26 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter, static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) __acquires(mrt_lock) { + struct net *net = seq_file_net(seq); + read_lock(&mrt_lock); - return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1) + return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; + struct net *net = seq_file_net(seq); ++*pos; if (v == SEQ_START_TOKEN) - return ipmr_vif_seq_idx(iter, 0); + return ipmr_vif_seq_idx(net, iter, 0); - while (++iter->ct < maxvif) { - if (!VIF_EXISTS(iter->ct)) + while (++iter->ct < net->ipv4.maxvif) { + if (!VIF_EXISTS(net, iter->ct)) continue; - return &vif_table[iter->ct]; + return &net->ipv4.vif_table[iter->ct]; } return NULL; } @@ -1715,6 +1750,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) static int ipmr_vif_seq_show(struct seq_file *seq, void *v) { + struct net *net = seq_file_net(seq); + if (v == SEQ_START_TOKEN) { seq_puts(seq, "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); @@ -1724,7 +1761,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", - vif - vif_table, + vif - net->ipv4.vif_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, vif->flags, vif->local, vif->remote); @@ -1741,8 +1778,8 @@ static const struct seq_operations ipmr_vif_seq_ops = { static int ipmr_vif_open(struct inode *inode, struct file *file) { - return seq_open_private(file, &ipmr_vif_seq_ops, - sizeof(struct ipmr_vif_iter)); + return seq_open_net(inode, file, &ipmr_vif_seq_ops, + sizeof(struct ipmr_vif_iter)); } static const struct file_operations ipmr_vif_fops = { @@ -1750,23 +1787,26 @@ static const struct file_operations ipmr_vif_fops = { .open = ipmr_vif_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = seq_release_net, }; struct ipmr_mfc_iter { + struct seq_net_private p; struct mfc_cache **cache; int ct; }; -static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) +static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, + struct ipmr_mfc_iter *it, loff_t pos) { struct mfc_cache *mfc; - it->cache = mfc_cache_array; + it->cache = net->ipv4.mfc_cache_array; read_lock(&mrt_lock); for (it->ct = 0; it->ct < MFC_LINES; it->ct++) - for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next) + for (mfc = net->ipv4.mfc_cache_array[it->ct]; + mfc; mfc = mfc->next) if (pos-- == 0) return mfc; read_unlock(&mrt_lock); @@ -1774,7 +1814,8 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) it->cache = &mfc_unres_queue; spin_lock_bh(&mfc_unres_lock); for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) - if (pos-- == 0) + if (net_eq(mfc_net(mfc), net) && + pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); @@ -1786,9 +1827,11 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) { struct ipmr_mfc_iter *it = seq->private; + struct net *net = 
seq_file_net(seq); + it->cache = NULL; it->ct = 0; - return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1) + return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } @@ -1796,11 +1839,12 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct mfc_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; + struct net *net = seq_file_net(seq); ++*pos; if (v == SEQ_START_TOKEN) - return ipmr_mfc_seq_idx(seq->private, 0); + return ipmr_mfc_seq_idx(net, seq->private, 0); if (mfc->next) return mfc->next; @@ -1808,10 +1852,10 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (it->cache == &mfc_unres_queue) goto end_of_list; - BUG_ON(it->cache != mfc_cache_array); + BUG_ON(it->cache != net->ipv4.mfc_cache_array); while (++it->ct < MFC_LINES) { - mfc = mfc_cache_array[it->ct]; + mfc = net->ipv4.mfc_cache_array[it->ct]; if (mfc) return mfc; } @@ -1823,6 +1867,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) spin_lock_bh(&mfc_unres_lock); mfc = mfc_unres_queue; + while (mfc && !net_eq(mfc_net(mfc), net)) + mfc = mfc->next; if (mfc) return mfc; @@ -1836,16 +1882,18 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; + struct net *net = seq_file_net(seq); if (it->cache == &mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); - else if (it->cache == mfc_cache_array) + else if (it->cache == net->ipv4.mfc_cache_array) read_unlock(&mrt_lock); } static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; + struct net *net = seq_file_net(seq); if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -1866,9 +1914,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) mfc->mfc_un.res.wrong_if); for (n = mfc->mfc_un.res.minvif; n < mfc->mfc_un.res.maxvif; n++ ) { - if (VIF_EXISTS(n) - && mfc->mfc_un.res.ttls[n] < 255) - seq_printf(seq, + if (VIF_EXISTS(net, n) && + mfc->mfc_un.res.ttls[n] < 255) + seq_printf(seq, " %2d:%-3d", n, mfc->mfc_un.res.ttls[n]); } @@ -1892,8 +1940,8 @@ static const struct seq_operations ipmr_mfc_seq_ops = { static int ipmr_mfc_open(struct inode *inode, struct file *file) { - return seq_open_private(file, &ipmr_mfc_seq_ops, - sizeof(struct ipmr_mfc_iter)); + return seq_open_net(inode, file, &ipmr_mfc_seq_ops, + sizeof(struct ipmr_mfc_iter)); } static const struct file_operations ipmr_mfc_fops = { @@ -1901,7 +1949,7 @@ static const struct file_operations ipmr_mfc_fops = { .open = ipmr_mfc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = seq_release_net, }; #endif @@ -1915,6 +1963,65 @@ static struct net_protocol pim_protocol = { /* * Setup for IP multicast routing */ +static int __net_init ipmr_net_init(struct net *net) +{ + int err = 0; + + net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), + GFP_KERNEL); + if (!net->ipv4.vif_table) { + err = -ENOMEM; + goto fail; + } + + /* Forwarding cache */ + net->ipv4.mfc_cache_array = kcalloc(MFC_LINES, + sizeof(struct mfc_cache *), + GFP_KERNEL); + if (!net->ipv4.mfc_cache_array) { + err = -ENOMEM; + goto fail_mfc_cache; + } + +#ifdef CONFIG_IP_PIMSM + net->ipv4.mroute_reg_vif_num = -1; +#endif + +#ifdef CONFIG_PROC_FS + err = -ENOMEM; + if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops)) + goto proc_vif_fail; + if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops)) + goto proc_cache_fail; +#endif + return 0; + +#ifdef 
CONFIG_PROC_FS +proc_cache_fail: + proc_net_remove(net, "ip_mr_vif"); +proc_vif_fail: + kfree(net->ipv4.mfc_cache_array); +#endif +fail_mfc_cache: + kfree(net->ipv4.vif_table); +fail: + return err; +} + +static void __net_exit ipmr_net_exit(struct net *net) +{ +#ifdef CONFIG_PROC_FS + proc_net_remove(net, "ip_mr_cache"); + proc_net_remove(net, "ip_mr_vif"); +#endif + kfree(net->ipv4.mfc_cache_array); + kfree(net->ipv4.vif_table); +} + +static struct pernet_operations ipmr_net_ops = { + .init = ipmr_net_init, + .exit = ipmr_net_exit, +}; int __init ip_mr_init(void) { @@ -1927,26 +2034,20 @@ int __init ip_mr_init(void) if (!mrt_cachep) return -ENOMEM; + err = register_pernet_subsys(&ipmr_net_ops); + if (err) + goto reg_pernet_fail; + setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); err = register_netdevice_notifier(&ip_mr_notifier); if (err) goto reg_notif_fail; -#ifdef CONFIG_PROC_FS - err = -ENOMEM; - if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops)) - goto proc_vif_fail; - if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops)) - goto proc_cache_fail; -#endif return 0; -#ifdef CONFIG_PROC_FS -proc_cache_fail: - proc_net_remove(&init_net, "ip_mr_vif"); -proc_vif_fail: - unregister_netdevice_notifier(&ip_mr_notifier); -#endif + reg_notif_fail: del_timer(&ipmr_expire_timer); + unregister_pernet_subsys(&ipmr_net_ops); +reg_pernet_fail: kmem_cache_destroy(mrt_cachep); return err; } diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 182f845de92..d9521f6f9ed 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1292,7 +1292,7 @@ static struct nf_conntrack_helper snmp_helper __read_mostly = { .expect_policy = &snmp_exp_policy, .name = "snmp", .tuple.src.l3num = AF_INET, - .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), + .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT), .tuple.dst.protonum = IPPROTO_UDP, }; @@ -1302,7 +1302,7 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { .expect_policy = &snmp_exp_policy, .name = "snmp_trap", .tuple.src.l3num = AF_INET, - .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), + .tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT), .tuple.dst.protonum = IPPROTO_UDP, }; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 97f71153584..5caee609be0 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -151,7 +151,7 @@ static void rt_emergency_hash_rebuild(struct net *net); static struct dst_ops ipv4_dst_ops = { .family = AF_INET, - .protocol = __constant_htons(ETH_P_IP), + .protocol = cpu_to_be16(ETH_P_IP), .gc = rt_garbage_collect, .check = ipv4_dst_check, .destroy = ipv4_dst_destroy, @@ -2696,7 +2696,7 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) static struct dst_ops ipv4_dst_blackhole_ops = { .family = AF_INET, - .protocol = __constant_htons(ETH_P_IP), + .protocol = cpu_to_be16(ETH_P_IP), .destroy = ipv4_dst_destroy, .check = ipv4_dst_check, .update_pmtu = ipv4_rt_blackhole_update_pmtu, @@ -2779,7 +2779,8 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp) return ip_route_output_flow(net, rp, flp, NULL, 0); } -static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, +static int rt_fill_info(struct net *net, + struct sk_buff *skb, u32 pid, u32 seq, int event, int nowait, unsigned int flags) { struct rtable *rt = skb->rtable; @@ -2844,8 +2845,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int 
event, __be32 dst = rt->rt_dst; if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && - IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) { - int err = ipmr_get_route(skb, r, nowait); + IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { + int err = ipmr_get_route(net, skb, r, nowait); if (err <= 0) { if (!nowait) { if (err == 0) @@ -2950,7 +2951,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void if (rtm->rtm_flags & RTM_F_NOTIFY) rt->rt_flags |= RTCF_NOTIFY; - err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, + err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); if (err <= 0) goto errout_free; @@ -2988,7 +2989,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) if (rt_is_expired(rt)) continue; skb->dst = dst_clone(&rt->u.dst); - if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, + if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1, NLM_F_MULTI) <= 0) { dst_release(xchg(&skb->dst, NULL)); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 76b148bcb0d..73266b79c19 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2482,19 +2482,19 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) unsigned int mss = 1; int flush = 1; - if (!pskb_may_pull(skb, sizeof(*th))) + th = skb_gro_header(skb, sizeof(*th)); + if (unlikely(!th)) goto out; - th = tcp_hdr(skb); thlen = th->doff * 4; if (thlen < sizeof(*th)) goto out; - if (!pskb_may_pull(skb, thlen)) + th = skb_gro_header(skb, thlen); + if (unlikely(!th)) goto out; - th = tcp_hdr(skb); - __skb_pull(skb, thlen); + skb_gro_pull(skb, thlen); flags = tcp_flag_word(th); @@ -2522,10 +2522,10 @@ found: flush |= th->ack_seq != th2->ack_seq || th->window != th2->window; flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); - total = p->len; + total = skb_gro_len(p); mss = skb_shinfo(p)->gso_size; - flush |= skb->len > mss || skb->len <= 0; + flush |= skb_gro_len(skb) > mss || !skb_gro_len(skb); flush |= ntohl(th2->seq) + total != ntohl(th->seq); if (flush || skb_gro_receive(head, skb)) { @@ -2538,7 +2538,7 @@ found: tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); out_check_final: - flush = skb->len < mss; + flush = skb_gro_len(skb) < mss; flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 19d7b429a26..f6b962f56ab 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2355,7 +2355,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) switch (skb->ip_summed) { case CHECKSUM_COMPLETE: - if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr, + if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; break; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 2ad24ba31f9..60d918c96a4 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -241,7 +241,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static struct dst_ops xfrm4_dst_ops = { .family = AF_INET, - .protocol = __constant_htons(ETH_P_IP), + .protocol = cpu_to_be16(ETH_P_IP), .gc = xfrm4_garbage_collect, .update_pmtu = xfrm4_update_pmtu, .destroy = xfrm4_dst_destroy, diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index c802bc1658a..fa2ac7ee662 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -799,24 +799,34 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff 
**head, int proto; __wsum csum; - if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) + iph = skb_gro_header(skb, sizeof(*iph)); + if (unlikely(!iph)) goto out; - iph = ipv6_hdr(skb); - __skb_pull(skb, sizeof(*iph)); + skb_gro_pull(skb, sizeof(*iph)); + skb_set_transport_header(skb, skb_gro_offset(skb)); - flush += ntohs(iph->payload_len) != skb->len; + flush += ntohs(iph->payload_len) != skb_gro_len(skb); rcu_read_lock(); - proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); - iph = ipv6_hdr(skb); - IPV6_GRO_CB(skb)->proto = proto; + proto = iph->nexthdr; ops = rcu_dereference(inet6_protos[proto]); - if (!ops || !ops->gro_receive) - goto out_unlock; + if (!ops || !ops->gro_receive) { + __pskb_pull(skb, skb_gro_offset(skb)); + proto = ipv6_gso_pull_exthdrs(skb, proto); + skb_gro_pull(skb, -skb_transport_offset(skb)); + skb_reset_transport_header(skb); + __skb_push(skb, skb_gro_offset(skb)); + + if (!ops || !ops->gro_receive) + goto out_unlock; + + iph = ipv6_hdr(skb); + } + + IPV6_GRO_CB(skb)->proto = proto; flush--; - skb_reset_transport_header(skb); nlen = skb_network_header_len(skb); for (p = *head; p; p = p->next) { @@ -880,7 +890,7 @@ out_unlock: } static struct packet_type ipv6_packet_type = { - .type = __constant_htons(ETH_P_IPV6), + .type = cpu_to_be16(ETH_P_IPV6), .func = ipv6_rcv, .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9c574235c90..c3d486a3eda 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -98,7 +98,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, static struct dst_ops ip6_dst_ops_template = { .family = AF_INET6, - .protocol = __constant_htons(ETH_P_IPV6), + .protocol = cpu_to_be16(ETH_P_IPV6), .gc = ip6_dst_gc, .gc_thresh = 1024, .check = ip6_dst_check, @@ -117,7 +117,7 @@ static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) static struct dst_ops ip6_dst_blackhole_ops = { .family = AF_INET6, - .protocol = __constant_htons(ETH_P_IPV6), + .protocol = cpu_to_be16(ETH_P_IPV6), .destroy = ip6_dst_destroy, .check = ip6_dst_check, .update_pmtu = ip6_rt_blackhole_update_pmtu, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e5b85d45bee..00f1269e11e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -948,7 +948,7 @@ struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) switch (skb->ip_summed) { case CHECKSUM_COMPLETE: - if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr, + if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; break; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 97ab068e8cc..b4b16a43f27 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -272,7 +272,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static struct dst_ops xfrm6_dst_ops = { .family = AF_INET6, - .protocol = __constant_htons(ETH_P_IPV6), + .protocol = cpu_to_be16(ETH_P_IPV6), .gc = xfrm6_garbage_collect, .update_pmtu = xfrm6_update_pmtu, .destroy = xfrm6_dst_destroy, diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index b6e70f92e7f..43d0ffc6d56 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1959,12 +1959,12 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = { SOCKOPS_WRAP(ipx_dgram, PF_IPX); static struct packet_type ipx_8023_packet_type = { - .type = __constant_htons(ETH_P_802_3), + .type = cpu_to_be16(ETH_P_802_3), .func = ipx_rcv, }; static struct packet_type 
ipx_dix_packet_type = { - .type = __constant_htons(ETH_P_IPX), + .type = cpu_to_be16(ETH_P_IPX), .func = ipx_rcv, }; diff --git a/net/irda/irmod.c b/net/irda/irmod.c index 4c487a88372..1bb607f2f5c 100644 --- a/net/irda/irmod.c +++ b/net/irda/irmod.c @@ -56,7 +56,7 @@ EXPORT_SYMBOL(irda_debug); * Tell the kernel how IrDA packets should be handled. */ static struct packet_type irda_packet_type = { - .type = __constant_htons(ETH_P_IRDA), + .type = cpu_to_be16(ETH_P_IRDA), .func = irlap_driver_rcv, /* Packet type handler irlap_frame.c */ }; diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 50d5b10e23a..a7fe1adc378 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -148,12 +148,12 @@ void llc_sap_close(struct llc_sap *sap) } static struct packet_type llc_packet_type = { - .type = __constant_htons(ETH_P_802_2), + .type = cpu_to_be16(ETH_P_802_2), .func = llc_rcv, }; static struct packet_type llc_tr_packet_type = { - .type = __constant_htons(ETH_P_TR_802_2), + .type = cpu_to_be16(ETH_P_TR_802_2), .func = llc_rcv, }; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 6be5d4efa51..5c48378a852 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -149,8 +149,8 @@ static struct task_struct *sync_backup_thread; /* multicast addr */ static struct sockaddr_in mcast_addr = { .sin_family = AF_INET, - .sin_port = __constant_htons(IP_VS_SYNC_PORT), - .sin_addr.s_addr = __constant_htonl(IP_VS_SYNC_GROUP), + .sin_port = cpu_to_be16(IP_VS_SYNC_PORT), + .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), }; diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 4f8fcf49854..07d9d8857e5 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c @@ -177,7 +177,7 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { .me = THIS_MODULE, .help = amanda_help, .tuple.src.l3num = AF_INET, - .tuple.src.u.udp.port = __constant_htons(10080), + .tuple.src.u.udp.port = cpu_to_be16(10080), .tuple.dst.protonum = IPPROTO_UDP, .expect_policy = &amanda_exp_policy, }, @@ -186,7 +186,7 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { .me = THIS_MODULE, .help = amanda_help, .tuple.src.l3num = AF_INET6, - .tuple.src.u.udp.port = __constant_htons(10080), + .tuple.src.u.udp.port = cpu_to_be16(10080), .tuple.dst.protonum = IPPROTO_UDP, .expect_policy = &amanda_exp_policy, }, diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 687bd633c3d..66369490230 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -1167,7 +1167,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { .name = "Q.931", .me = THIS_MODULE, .tuple.src.l3num = AF_INET, - .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), + .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), .tuple.dst.protonum = IPPROTO_TCP, .help = q931_help, .expect_policy = &q931_exp_policy, @@ -1176,7 +1176,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { .name = "Q.931", .me = THIS_MODULE, .tuple.src.l3num = AF_INET6, - .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), + .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), .tuple.dst.protonum = IPPROTO_TCP, .help = q931_help, .expect_policy = &q931_exp_policy, @@ -1741,7 +1741,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { .name = "RAS", .me = THIS_MODULE, .tuple.src.l3num = 
AF_INET, - .tuple.src.u.udp.port = __constant_htons(RAS_PORT), + .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), .tuple.dst.protonum = IPPROTO_UDP, .help = ras_help, .expect_policy = &ras_exp_policy, @@ -1750,7 +1750,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { .name = "RAS", .me = THIS_MODULE, .tuple.src.l3num = AF_INET6, - .tuple.src.u.udp.port = __constant_htons(RAS_PORT), + .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), .tuple.dst.protonum = IPPROTO_UDP, .help = ras_help, .expect_policy = &ras_exp_policy, diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 5af4273b466..8a3875e36ec 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c @@ -105,7 +105,7 @@ static struct nf_conntrack_expect_policy exp_policy = { static struct nf_conntrack_helper helper __read_mostly = { .name = "netbios-ns", .tuple.src.l3num = AF_INET, - .tuple.src.u.udp.port = __constant_htons(NMBD_PORT), + .tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT), .tuple.dst.protonum = IPPROTO_UDP, .me = THIS_MODULE, .help = help, diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 9e169ef2e85..72cca638a82 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -591,7 +591,7 @@ static struct nf_conntrack_helper pptp __read_mostly = { .name = "pptp", .me = THIS_MODULE, .tuple.src.l3num = AF_INET, - .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT), + .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT), .tuple.dst.protonum = IPPROTO_TCP, .help = conntrack_pptp_help, .destroy = pptp_destroy_siblings, diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index e9c05b8f4f4..cba7849de98 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1432,7 +1432,7 @@ static int __init nr_proto_init(void) struct net_device *dev; sprintf(name, "nr%d", i); - dev = alloc_netdev(sizeof(struct nr_private), name, nr_setup); + dev = alloc_netdev(0, name, nr_setup); if (!dev) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); goto fail; diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 6caf459665f..351372463fe 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -42,7 +42,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) { - struct net_device_stats *stats = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; if (!netif_running(dev)) { stats->rx_dropped++; @@ -171,8 +171,7 @@ static int nr_close(struct net_device *dev) static int nr_xmit(struct sk_buff *skb, struct net_device *dev) { - struct nr_private *nr = netdev_priv(dev); - struct net_device_stats *stats = &nr->stats; + struct net_device_stats *stats = &dev->stats; unsigned int len = skb->len; if (!nr_route_frame(skb, NULL)) { @@ -187,34 +186,27 @@ static int nr_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *nr_get_stats(struct net_device *dev) -{ - struct nr_private *nr = netdev_priv(dev); - - return &nr->stats; -} - static const struct header_ops nr_header_ops = { .create = nr_header, .rebuild= nr_rebuild_header, }; +static const struct net_device_ops nr_netdev_ops = { + .ndo_open = nr_open, + .ndo_stop = nr_close, + .ndo_start_xmit = nr_xmit, + .ndo_set_mac_address = nr_set_mac_address, +}; void nr_setup(struct net_device *dev) { dev->mtu = NR_MAX_PACKET_SIZE; - dev->hard_start_xmit = nr_xmit; - dev->open = nr_open; - dev->stop = nr_close; 
- + dev->netdev_ops = &nr_netdev_ops; dev->header_ops = &nr_header_ops; dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_NETROM; - dev->set_mac_address = nr_set_mac_address; /* New-style flags. */ dev->flags = IFF_NOARP; - - dev->get_stats = nr_get_stats; } diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index 13cb323f8c3..81795ea8779 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -275,8 +275,6 @@ static inline int can_respond(struct sk_buff *skb) return 0; ph = pn_hdr(skb); - if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev) - return 0; /* we are not the destination */ if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) return 0; if (ph->pn_res == PN_COMMGR) /* indications */ @@ -344,8 +342,8 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pkttype, struct net_device *orig_dev) { + struct net *net = dev_net(dev); struct phonethdr *ph; - struct sock *sk; struct sockaddr_pn sa; u16 len; @@ -364,28 +362,28 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, skb_reset_transport_header(skb); pn_skb_get_dst_sockaddr(skb, &sa); - if (pn_sockaddr_get_addr(&sa) == 0) - goto out; /* currently, we cannot be device 0 */ - sk = pn_find_sock_by_sa(dev_net(dev), &sa); - if (sk == NULL) { + /* check if we are the destination */ + if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { + /* Phonet packet input */ + struct sock *sk = pn_find_sock_by_sa(net, &sa); + + if (sk) + return sk_receive_skb(sk, skb, 0); + if (can_respond(skb)) { send_obj_unreachable(skb); send_reset_indications(skb); } - goto out; } - /* Push data to the socket (or other sockets connected to it). */ - return sk_receive_skb(sk, skb, 0); - out: kfree_skb(skb); return NET_RX_DROP; } static struct packet_type phonet_packet_type = { - .type = __constant_htons(ETH_P_PHONET), + .type = cpu_to_be16(ETH_P_PHONET), .dev = NULL, .func = phonet_rcv, }; @@ -428,16 +426,18 @@ static int __init phonet_init(void) { int err; + err = phonet_device_init(); + if (err) + return err; + err = sock_register(&phonet_proto_family); if (err) { printk(KERN_ALERT "phonet protocol family initialization failed\n"); - return err; + goto err_sock; } - phonet_device_init(); dev_add_pack(&phonet_packet_type); - phonet_netlink_register(); phonet_sysctl_init(); err = isi_register(); @@ -449,6 +449,7 @@ err: phonet_sysctl_exit(); sock_unregister(PF_PHONET); dev_remove_pack(&phonet_packet_type); +err_sock: phonet_device_exit(); return err; } diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 5491bf5e354..80a322d7790 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -28,32 +28,41 @@ #include <linux/netdevice.h> #include <linux/phonet.h> #include <net/sock.h> +#include <net/netns/generic.h> #include <net/phonet/pn_dev.h> -/* when accessing, remember to lock with spin_lock(&pndevs.lock); */ -struct phonet_device_list pndevs = { - .list = LIST_HEAD_INIT(pndevs.list), - .lock = __SPIN_LOCK_UNLOCKED(pndevs.lock), +struct phonet_net { + struct phonet_device_list pndevs; }; +int phonet_net_id; + +struct phonet_device_list *phonet_device_list(struct net *net) +{ + struct phonet_net *pnn = net_generic(net, phonet_net_id); + return &pnn->pndevs; +} + /* Allocate new Phonet device. 
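The nr_dev.c hunk above is the same conversion applied throughout this series: per-device function pointers (dev->open, dev->stop, dev->hard_start_xmit, dev->set_mac_address) move into one const struct net_device_ops shared by every device of the type. A tiny sketch of that ops-table shape outside the kernel, with all names hypothetical:

#include <stdio.h>

struct mini_dev;

/* One shared, read-only table of operations per device type, instead
 * of a set of function pointers stored in every device instance. */
struct mini_dev_ops {
	int (*open)(struct mini_dev *dev);
	int (*stop)(struct mini_dev *dev);
};

struct mini_dev {
	const char *name;
	const struct mini_dev_ops *ops;
};

static int nr_like_open(struct mini_dev *dev)
{
	printf("%s: up\n", dev->name);
	return 0;
}

static int nr_like_stop(struct mini_dev *dev)
{
	printf("%s: down\n", dev->name);
	return 0;
}

static const struct mini_dev_ops nr_like_ops = {
	.open = nr_like_open,
	.stop = nr_like_stop,
};

int main(void)
{
	struct mini_dev dev = { .name = "nr0", .ops = &nr_like_ops };

	dev.ops->open(&dev);
	dev.ops->stop(&dev);
	return 0;
}

Besides saving a few pointers per device, the shared table never changes at runtime, which is what allows the const qualifier used in the patch.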
*/ static struct phonet_device *__phonet_device_alloc(struct net_device *dev) { + struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC); if (pnd == NULL) return NULL; pnd->netdev = dev; bitmap_zero(pnd->addrs, 64); - list_add(&pnd->list, &pndevs.list); + list_add(&pnd->list, &pndevs->list); return pnd; } static struct phonet_device *__phonet_get(struct net_device *dev) { + struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd; - list_for_each_entry(pnd, &pndevs.list, list) { + list_for_each_entry(pnd, &pndevs->list, list) { if (pnd->netdev == dev) return pnd; } @@ -68,32 +77,33 @@ static void __phonet_device_free(struct phonet_device *pnd) struct net_device *phonet_device_get(struct net *net) { + struct phonet_device_list *pndevs = phonet_device_list(net); struct phonet_device *pnd; struct net_device *dev; - spin_lock_bh(&pndevs.lock); - list_for_each_entry(pnd, &pndevs.list, list) { + spin_lock_bh(&pndevs->lock); + list_for_each_entry(pnd, &pndevs->list, list) { dev = pnd->netdev; BUG_ON(!dev); - if (net_eq(dev_net(dev), net) && - (dev->reg_state == NETREG_REGISTERED) && + if ((dev->reg_state == NETREG_REGISTERED) && ((pnd->netdev->flags & IFF_UP)) == IFF_UP) break; dev = NULL; } if (dev) dev_hold(dev); - spin_unlock_bh(&pndevs.lock); + spin_unlock_bh(&pndevs->lock); return dev; } int phonet_address_add(struct net_device *dev, u8 addr) { + struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd; int err = 0; - spin_lock_bh(&pndevs.lock); + spin_lock_bh(&pndevs->lock); /* Find or create Phonet-specific device data */ pnd = __phonet_get(dev); if (pnd == NULL) @@ -102,31 +112,33 @@ int phonet_address_add(struct net_device *dev, u8 addr) err = -ENOMEM; else if (test_and_set_bit(addr >> 2, pnd->addrs)) err = -EEXIST; - spin_unlock_bh(&pndevs.lock); + spin_unlock_bh(&pndevs->lock); return err; } int phonet_address_del(struct net_device *dev, u8 addr) { + struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd; int err = 0; - spin_lock_bh(&pndevs.lock); + spin_lock_bh(&pndevs->lock); pnd = __phonet_get(dev); if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) err = -EADDRNOTAVAIL; else if (bitmap_empty(pnd->addrs, 64)) __phonet_device_free(pnd); - spin_unlock_bh(&pndevs.lock); + spin_unlock_bh(&pndevs->lock); return err; } /* Gets a source address toward a destination, through a interface. */ u8 phonet_address_get(struct net_device *dev, u8 addr) { + struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd; - spin_lock_bh(&pndevs.lock); + spin_lock_bh(&pndevs->lock); pnd = __phonet_get(dev); if (pnd) { BUG_ON(bitmap_empty(pnd->addrs, 64)); @@ -136,30 +148,31 @@ u8 phonet_address_get(struct net_device *dev, u8 addr) addr = find_first_bit(pnd->addrs, 64) << 2; } else addr = PN_NO_ADDR; - spin_unlock_bh(&pndevs.lock); + spin_unlock_bh(&pndevs->lock); return addr; } int phonet_address_lookup(struct net *net, u8 addr) { + struct phonet_device_list *pndevs = phonet_device_list(net); struct phonet_device *pnd; + int err = -EADDRNOTAVAIL; - spin_lock_bh(&pndevs.lock); - list_for_each_entry(pnd, &pndevs.list, list) { - if (!net_eq(dev_net(pnd->netdev), net)) - continue; + spin_lock_bh(&pndevs->lock); + list_for_each_entry(pnd, &pndevs->list, list) { /* Don't allow unregistering devices! 
*/ if ((pnd->netdev->reg_state != NETREG_REGISTERED) || ((pnd->netdev->flags & IFF_UP)) != IFF_UP) continue; if (test_bit(addr >> 2, pnd->addrs)) { - spin_unlock_bh(&pndevs.lock); - return 0; + err = 0; + goto found; } } - spin_unlock_bh(&pndevs.lock); - return -EADDRNOTAVAIL; +found: + spin_unlock_bh(&pndevs->lock); + return err; } /* notify Phonet of device events */ @@ -169,14 +182,16 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what, struct net_device *dev = arg; if (what == NETDEV_UNREGISTER) { + struct phonet_device_list *pndevs; struct phonet_device *pnd; /* Destroy phonet-specific device data */ - spin_lock_bh(&pndevs.lock); + pndevs = phonet_device_list(dev_net(dev)); + spin_lock_bh(&pndevs->lock); pnd = __phonet_get(dev); if (pnd) __phonet_device_free(pnd); - spin_unlock_bh(&pndevs.lock); + spin_unlock_bh(&pndevs->lock); } return 0; @@ -187,24 +202,52 @@ static struct notifier_block phonet_device_notifier = { .priority = 0, }; -/* Initialize Phonet devices list */ -void phonet_device_init(void) +/* Per-namespace Phonet devices handling */ +static int phonet_init_net(struct net *net) { - register_netdevice_notifier(&phonet_device_notifier); + struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL); + if (!pnn) + return -ENOMEM; + + INIT_LIST_HEAD(&pnn->pndevs.list); + spin_lock_init(&pnn->pndevs.lock); + net_assign_generic(net, phonet_net_id, pnn); + return 0; } -void phonet_device_exit(void) +static void phonet_exit_net(struct net *net) { + struct phonet_net *pnn = net_generic(net, phonet_net_id); struct phonet_device *pnd, *n; - rtnl_unregister_all(PF_PHONET); - rtnl_lock(); - spin_lock_bh(&pndevs.lock); - - list_for_each_entry_safe(pnd, n, &pndevs.list, list) + list_for_each_entry_safe(pnd, n, &pnn->pndevs.list, list) __phonet_device_free(pnd); - spin_unlock_bh(&pndevs.lock); - rtnl_unlock(); + kfree(pnn); +} + +static struct pernet_operations phonet_net_ops = { + .init = phonet_init_net, + .exit = phonet_exit_net, +}; + +/* Initialize Phonet devices list */ +int __init phonet_device_init(void) +{ + int err = register_pernet_gen_device(&phonet_net_id, &phonet_net_ops); + if (err) + return err; + + register_netdevice_notifier(&phonet_device_notifier); + err = phonet_netlink_register(); + if (err) + phonet_device_exit(); + return err; +} + +void phonet_device_exit(void) +{ + rtnl_unregister_all(PF_PHONET); unregister_netdevice_notifier(&phonet_device_notifier); + unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); } diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 242fe8f8c32..1ceea1f9241 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -123,17 +123,16 @@ nla_put_failure: static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { - struct net *net = sock_net(skb->sk); + struct phonet_device_list *pndevs; struct phonet_device *pnd; int dev_idx = 0, dev_start_idx = cb->args[0]; int addr_idx = 0, addr_start_idx = cb->args[1]; - spin_lock_bh(&pndevs.lock); - list_for_each_entry(pnd, &pndevs.list, list) { + pndevs = phonet_device_list(sock_net(skb->sk)); + spin_lock_bh(&pndevs->lock); + list_for_each_entry(pnd, &pndevs->list, list) { u8 addr; - if (!net_eq(dev_net(pnd->netdev), net)) - continue; if (dev_idx > dev_start_idx) addr_start_idx = 0; if (dev_idx++ < dev_start_idx) @@ -153,16 +152,21 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } out: - spin_unlock_bh(&pndevs.lock); + spin_unlock_bh(&pndevs->lock); cb->args[0] = dev_idx; cb->args[1] = 
addr_idx; return skb->len; } -void __init phonet_netlink_register(void) +int __init phonet_netlink_register(void) { - rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); - rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); - rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); + int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); + if (err) + return err; + + /* Further __rtnl_register() cannot fail */ + __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); + __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); + return 0; } diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 01392649b46..65013962658 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1587,8 +1587,7 @@ static int __init rose_proto_init(void) char name[IFNAMSIZ]; sprintf(name, "rose%d", i); - dev = alloc_netdev(sizeof(struct net_device_stats), - name, rose_setup); + dev = alloc_netdev(0, name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); rc = -ENOMEM; diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 12cfcf09556..7dcf2569613 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -57,7 +57,7 @@ static int rose_rebuild_header(struct sk_buff *skb) { #ifdef CONFIG_INET struct net_device *dev = skb->dev; - struct net_device_stats *stats = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; unsigned char *bp = (unsigned char *)skb->data; struct sk_buff *skbn; unsigned int len; @@ -133,7 +133,7 @@ static int rose_close(struct net_device *dev) static int rose_xmit(struct sk_buff *skb, struct net_device *dev) { - struct net_device_stats *stats = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; if (!netif_running(dev)) { printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n"); @@ -144,30 +144,28 @@ static int rose_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *rose_get_stats(struct net_device *dev) -{ - return netdev_priv(dev); -} - static const struct header_ops rose_header_ops = { .create = rose_header, .rebuild= rose_rebuild_header, }; +static const struct net_device_ops rose_netdev_ops = { + .ndo_open = rose_open, + .ndo_stop = rose_close, + .ndo_start_xmit = rose_xmit, + .ndo_set_mac_address = rose_set_mac_address, +}; + void rose_setup(struct net_device *dev) { dev->mtu = ROSE_MAX_PACKET_SIZE - 2; - dev->hard_start_xmit = rose_xmit; - dev->open = rose_open; - dev->stop = rose_close; + dev->netdev_ops = &rose_netdev_ops; dev->header_ops = &rose_header_ops; dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; dev->addr_len = ROSE_ADDR_LEN; dev->type = ARPHRD_ROSE; - dev->set_mac_address = rose_set_mac_address; /* New-style flags. 
*/ dev->flags = IFF_NOARP; - dev->get_stats = rose_get_stats; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0fc4a18fd96..32009793307 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -444,6 +444,17 @@ out: } EXPORT_SYMBOL(qdisc_calculate_pkt_len); +void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) +{ + if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { + printk(KERN_WARNING + "%s: %s qdisc %X: is non-work-conserving?\n", + txt, qdisc->ops->id, qdisc->handle >> 16); + qdisc->flags |= TCQ_F_WARN_NONWC; + } +} +EXPORT_SYMBOL(qdisc_warn_nonwc); + static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) { struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 45c31b1a4e1..74226b26552 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -887,8 +887,7 @@ qdisc_peek_len(struct Qdisc *sch) skb = sch->ops->peek(sch); if (skb == NULL) { - if (net_ratelimit()) - printk("qdisc_peek_len: non work-conserving qdisc ?\n"); + qdisc_warn_nonwc("qdisc_peek_len", sch); return 0; } len = qdisc_pkt_len(skb); @@ -1642,8 +1641,7 @@ hfsc_dequeue(struct Qdisc *sch) skb = qdisc_dequeue_peeked(cl->qdisc); if (skb == NULL) { - if (net_ratelimit()) - printk("HFSC: Non-work-conserving qdisc ?\n"); + qdisc_warn_nonwc("HFSC", cl->qdisc); return NULL; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 2f0f0b04d3f..355974f610c 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -35,6 +35,7 @@ #include <linux/list.h> #include <linux/compiler.h> #include <linux/rbtree.h> +#include <linux/workqueue.h> #include <net/netlink.h> #include <net/pkt_sched.h> @@ -114,8 +115,6 @@ struct htb_class { struct tcf_proto *filter_list; int filter_cnt; - int warned; /* only one warning about non work conserving .. */ - /* token bucket parameters */ struct qdisc_rate_table *rate; /* rate table of the class itself */ struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ @@ -155,6 +154,10 @@ struct htb_sched { int direct_qlen; /* max qlen of above */ long direct_pkts; + +#define HTB_WARN_TOOMANYEVENTS 0x1 + unsigned int warned; /* only one warning */ + struct work_struct work; }; /* find class in global hash table using given handle */ @@ -658,7 +661,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, * htb_do_events - make mode changes to classes at the level * * Scans event queue for pending events and applies them. Returns time of - * next pending event (0 for no event in pq). + * next pending event (0 for no event in pq, q->now for too many events). * Note: Applied are events whose have cl->pq_key <= q->now. */ static psched_time_t htb_do_events(struct htb_sched *q, int level, @@ -686,8 +689,14 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, if (cl->cmode != HTB_CAN_SEND) htb_add_to_wait_tree(q, cl, diff); } - /* too much load - let's continue on next jiffie (including above) */ - return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ; + + /* too much load - let's continue after a break for scheduling */ + if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { + printk(KERN_WARNING "htb: too many events!\n"); + q->warned |= HTB_WARN_TOOMANYEVENTS; + } + + return q->now; } /* Returns class->node+prio from id-tree where classe's id is >= id. 
NULL @@ -809,13 +818,8 @@ next: skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); if (likely(skb != NULL)) break; - if (!cl->warned) { - printk(KERN_WARNING - "htb: class %X isn't work conserving ?!\n", - cl->common.classid); - cl->warned = 1; - } + qdisc_warn_nonwc("htb", cl->un.leaf.q); htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> ptr[0]) + prio); cl = htb_lookup_leaf(q->row[level] + prio, prio, @@ -892,7 +896,10 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) } } sch->qstats.overlimits++; - qdisc_watchdog_schedule(&q->watchdog, next_event); + if (likely(next_event > q->now)) + qdisc_watchdog_schedule(&q->watchdog, next_event); + else + schedule_work(&q->work); fin: return skb; } @@ -962,6 +969,14 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = { [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, }; +static void htb_work_func(struct work_struct *work) +{ + struct htb_sched *q = container_of(work, struct htb_sched, work); + struct Qdisc *sch = q->watchdog.qdisc; + + __netif_schedule(qdisc_root(sch)); +} + static int htb_init(struct Qdisc *sch, struct nlattr *opt) { struct htb_sched *q = qdisc_priv(sch); @@ -996,6 +1011,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) INIT_LIST_HEAD(q->drops + i); qdisc_watchdog_init(&q->watchdog, sch); + INIT_WORK(&q->work, htb_work_func); skb_queue_head_init(&q->direct_queue); q->direct_qlen = qdisc_dev(sch)->tx_queue_len; @@ -1188,7 +1204,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) kfree(cl); } -/* always caled under BH & queue lock */ static void htb_destroy(struct Qdisc *sch) { struct htb_sched *q = qdisc_priv(sch); @@ -1196,6 +1211,7 @@ static void htb_destroy(struct Qdisc *sch) struct htb_class *cl; unsigned int i; + cancel_work_sync(&q->work); qdisc_watchdog_cancel(&q->watchdog); /* This line used to be after htb_destroy_class call below and surprisingly it worked in 2.4. 
But it must precede it diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ceaa4aa066e..78622756669 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -97,8 +97,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_port = 0; - memcpy(&addr->a.v6.sin6_addr, &ifa->addr, - sizeof(struct in6_addr)); + ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr); addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; addr->valid = 1; spin_lock_bh(&sctp_local_addr_lock); diff --git a/net/sctp/output.c b/net/sctp/output.c index 73639355157..47bfba6c03e 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -367,7 +367,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; struct sctphdr *sh; - __be32 crc32 = __constant_cpu_to_be32(0); + __be32 crc32 = cpu_to_be32(0); struct sk_buff *nskb; struct sctp_chunk *chunk, *tmp; struct sock *sk; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index fd8acb48c3f..b40e95f9851 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -100,11 +100,11 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk) */ static const struct sctp_paramhdr ecap_param = { SCTP_PARAM_ECN_CAPABLE, - __constant_htons(sizeof(struct sctp_paramhdr)), + cpu_to_be16(sizeof(struct sctp_paramhdr)), }; static const struct sctp_paramhdr prsctp_param = { SCTP_PARAM_FWD_TSN_SUPPORT, - __constant_htons(sizeof(struct sctp_paramhdr)), + cpu_to_be16(sizeof(struct sctp_paramhdr)), }; /* A helper to initialize to initialize an op error inside a diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 9fc5b023d11..8f76f4009c2 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1609,7 +1609,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = { SOCKOPS_WRAP(x25_proto, AF_X25); static struct packet_type x25_packet_type = { - .type = __constant_htons(ETH_P_X25), + .type = cpu_to_be16(ETH_P_X25), .func = x25_lapb_receive_frame, };
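
Editor's note: the recurring pattern in the netrom, rose and synclink hunks above is the conversion from per-device callback pointers to one shared const struct net_device_ops, together with dropping the driver-private statistics (and the ->get_stats callback) in favour of the counters embedded in struct net_device. The following is a minimal, hypothetical sketch of that pattern, not code from this patch; the foo_* names are placeholders.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int foo_open(struct net_device *dev)
	{
		netif_start_queue(dev);
		return 0;
	}

	static int foo_close(struct net_device *dev)
	{
		netif_stop_queue(dev);
		return 0;
	}

	static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* counters now live in struct net_device itself, so the private
		 * copy and the ->get_stats callback both disappear */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open       = foo_open,	/* was dev->open */
		.ndo_stop       = foo_close,	/* was dev->stop */
		.ndo_start_xmit = foo_xmit,	/* was dev->hard_start_xmit */
	};

	static void foo_setup(struct net_device *dev)
	{
		dev->netdev_ops = &foo_netdev_ops;	/* one assignment replaces them all */
		dev->flags = IFF_NOARP;
	}

When the only private state was a struct net_device_stats, the alloc_netdev() private-area size can shrink to 0, which is exactly what the af_netrom.c and af_rose.c hunks do.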
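The many __constant_htons()/__constant_cpu_to_be32() to cpu_to_be16()/cpu_to_be32() substitutions work because the unprefixed helpers already fold to compile-time constants when given constant arguments, so they are valid in static initializers and the __constant_ spellings add nothing. A tiny illustrative sketch (FOO_PORT is a made-up constant, not a symbol from the patch):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define FOO_PORT 1719	/* hypothetical well-known port */

	/* legal as a static initializer: cpu_to_be16() is constant-folded here */
	static const __be16 foo_port = cpu_to_be16(FOO_PORT);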
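The phonet hunks move the global device list into per-network-namespace state. A hypothetical sketch of that pattern as it existed in this kernel generation (register_pernet_gen_device() plus net_generic(); again the foo_* names are illustrative only):

	#include <linux/slab.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	struct foo_net {
		struct list_head devices;	/* per-namespace device list */
		spinlock_t lock;
	};

	static int foo_net_id;	/* assigned by register_pernet_gen_device() */

	static int foo_init_net(struct net *net)
	{
		struct foo_net *fn = kzalloc(sizeof(*fn), GFP_KERNEL);

		if (!fn)
			return -ENOMEM;
		INIT_LIST_HEAD(&fn->devices);
		spin_lock_init(&fn->lock);
		net_assign_generic(net, foo_net_id, fn);
		return 0;
	}

	static void foo_exit_net(struct net *net)
	{
		kfree(net_generic(net, foo_net_id));
	}

	static struct pernet_operations foo_net_ops = {
		.init = foo_init_net,
		.exit = foo_exit_net,
	};

	/* module init/exit then pair:
	 *	err = register_pernet_gen_device(&foo_net_id, &foo_net_ops);
	 *	...
	 *	unregister_pernet_gen_device(foo_net_id, &foo_net_ops);
	 */

Because every lookup goes through net_generic(net, foo_net_id), the per-entry net_eq() checks that the old global-list code needed can be deleted, which is why the pn_dev.c and pn_netlink.c hunks drop them.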