Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c523.c | 2
-rw-r--r--  drivers/net/7990.c | 4
-rw-r--r--  drivers/net/8139too.c | 2
-rw-r--r--  drivers/net/Kconfig | 21
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/a2065.c | 4
-rw-r--r--  drivers/net/ariadne.c | 2
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/au1000_eth.c | 4
-rw-r--r--  drivers/net/bnx2.c | 503
-rw-r--r--  drivers/net/bnx2.h | 66
-rw-r--r--  drivers/net/dl2k.c | 4
-rw-r--r--  drivers/net/dummy.c | 82
-rw-r--r--  drivers/net/eepro100.c | 2
-rw-r--r--  drivers/net/epic100.c | 2
-rw-r--r--  drivers/net/fealnx.c | 4
-rw-r--r--  drivers/net/fec.c | 2
-rw-r--r--  drivers/net/hamachi.c | 4
-rw-r--r--  drivers/net/ifb.c | 78
-rw-r--r--  drivers/net/irda/kingsun-sir.c | 4
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 27
-rw-r--r--  drivers/net/irda/vlsi_ir.h | 2
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 2
-rw-r--r--  drivers/net/lance.c | 4
-rw-r--r--  drivers/net/natsemi.c | 4
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/ni65.c | 4
-rw-r--r--  drivers/net/pci-skeleton.c | 2
-rw-r--r--  drivers/net/pcnet32.c | 4
-rw-r--r--  drivers/net/pppol2tp.c | 2486
-rw-r--r--  drivers/net/saa9730.c | 4
-rw-r--r--  drivers/net/sgiseeq.c | 2
-rw-r--r--  drivers/net/sis190.c | 2
-rw-r--r--  drivers/net/starfire.c | 2
-rw-r--r--  drivers/net/sun3_82586.c | 2
-rw-r--r--  drivers/net/sun3lance.c | 5
-rw-r--r--  drivers/net/sunbmac.c | 2
-rw-r--r--  drivers/net/sundance.c | 2
-rw-r--r--  drivers/net/sunlance.c | 4
-rw-r--r--  drivers/net/sunqe.c | 4
-rw-r--r--  drivers/net/tg3.c | 140
-rw-r--r--  drivers/net/tg3.h | 9
-rw-r--r--  drivers/net/tulip/interrupt.c | 8
-rw-r--r--  drivers/net/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 2
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c | 4
-rw-r--r--  drivers/net/tun.c | 15
-rw-r--r--  drivers/net/typhoon.c | 2
-rw-r--r--  drivers/net/usb/catc.c | 2
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/via-rhine.c | 4
-rw-r--r--  drivers/net/wireless/wl3501_cs.c | 2
-rw-r--r--  drivers/net/yellowfin.c | 2
53 files changed, 3284 insertions, 269 deletions
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index da1a22c1386..ab18343e58e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -990,7 +990,7 @@ static void elmc_rcv_int(struct net_device *dev)
if (skb != NULL) {
skb_reserve(skb, 2); /* 16 byte alignment */
skb_put(skb,totlen);
- eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 0877fc372f4..e89ace109a5 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -333,9 +333,9 @@ static int lance_rx (struct net_device *dev)
skb_reserve (skb, 2); /* 16 byte align */
skb_put (skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a844b1fe2dc..21a6ccbf92e 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2017,7 +2017,7 @@ no_early_rx:
#if RX_BUF_IDX == 3
wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
#else
- eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+ skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
#endif
skb_put (skb, pkt_size);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 80572e2c9da..ba314adf68b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -25,6 +25,14 @@ menuconfig NETDEVICES
# that for each of the symbols.
if NETDEVICES
+config NETDEVICES_MULTIQUEUE
+ bool "Netdevice multiple hardware queue support"
+ ---help---
+ Say Y here if you want to allow the network stack to use multiple
+ hardware TX queues on an ethernet device.
+
+ Most people will say N here.
+
config IFB
tristate "Intermediate Functional Block support"
depends on NET_CLS_ACT
@@ -2784,6 +2792,19 @@ config PPPOATM
which can lead to bad results if the ATM peer loses state and
changes its encapsulation unilaterally.
+config PPPOL2TP
+ tristate "PPP over L2TP (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PPP
+ help
+ Support for PPP-over-L2TP socket family. L2TP is a protocol
+ used by ISPs and enterprises to tunnel PPP traffic over UDP
+ tunnels. L2TP is replacing PPTP for VPN uses.
+
+ This kernel component handles only L2TP data packets: a
+ userland daemon handles the L2TP control protocol (tunnel
+ and session setup). One such daemon is OpenL2TP
+ (http://openl2tp.sourceforge.net/).
+
config SLIP
tristate "SLIP (serial line) support"
---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1bbcbedad04..a2241e6e145 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -121,6 +121,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 81d5a374042..a45de6975bf 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -322,9 +322,9 @@ static int lance_rx (struct net_device *dev)
skb_reserve (skb, 2); /* 16 byte align */
skb_put (skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index a241ae7855a..bc5a38a6705 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -746,7 +746,7 @@ static int ariadne_rx(struct net_device *dev)
skb_reserve(skb,2); /* 16 byte align */
skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0);
+ skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len);
skb->protocol=eth_type_trans(skb,dev);
#if 0
printk(KERN_DEBUG "RX pkt type 0x%04x from ",
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 2438c5bff23..f6ece1d43f6 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -258,7 +258,7 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
skb_reserve(skb, 2);
dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
length, DMA_FROM_DEVICE);
- eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0);
+ skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index c27cfcef45f..e86b3691765 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1205,8 +1205,8 @@ static int au1000_rx(struct net_device *dev)
continue;
}
skb_reserve(skb, 2); /* 16 byte IP header align */
- eth_copy_and_sum(skb,
- (unsigned char *)pDB->vaddr, frmlen, 0);
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)pDB->vaddr, frmlen);
skb_put(skb, frmlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* pass the packet to upper layers */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ce3ed67a878..d681903c592 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -40,7 +40,6 @@
#define BCM_VLAN 1
#endif
#include <net/ip.h>
-#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
@@ -54,8 +53,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.5.11"
-#define DRV_MODULE_RELDATE "June 4, 2007"
+#define DRV_MODULE_VERSION "1.6.2"
+#define DRV_MODULE_RELDATE "July 6, 2007"
#define RUN_AT(x) (jiffies + (x))
@@ -550,6 +549,9 @@ bnx2_report_fw_link(struct bnx2 *bp)
{
u32 fw_link_status = 0;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return;
+
if (bp->link_up) {
u32 bmsr;
@@ -601,12 +603,21 @@ bnx2_report_fw_link(struct bnx2 *bp)
REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
+static char *
+bnx2_xceiver_str(struct bnx2 *bp)
+{
+ return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
+ ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
+ "Copper"));
+}
+
static void
bnx2_report_link(struct bnx2 *bp)
{
if (bp->link_up) {
netif_carrier_on(bp->dev);
- printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
+ printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
+ bnx2_xceiver_str(bp));
printk("%d Mbps ", bp->line_speed);
@@ -630,7 +641,8 @@ bnx2_report_link(struct bnx2 *bp)
}
else {
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+ printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
+ bnx2_xceiver_str(bp));
}
bnx2_report_fw_link(bp);
@@ -1100,6 +1112,9 @@ bnx2_set_link(struct bnx2 *bp)
return 0;
}
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return 0;
+
link_up = bp->link_up;
bnx2_enable_bmsr1(bp);
@@ -1210,12 +1225,74 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
return adv;
}
+static int bnx2_fw_sync(struct bnx2 *, u32, int);
+
static int
-bnx2_setup_serdes_phy(struct bnx2 *bp)
+bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
+{
+ u32 speed_arg = 0, pause_adv;
+
+ pause_adv = bnx2_phy_get_pause_adv(bp);
+
+ if (bp->autoneg & AUTONEG_SPEED) {
+ speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
+ if (bp->advertising & ADVERTISED_10baseT_Half)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+ if (bp->advertising & ADVERTISED_10baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+ if (bp->advertising & ADVERTISED_100baseT_Half)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+ if (bp->advertising & ADVERTISED_100baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+ if (bp->advertising & ADVERTISED_1000baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+ if (bp->advertising & ADVERTISED_2500baseX_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+ } else {
+ if (bp->req_line_speed == SPEED_2500)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+ else if (bp->req_line_speed == SPEED_1000)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+ else if (bp->req_line_speed == SPEED_100) {
+ if (bp->req_duplex == DUPLEX_FULL)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+ else
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+ } else if (bp->req_line_speed == SPEED_10) {
+ if (bp->req_duplex == DUPLEX_FULL)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+ else
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+ }
+ }
+
+ if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
+ speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
+ if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
+ speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
+
+ if (port == PORT_TP)
+ speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
+ BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
+
+ REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
+
+ spin_unlock_bh(&bp->phy_lock);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
+ spin_lock_bh(&bp->phy_lock);
+
+ return 0;
+}
+
+static int
+bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
u32 adv, bmcr;
u32 new_adv = 0;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return (bnx2_setup_remote_phy(bp, port));
+
if (!(bp->autoneg & AUTONEG_SPEED)) {
u32 new_bmcr;
int force_link_down = 0;
@@ -1323,7 +1400,9 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
}
#define ETHTOOL_ALL_FIBRE_SPEED \
- (ADVERTISED_1000baseT_Full)
+ (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
+ (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
+ (ADVERTISED_1000baseT_Full)
#define ETHTOOL_ALL_COPPER_SPEED \
(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
@@ -1335,6 +1414,188 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
+static void
+bnx2_set_default_remote_link(struct bnx2 *bp)
+{
+ u32 link;
+
+ if (bp->phy_port == PORT_TP)
+ link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
+ else
+ link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
+
+ if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
+ bp->req_line_speed = 0;
+ bp->autoneg |= AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_Autoneg;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+ bp->advertising |= ADVERTISED_10baseT_Half;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+ bp->advertising |= ADVERTISED_10baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+ bp->advertising |= ADVERTISED_100baseT_Half;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+ bp->advertising |= ADVERTISED_100baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+ bp->advertising |= ADVERTISED_1000baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+ bp->advertising |= ADVERTISED_2500baseX_Full;
+ } else {
+ bp->autoneg = 0;
+ bp->advertising = 0;
+ bp->req_duplex = DUPLEX_FULL;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
+ bp->req_line_speed = SPEED_10;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+ bp->req_duplex = DUPLEX_HALF;
+ }
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
+ bp->req_line_speed = SPEED_100;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+ bp->req_duplex = DUPLEX_HALF;
+ }
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+ bp->req_line_speed = SPEED_1000;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+ bp->req_line_speed = SPEED_2500;
+ }
+}
+
+static void
+bnx2_set_default_link(struct bnx2 *bp)
+{
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return bnx2_set_default_remote_link(bp);
+
+ bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
+ bp->req_line_speed = 0;
+ if (bp->phy_flags & PHY_SERDES_FLAG) {
+ u32 reg;
+
+ bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
+
+ reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
+ reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
+ if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
+ bp->autoneg = 0;
+ bp->req_line_speed = bp->line_speed = SPEED_1000;
+ bp->req_duplex = DUPLEX_FULL;
+ }
+ } else
+ bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
+}
+
+static void
+bnx2_send_heart_beat(struct bnx2 *bp)
+{
+ u32 msg;
+ u32 addr;
+
+ spin_lock(&bp->indirect_lock);
+ msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
+ addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
+ spin_unlock(&bp->indirect_lock);
+}
+
+static void
+bnx2_remote_phy_event(struct bnx2 *bp)
+{
+ u32 msg;
+ u8 link_up = bp->link_up;
+ u8 old_port;
+
+ msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
+
+ if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
+ bnx2_send_heart_beat(bp);
+
+ msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
+
+ if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
+ bp->link_up = 0;
+ else {
+ u32 speed;
+
+ bp->link_up = 1;
+ speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
+ bp->duplex = DUPLEX_FULL;
+ switch (speed) {
+ case BNX2_LINK_STATUS_10HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_10FULL:
+ bp->line_speed = SPEED_10;
+ break;
+ case BNX2_LINK_STATUS_100HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_100BASE_T4:
+ case BNX2_LINK_STATUS_100FULL:
+ bp->line_speed = SPEED_100;
+ break;
+ case BNX2_LINK_STATUS_1000HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_1000FULL:
+ bp->line_speed = SPEED_1000;
+ break;
+ case BNX2_LINK_STATUS_2500HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_2500FULL:
+ bp->line_speed = SPEED_2500;
+ break;
+ default:
+ bp->line_speed = 0;
+ break;
+ }
+
+ spin_lock(&bp->phy_lock);
+ bp->flow_ctrl = 0;
+ if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+ (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+ if (bp->duplex == DUPLEX_FULL)
+ bp->flow_ctrl = bp->req_flow_ctrl;
+ } else {
+ if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
+ bp->flow_ctrl |= FLOW_CTRL_TX;
+ if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
+ bp->flow_ctrl |= FLOW_CTRL_RX;
+ }
+
+ old_port = bp->phy_port;
+ if (msg & BNX2_LINK_STATUS_SERDES_LINK)
+ bp->phy_port = PORT_FIBRE;
+ else
+ bp->phy_port = PORT_TP;
+
+ if (old_port != bp->phy_port)
+ bnx2_set_default_link(bp);
+
+ spin_unlock(&bp->phy_lock);
+ }
+ if (bp->link_up != link_up)
+ bnx2_report_link(bp);
+
+ bnx2_set_mac_link(bp);
+}
+
+static int
+bnx2_set_remote_link(struct bnx2 *bp)
+{
+ u32 evt_code;
+
+ evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
+ switch (evt_code) {
+ case BNX2_FW_EVT_CODE_LINK_EVENT:
+ bnx2_remote_phy_event(bp);
+ break;
+ case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
+ default:
+ bnx2_send_heart_beat(bp);
+ break;
+ }
+ return 0;
+}
+
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
@@ -1433,13 +1694,13 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
}
static int
-bnx2_setup_phy(struct bnx2 *bp)
+bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
if (bp->loopback == MAC_LOOPBACK)
return 0;
if (bp->phy_flags & PHY_SERDES_FLAG) {
- return (bnx2_setup_serdes_phy(bp));
+ return (bnx2_setup_serdes_phy(bp, port));
}
else {
return (bnx2_setup_copper_phy(bp));
@@ -1659,6 +1920,9 @@ bnx2_init_phy(struct bnx2 *bp)
REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ goto setup_phy;
+
bnx2_read_phy(bp, MII_PHYSID1, &val);
bp->phy_id = val << 16;
bnx2_read_phy(bp, MII_PHYSID2, &val);
@@ -1676,7 +1940,9 @@ bnx2_init_phy(struct bnx2 *bp)
rc = bnx2_init_copper_phy(bp);
}
- bnx2_setup_phy(bp);
+setup_phy:
+ if (!rc)
+ rc = bnx2_setup_phy(bp, bp->phy_port);
return rc;
}
@@ -1984,6 +2250,9 @@ bnx2_phy_int(struct bnx2 *bp)
bnx2_set_link(bp);
spin_unlock(&bp->phy_lock);
}
+ if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
+ bnx2_set_remote_link(bp);
+
}
static void
@@ -2297,6 +2566,7 @@ bnx2_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct bnx2 *bp = netdev_priv(dev);
+ struct status_block *sblk = bp->status_blk;
/* When using INTx, it is possible for the interrupt to arrive
* at the CPU before the status block posted prior to the
@@ -2304,7 +2574,7 @@ bnx2_interrupt(int irq, void *dev_instance)
* When using MSI, the MSI message will always complete after
* the status block write.
*/
- if ((bp->status_blk->status_idx == bp->last_status_idx) &&
+ if ((sblk->status_idx == bp->last_status_idx) &&
(REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
return IRQ_NONE;
@@ -2313,16 +2583,25 @@ bnx2_interrupt(int irq, void *dev_instance)
BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ /* Read back to deassert IRQ immediately to avoid too many
+ * spurious interrupts.
+ */
+ REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(dev);
+ if (netif_rx_schedule_prep(dev)) {
+ bp->last_status_idx = sblk->status_idx;
+ __netif_rx_schedule(dev);
+ }
return IRQ_HANDLED;
}
-#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
+#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
+ STATUS_ATTN_BITS_TIMER_ABORT)
static inline int
bnx2_has_work(struct bnx2 *bp)
@@ -3562,6 +3841,36 @@ nvram_write_end:
return rc;
}
+static void
+bnx2_init_remote_phy(struct bnx2 *bp)
+{
+ u32 val;
+
+ bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
+ if (!(bp->phy_flags & PHY_SERDES_FLAG))
+ return;
+
+ val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
+ if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
+ return;
+
+ if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
+ if (netif_running(bp->dev)) {
+ val = BNX2_DRV_ACK_CAP_SIGNATURE |
+ BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
+ REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
+ val);
+ }
+ bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
+
+ val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
+ if (val & BNX2_LINK_STATUS_SERDES_LINK)
+ bp->phy_port = PORT_FIBRE;
+ else
+ bp->phy_port = PORT_TP;
+ }
+}
+
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
@@ -3642,6 +3951,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
if (rc)
return rc;
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_init_remote_phy(bp);
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ bnx2_set_default_remote_link(bp);
+ spin_unlock_bh(&bp->phy_lock);
+
if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
/* Adjust the voltage regular to two steps lower. The default
* of this register is 0x0000000e. */
@@ -3826,7 +4141,7 @@ bnx2_init_chip(struct bnx2 *bp)
rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
0);
- REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
+ REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
udelay(20);
@@ -4069,8 +4384,8 @@ bnx2_init_nic(struct bnx2 *bp)
spin_lock_bh(&bp->phy_lock);
bnx2_init_phy(bp);
- spin_unlock_bh(&bp->phy_lock);
bnx2_set_link(bp);
+ spin_unlock_bh(&bp->phy_lock);
return 0;
}
@@ -4600,6 +4915,9 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return;
+
if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
bp->serdes_an_pending = 0;
return;
@@ -4631,7 +4949,6 @@ static void
bnx2_timer(unsigned long data)
{
struct bnx2 *bp = (struct bnx2 *) data;
- u32 msg;
if (!netif_running(bp->dev))
return;
@@ -4639,8 +4956,7 @@ bnx2_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnx2_restart_timer;
- msg = (u32) ++bp->fw_drv_pulse_wr_seq;
- REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+ bnx2_send_heart_beat(bp);
bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
@@ -5083,17 +5399,25 @@ static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2 *bp = netdev_priv(dev);
+ int support_serdes = 0, support_copper = 0;
cmd->supported = SUPPORTED_Autoneg;
- if (bp->phy_flags & PHY_SERDES_FLAG) {
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
+ support_serdes = 1;
+ support_copper = 1;
+ } else if (bp->phy_port == PORT_FIBRE)
+ support_serdes = 1;
+ else
+ support_copper = 1;
+
+ if (support_serdes) {
cmd->supported |= SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE;
if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
cmd->supported |= SUPPORTED_2500baseX_Full;
- cmd->port = PORT_FIBRE;
}
- else {
+ if (support_copper) {
cmd->supported |= SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -5101,9 +5425,10 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
SUPPORTED_1000baseT_Full |
SUPPORTED_TP;
- cmd->port = PORT_TP;
}
+ spin_lock_bh(&bp->phy_lock);
+ cmd->port = bp->phy_port;
cmd->advertising = bp->advertising;
if (bp->autoneg & AUTONEG_SPEED) {
@@ -5121,6 +5446,7 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = -1;
cmd->duplex = -1;
}
+ spin_unlock_bh(&bp->phy_lock);
cmd->transceiver = XCVR_INTERNAL;
cmd->phy_address = bp->phy_addr;
@@ -5136,6 +5462,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
u8 req_duplex = bp->req_duplex;
u16 req_line_speed = bp->req_line_speed;
u32 advertising = bp->advertising;
+ int err = -EINVAL;
+
+ spin_lock_bh(&bp->phy_lock);
+
+ if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
+ goto err_out_unlock;
+
+ if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
+ goto err_out_unlock;
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
@@ -5148,44 +5483,41 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(cmd->advertising == ADVERTISED_100baseT_Half) ||
(cmd->advertising == ADVERTISED_100baseT_Full)) {
- if (bp->phy_flags & PHY_SERDES_FLAG)
- return -EINVAL;
+ if (cmd->port == PORT_FIBRE)
+ goto err_out_unlock;
advertising = cmd->advertising;
} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
- if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
- return -EINVAL;
- } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
+ if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
+ (cmd->port == PORT_TP))
+ goto err_out_unlock;
+ } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
advertising = cmd->advertising;
- }
- else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
- return -EINVAL;
- }
+ else if (cmd->advertising == ADVERTISED_1000baseT_Half)
+ goto err_out_unlock;
else {
- if (bp->phy_flags & PHY_SERDES_FLAG) {
+ if (cmd->port == PORT_FIBRE)
advertising = ETHTOOL_ALL_FIBRE_SPEED;
- }
- else {
+ else
advertising = ETHTOOL_ALL_COPPER_SPEED;
- }
}
advertising |= ADVERTISED_Autoneg;
}
else {
- if (bp->phy_flags & PHY_SERDES_FLAG) {
+ if (cmd->port == PORT_FIBRE) {
if ((cmd->speed != SPEED_1000 &&
cmd->speed != SPEED_2500) ||
(cmd->duplex != DUPLEX_FULL))
- return -EINVAL;
+ goto err_out_unlock;
if (cmd->speed == SPEED_2500 &&
!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
- return -EINVAL;
- }
- else if (cmd->speed == SPEED_1000) {
- return -EINVAL;
+ goto err_out_unlock;
}
+ else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
+ goto err_out_unlock;
+
autoneg &= ~AUTONEG_SPEED;
req_line_speed = cmd->speed;
req_duplex = cmd->duplex;
@@ -5197,13 +5529,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->req_line_speed = req_line_speed;
bp->req_duplex = req_duplex;
- spin_lock_bh(&bp->phy_lock);
-
- bnx2_setup_phy(bp);
+ err = bnx2_setup_phy(bp, cmd->port);
+err_out_unlock:
spin_unlock_bh(&bp->phy_lock);
- return 0;
+ return err;
}
static void
@@ -5214,11 +5545,7 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strcpy(info->driver, DRV_MODULE_NAME);
strcpy(info->version, DRV_MODULE_VERSION);
strcpy(info->bus_info, pci_name(bp->pdev));
- info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
- info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
- info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
- info->fw_version[1] = info->fw_version[3] = '.';
- info->fw_version[5] = 0;
+ strcpy(info->fw_version, bp->fw_version);
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -5330,6 +5657,14 @@ bnx2_nway_reset(struct net_device *dev)
spin_lock_bh(&bp->phy_lock);
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
+ int rc;
+
+ rc = bnx2_setup_remote_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
+ return rc;
+ }
+
/* Force a link down visible on the other side */
if (bp->phy_flags & PHY_SERDES_FLAG) {
bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
@@ -5543,7 +5878,7 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
spin_lock_bh(&bp->phy_lock);
- bnx2_setup_phy(bp);
+ bnx2_setup_phy(bp, bp->phy_port);
spin_unlock_bh(&bp->phy_lock);
@@ -5939,6 +6274,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG: {
u32 mii_regval;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return -EOPNOTSUPP;
+
if (!netif_running(dev))
return -EAGAIN;
@@ -5955,6 +6293,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return -EOPNOTSUPP;
+
if (!netif_running(dev))
return -EAGAIN;
@@ -6116,7 +6457,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
struct bnx2 *bp;
unsigned long mem_len;
- int rc;
+ int rc, i, j;
u32 reg;
u64 dma_mask, persist_dma_mask;
@@ -6273,7 +6614,35 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_unmap;
}
- bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
+ reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
+ for (i = 0, j = 0; i < 3; i++) {
+ u8 num, k, skip0;
+
+ num = (u8) (reg >> (24 - (i * 8)));
+ for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
+ if (num >= k || !skip0 || k == 1) {
+ bp->fw_version[j++] = (num / k) + '0';
+ skip0 = 0;
+ }
+ }
+ if (i != 2)
+ bp->fw_version[j++] = '.';
+ }
+ reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
+ reg &= BNX2_CONDITION_MFW_RUN_MASK;
+ if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
+ reg != BNX2_CONDITION_MFW_RUN_NONE) {
+ int i;
+ u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
+
+ bp->fw_version[j++] = ' ';
+ for (i = 0; i < 3; i++) {
+ reg = REG_RD_IND(bp, addr + i * 4);
+ reg = swab32(reg);
+ memcpy(&bp->fw_version[j], &reg, 4);
+ j += 4;
+ }
+ }
reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
bp->mac_addr[0] = (u8) (reg >> 8);
@@ -6315,7 +6684,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
bp->phy_flags |= PHY_SERDES_FLAG;
+ bp->phy_port = PORT_TP;
if (bp->phy_flags & PHY_SERDES_FLAG) {
+ bp->phy_port = PORT_FIBRE;
bp->flags |= NO_WOL_FLAG;
if (CHIP_NUM(bp) != CHIP_NUM_5706) {
bp->phy_addr = 2;
@@ -6324,6 +6695,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
}
+ bnx2_init_remote_phy(bp);
+
} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
CHIP_NUM(bp) == CHIP_NUM_5708)
bp->phy_flags |= PHY_CRC_FIX_FLAG;
@@ -6374,23 +6747,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
}
}
- bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
- bp->req_line_speed = 0;
- if (bp->phy_flags & PHY_SERDES_FLAG) {
- bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
-
- reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
- reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
- if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
- bp->autoneg = 0;
- bp->req_line_speed = bp->line_speed = SPEED_1000;
- bp->req_duplex = DUPLEX_FULL;
- }
- }
- else {
- bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
- }
-
+ bnx2_set_default_link(bp);
bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
init_timer(&bp->timer);
@@ -6490,10 +6847,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(dev->perm_addr, bp->mac_addr, 6);
bp->name = board_info[ent->driver_data].name;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (CHIP_NUM(bp) == CHIP_NUM_5709)
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
- else
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IPV6_CSUM;
+
#ifdef BCM_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 49a5de253b1..d8cd1afeb23 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6338,6 +6338,8 @@ struct l2_fhdr {
#define RX_COPY_THRESH 92
+#define BNX2_MISC_ENABLE_DEFAULT 0x7ffffff
+
#define DMA_READ_CHANS 5
#define DMA_WRITE_CHANS 3
@@ -6537,6 +6539,7 @@ struct bnx2 {
#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100
#define PHY_INT_MODE_LINK_READY_FLAG 0x200
#define PHY_DIS_EARLY_DAC_FLAG 0x400
+#define REMOTE_PHY_CAP_FLAG 0x800
u32 mii_bmcr;
u32 mii_bmsr;
@@ -6625,6 +6628,7 @@ struct bnx2 {
u16 req_line_speed;
u8 req_duplex;
+ u8 phy_port;
u8 link_up;
u16 line_speed;
@@ -6656,7 +6660,7 @@ struct bnx2 {
u32 shmem_base;
- u32 fw_ver;
+ char fw_version[32];
int pm_cap;
int pcix_cap;
@@ -6770,7 +6774,7 @@ struct fw_info {
* the firmware has timed out, the driver will assume there is no firmware
* running and there won't be any firmware-driver synchronization during a
* driver reset. */
-#define FW_ACK_TIME_OUT_MS 100
+#define FW_ACK_TIME_OUT_MS 1000
#define BNX2_DRV_RESET_SIGNATURE 0x00000000
@@ -6788,6 +6792,7 @@ struct fw_info {
#define BNX2_DRV_MSG_CODE_DIAG 0x07000000
#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000
#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000
+#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000
#define BNX2_DRV_MSG_DATA 0x00ff0000
#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
@@ -6836,6 +6841,7 @@ struct fw_info {
#define BNX2_LINK_STATUS_SERDES_LINK (1<<20)
#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21)
#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22)
+#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31)
#define BNX2_DRV_PULSE_MB 0x00000010
#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff
@@ -6845,6 +6851,30 @@ struct fw_info {
* This is used for debugging. */
#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000
+#define BNX2_DRV_MB_ARG0 0x00000014
+#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0)
+#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1)
+#define BNX2_NETLINK_SET_LINK_SPEED_10 \
+ (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \
+ BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2)
+#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3)
+#define BNX2_NETLINK_SET_LINK_SPEED_100 \
+ (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \
+ BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9)
+#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10)
+#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11)
+#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12)
+#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13)
+#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14)
+#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15)
+
#define BNX2_DEV_INFO_SIGNATURE 0x00000020
#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900
#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00
@@ -7006,6 +7036,8 @@ struct fw_info {
#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff
#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000
+#define BNX2_MFW_VER_PTR 0x00000014c
+
#define BNX2_BC_STATE_RESET_TYPE 0x000001c0
#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254
#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff
@@ -7059,12 +7091,42 @@ struct fw_info {
#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600)
#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700)
+#define BNX2_BC_STATE_CONDITION 0x000001c8
+#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000
+#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000
+#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000
+#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
+#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
+#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+
#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000
#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff
#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff
+#define BNX2_FW_EVT_CODE_MB 0x354
+#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000
+#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001
+
+#define BNX2_DRV_ACK_CAP_MB 0x364
+#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000
+#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000
+
+#define BNX2_FW_CAP_MB 0x368
+#define BNX2_FW_CAP_SIGNATURE 0xaa550000
+#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000
+#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000
+#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001
+#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002
+
+#define BNX2_RPHY_SIGNATURE 0x36c
+#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a
+
+#define BNX2_RPHY_FLAGS 0x370
+#define BNX2_RPHY_SERDES_LINK 0x374
+#define BNX2_RPHY_COPPER_LINK 0x378
+
#define HOST_VIEW_SHMEM_BASE 0x167c00
#endif
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 74ec64a1625..a4ace071f1c 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -866,9 +866,9 @@ receive_packet (struct net_device *dev)
PCI_DMA_FROMDEVICE);
/* 16 byte align the IP header */
skb_reserve (skb, 2);
- eth_copy_and_sum (skb,
+ skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
- pkt_len, 0);
+ pkt_len);
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
desc->fraginfo &
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 60673bc292c..756a6bcb038 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -34,11 +34,12 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
static int numdummies = 1;
static int dummy_xmit(struct sk_buff *skb, struct net_device *dev);
-static struct net_device_stats *dummy_get_stats(struct net_device *dev);
static int dummy_set_address(struct net_device *dev, void *p)
{
@@ -56,13 +57,13 @@ static void set_multicast_list(struct net_device *dev)
{
}
-static void __init dummy_setup(struct net_device *dev)
+static void dummy_setup(struct net_device *dev)
{
/* Initialize the device structure. */
- dev->get_stats = dummy_get_stats;
dev->hard_start_xmit = dummy_xmit;
dev->set_multicast_list = set_multicast_list;
dev->set_mac_address = dummy_set_address;
+ dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
@@ -76,77 +77,80 @@ static void __init dummy_setup(struct net_device *dev)
static int dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_device_stats *stats = netdev_priv(dev);
-
- stats->tx_packets++;
- stats->tx_bytes+=skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
return 0;
}
-static struct net_device_stats *dummy_get_stats(struct net_device *dev)
+static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
{
- return netdev_priv(dev);
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
}
-static struct net_device **dummies;
+static struct rtnl_link_ops dummy_link_ops __read_mostly = {
+ .kind = "dummy",
+ .setup = dummy_setup,
+ .validate = dummy_validate,
+};
/* Number of dummy devices to be set up by this module. */
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
-static int __init dummy_init_one(int index)
+static int __init dummy_init_one(void)
{
struct net_device *dev_dummy;
int err;
- dev_dummy = alloc_netdev(sizeof(struct net_device_stats),
- "dummy%d", dummy_setup);
-
+ dev_dummy = alloc_netdev(0, "dummy%d", dummy_setup);
if (!dev_dummy)
return -ENOMEM;
- if ((err = register_netdev(dev_dummy))) {
- free_netdev(dev_dummy);
- dev_dummy = NULL;
- } else {
- dummies[index] = dev_dummy;
- }
+ err = dev_alloc_name(dev_dummy, dev_dummy->name);
+ if (err < 0)
+ goto err;
- return err;
-}
+ dev_dummy->rtnl_link_ops = &dummy_link_ops;
+ err = register_netdevice(dev_dummy);
+ if (err < 0)
+ goto err;
+ return 0;
-static void dummy_free_one(int index)
-{
- unregister_netdev(dummies[index]);
- free_netdev(dummies[index]);
+err:
+ free_netdev(dev_dummy);
+ return err;
}
static int __init dummy_init_module(void)
{
int i, err = 0;
- dummies = kmalloc(numdummies * sizeof(void *), GFP_KERNEL);
- if (!dummies)
- return -ENOMEM;
+
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
+
for (i = 0; i < numdummies && !err; i++)
- err = dummy_init_one(i);
- if (err) {
- i--;
- while (--i >= 0)
- dummy_free_one(i);
- }
+ err = dummy_init_one();
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
+ rtnl_unlock();
+
return err;
}
static void __exit dummy_cleanup_module(void)
{
- int i;
- for (i = 0; i < numdummies; i++)
- dummy_free_one(i);
- kfree(dummies);
+ rtnl_link_unregister(&dummy_link_ops);
}
module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("dummy");
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9800341956a..9afa47edfc5 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1801,7 +1801,7 @@ speedo_rx(struct net_device *dev)
#if 1 || USE_IP_CSUM
/* Packet is in one chunk -- we can copy + cksum. */
- eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
skb_copy_from_linear_data(sp->rx_skbuff[entry],
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 5e517946f46..119778401e4 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1201,7 +1201,7 @@ static int epic_rx(struct net_device *dev, int budget)
ep->rx_ring[entry].bufaddr,
ep->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(ep->pci_dev,
ep->rx_ring[entry].bufaddr,
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index abe9b089c61..ff9f177d715 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1727,8 +1727,8 @@ static int netdev_rx(struct net_device *dev)
/* Call copy + cksum if available. */
#if ! defined(__alpha__)
- eth_copy_and_sum(skb,
- np->cur_rx->skbuff->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ np->cur_rx->skbuff->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 255b09124e1..03023dd1782 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -648,7 +648,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
fep->stats.rx_dropped++;
} else {
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb, data, pkt_len-4, 0);
+ skb_copy_to_linear_data(skb, data, pkt_len-4);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 2521b111b3a..15254dc7876 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1575,8 +1575,8 @@ static int hamachi_rx(struct net_device *dev)
PCI_DMA_FROMDEVICE);
/* Call copy + cksum if available. */
#if 1 || USE_IP_COPYSUM
- eth_copy_and_sum(skb,
- hmp->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 07b4c0d7a75..f5c3598e59a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -136,13 +136,14 @@ resched:
}
-static void __init ifb_setup(struct net_device *dev)
+static void ifb_setup(struct net_device *dev)
{
/* Initialize the device structure. */
dev->get_stats = ifb_get_stats;
dev->hard_start_xmit = ifb_xmit;
dev->open = &ifb_open;
dev->stop = &ifb_close;
+ dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
@@ -197,12 +198,6 @@ static struct net_device_stats *ifb_get_stats(struct net_device *dev)
return stats;
}
-static struct net_device **ifbs;
-
-/* Number of ifb devices to be set up by this module. */
-module_param(numifbs, int, 0);
-MODULE_PARM_DESC(numifbs, "Number of ifb devices");
-
static int ifb_close(struct net_device *dev)
{
struct ifb_private *dp = netdev_priv(dev);
@@ -226,6 +221,28 @@ static int ifb_open(struct net_device *dev)
return 0;
}
+static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static struct rtnl_link_ops ifb_link_ops __read_mostly = {
+ .kind = "ifb",
+ .priv_size = sizeof(struct ifb_private),
+ .setup = ifb_setup,
+ .validate = ifb_validate,
+};
+
+/* Number of ifb devices to be set up by this module. */
+module_param(numifbs, int, 0);
+MODULE_PARM_DESC(numifbs, "Number of ifb devices");
+
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
@@ -237,49 +254,44 @@ static int __init ifb_init_one(int index)
if (!dev_ifb)
return -ENOMEM;
- if ((err = register_netdev(dev_ifb))) {
- free_netdev(dev_ifb);
- dev_ifb = NULL;
- } else {
- ifbs[index] = dev_ifb;
- }
+ err = dev_alloc_name(dev_ifb, dev_ifb->name);
+ if (err < 0)
+ goto err;
- return err;
-}
+ dev_ifb->rtnl_link_ops = &ifb_link_ops;
+ err = register_netdevice(dev_ifb);
+ if (err < 0)
+ goto err;
+ return 0;
-static void ifb_free_one(int index)
-{
- unregister_netdev(ifbs[index]);
- free_netdev(ifbs[index]);
+err:
+ free_netdev(dev_ifb);
+ return err;
}
static int __init ifb_init_module(void)
{
- int i, err = 0;
- ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL);
- if (!ifbs)
- return -ENOMEM;
+ int i, err;
+
+ rtnl_lock();
+ err = __rtnl_link_register(&ifb_link_ops);
+
for (i = 0; i < numifbs && !err; i++)
err = ifb_init_one(i);
- if (err) {
- i--;
- while (--i >= 0)
- ifb_free_one(i);
- }
+ if (err)
+ __rtnl_link_unregister(&ifb_link_ops);
+ rtnl_unlock();
return err;
}
static void __exit ifb_cleanup_module(void)
{
- int i;
-
- for (i = 0; i < numifbs; i++)
- ifb_free_one(i);
- kfree(ifbs);
+ rtnl_link_unregister(&ifb_link_ops);
}
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
+MODULE_ALIAS_RTNL_LINK("ifb");
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 217429122e7..bdd5c979bea 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -4,7 +4,7 @@
* Version: 0.1.1
* Description: Irda KingSun/DonShine USB Dongle
* Status: Experimental
-* Author: Alex Villac�s Lasso <a_villacis@palosanto.com>
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
*
* Based on stir4200 and mcs7780 drivers, with (strange?) differences
*
@@ -652,6 +652,6 @@ static void __exit kingsun_cleanup(void)
}
module_exit(kingsun_cleanup);
-MODULE_AUTHOR("Alex Villac�s Lasso <a_villacis@palosanto.com>");
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bf78ef1120a..0538ca9ce05 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -44,6 +44,7 @@ MODULE_LICENSE("GPL");
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -1660,8 +1661,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idev = ndev->priv;
spin_lock_init(&idev->lock);
- init_MUTEX(&idev->sem);
- down(&idev->sem);
+ mutex_init(&idev->mtx);
+ mutex_lock(&idev->mtx);
idev->pdev = pdev;
if (vlsi_irda_init(ndev) < 0)
@@ -1689,12 +1690,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
pci_set_drvdata(pdev, ndev);
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
out_freedev:
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
free_netdev(ndev);
out_disable:
pci_disable_device(pdev);
@@ -1716,12 +1717,12 @@ static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
idev = ndev->priv;
- down(&idev->sem);
+ mutex_lock(&idev->mtx);
if (idev->proc_entry) {
remove_proc_entry(ndev->name, vlsi_proc_root);
idev->proc_entry = NULL;
}
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
free_netdev(ndev);
@@ -1751,7 +1752,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
idev = ndev->priv;
- down(&idev->sem);
+ mutex_lock(&idev->mtx);
if (pdev->current_state != 0) { /* already suspended */
if (state.event > pdev->current_state) { /* simply go deeper */
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1759,7 +1760,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
}
else
IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event);
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
@@ -1775,7 +1776,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
pci_set_power_state(pdev, pci_choose_state(pdev, state));
pdev->current_state = state.event;
idev->resume_ok = 1;
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
@@ -1790,9 +1791,9 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
return 0;
}
idev = ndev->priv;
- down(&idev->sem);
+ mutex_lock(&idev->mtx);
if (pdev->current_state == 0) {
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
IRDA_WARNING("%s - %s: already resumed\n",
__FUNCTION__, pci_name(pdev));
return 0;
@@ -1814,7 +1815,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
* device and independently resume_ok should catch any garbage config.
*/
IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
@@ -1824,7 +1825,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
netif_device_attach(ndev);
}
idev->resume_ok = 0;
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 2d3b773d8e3..ca12a609641 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -728,7 +728,7 @@ typedef struct vlsi_irda_dev {
struct timeval last_rx;
spinlock_t lock;
- struct semaphore sem;
+ struct mutex mtx;
u8 resume_ok;
struct proc_dir_entry *proc_entry;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5f694fc4a2..d9ce1aef148 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -111,7 +111,7 @@ static int ixpdev_rx(struct net_device *dev, int *budget)
skb = dev_alloc_skb(desc->pkt_length + 2);
if (likely(skb != NULL)) {
skb_reserve(skb, 2);
- eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
+ skb_copy_to_linear_data(skb, buf, desc->pkt_length);
skb_put(skb, desc->pkt_length);
skb->protocol = eth_type_trans(skb, nds[desc->channel]);
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 0fe96c85828..a2f37e52b92 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1186,9 +1186,9 @@ lance_rx(struct net_device *dev)
}
skb_reserve(skb,2); /* 16 byte align */
skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
- pkt_len,0);
+ pkt_len);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 460a08718c6..3450051ae56 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2357,8 +2357,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
np->rx_dma[entry],
buflen,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
- np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
np->rx_dma[entry],
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 8dbd6d1900b..5e7999db209 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -936,7 +936,7 @@ static void ni52_rcv_int(struct net_device *dev)
{
skb_reserve(skb,2);
skb_put(skb,totlen);
- eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 3818edf0ac1..4ef5fe34519 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1096,7 +1096,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
#ifdef RCV_VIA_SKB
if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
skb_put(skb,len);
- eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
+ skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
}
else {
struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
@@ -1108,7 +1108,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
}
#else
skb_put(skb,len);
- eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
+ skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
p->stats.rx_packets++;
p->stats.rx_bytes += len;
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index df8998b4f37..3cdbe118200 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1567,7 +1567,7 @@ static void netdrv_rx_interrupt (struct net_device *dev,
if (skb) {
skb_reserve (skb, 2); /* 16 byte align the IP fields. */
- eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+ skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
skb_put (skb, pkt_size);
skb->protocol = eth_type_trans (skb, dev);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 9c171a7390e..465485a3fbc 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1235,9 +1235,9 @@ static void pcnet32_rx_entry(struct net_device *dev,
lp->rx_dma_addr[entry],
pkt_len,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data),
- pkt_len, 0);
+ pkt_len);
pci_dma_sync_single_for_device(lp->pci_dev,
lp->rx_dma_addr[entry],
pkt_len,
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
new file mode 100644
index 00000000000..5891a0fbdc8
--- /dev/null
+++ b/drivers/net/pppol2tp.c
@@ -0,0 +1,2486 @@
+/*****************************************************************************
+ * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoL2TP --- PPP over L2TP (RFC 2661)
+ *
+ * Version: 1.0.0
+ *
+ * Authors: Martijn van Oosterhout <kleptog@svana.org>
+ * James Chapman (jchapman@katalix.com)
+ * Contributors:
+ * Michal Ostrowski <mostrows@speakeasy.net>
+ * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
+ * David S. Miller (davem@redhat.com)
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* This driver handles only L2TP data frames; control frames are handled by a
+ * userspace application.
+ *
+ * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
+ * attaches it to a bound UDP socket with local tunnel_id / session_id and
+ * peer tunnel_id / session_id set. Data can then be sent or received using
+ * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
+ * can be read or modified using ioctl() or [gs]etsockopt() calls.
+ *
+ * When a PPPoL2TP socket is connected with local and peer session_id values
+ * zero, the socket is treated as a special tunnel management socket.
+ *
+ * Here's example userspace code to create a socket for sending/receiving data
+ * over an L2TP session:-
+ *
+ * struct sockaddr_pppol2tp sax;
+ * int fd;
+ * int session_fd;
+ *
+ * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
+ *
+ * sax.sa_family = AF_PPPOX;
+ * sax.sa_protocol = PX_PROTO_OL2TP;
+ * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
+ * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
+ * sax.pppol2tp.addr.sin_port = addr->sin_port;
+ * sax.pppol2tp.addr.sin_family = AF_INET;
+ * sax.pppol2tp.s_tunnel = tunnel_id;
+ * sax.pppol2tp.s_session = session_id;
+ * sax.pppol2tp.d_tunnel = peer_tunnel_id;
+ * sax.pppol2tp.d_session = peer_session_id;
+ *
+ * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
+ *
+ * A pppd plugin that allows PPP traffic to be carried over L2TP using
+ * this driver is available from the OpenL2TP project at
+ * http://openl2tp.sourceforge.net.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <asm/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/if_pppox.h>
+#include <linux/if_pppol2tp.h>
+#include <net/sock.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/file.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/xfrm.h>
+
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+
+
+#define PPPOL2TP_DRV_VERSION "V1.0"
+
+/* L2TP header constants */
+#define L2TP_HDRFLAG_T 0x8000
+#define L2TP_HDRFLAG_L 0x4000
+#define L2TP_HDRFLAG_S 0x0800
+#define L2TP_HDRFLAG_O 0x0200
+#define L2TP_HDRFLAG_P 0x0100
+
+#define L2TP_HDR_VER_MASK 0x000F
+#define L2TP_HDR_VER 0x0002
+
+/* Space for UDP, L2TP and PPP headers */
+#define PPPOL2TP_HEADER_OVERHEAD 40
+
+/* Just some random numbers */
+#define L2TP_TUNNEL_MAGIC 0x42114DDA
+#define L2TP_SESSION_MAGIC 0x0C04EB7D
+
+#define PPPOL2TP_HASH_BITS 4
+#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
+
+/* Default trace flags */
+#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
+
+#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
+ do { \
+ if ((_mask) & (_type)) \
+ printk(_lvl "PPPOL2TP: " _fmt, ##args); \
+ } while(0)
+
+/* Number of bytes needed to build a transmit L2TP header.
+ * Unfortunately the size differs depending on whether sequence
+ * are enabled.
+ */
+#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
+#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
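+
+/* For reference, the layout of an L2TP data message header (RFC 2661,
+ * section 3.1). Optional fields are present only when the corresponding
+ * flag bit (L, S, O) is set; this driver transmits without the L and O
+ * bits, so its headers are 6 bytes, or 10 when Ns/Nr are included:
+ *
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |           Tunnel ID           |           Session ID          |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |             Ns (opt)          |             Nr (opt)          |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |      Offset Size (opt)        |    Offset pad... (opt)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */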
+
+struct pppol2tp_tunnel;
+
+/* Describes a session. It is stored in the sk_user_data field of the PPPoL2TP
+ * socket. Contains the information needed to match incoming packets to the
+ * session and to build outgoing ones.
+ */
+struct pppol2tp_session
+{
+ int magic; /* should be
+ * L2TP_SESSION_MAGIC */
+ int owner; /* pid that opened the socket */
+
+ struct sock *sock; /* Pointer to the session
+ * PPPoX socket */
+ struct sock *tunnel_sock; /* Pointer to the tunnel UDP
+ * socket */
+
+ struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
+
+ struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
+ * context */
+
+ char name[20]; /* "sess xxxxx/yyyyy", where
+ * x=tunnel_id, y=session_id */
+ int mtu;
+ int mru;
+ int flags; /* accessed by PPPIOCGFLAGS.
+ * Unused. */
+ unsigned recv_seq:1; /* expect receive packets with
+ * sequence numbers? */
+ unsigned send_seq:1; /* send packets with sequence
+ * numbers? */
+ unsigned lns_mode:1; /* behave as LNS? LAC enables
+ * sequence numbers under
+ * control of LNS. */
+ int debug; /* bitmask of debug message
+ * categories */
+ int reorder_timeout; /* configured reorder timeout
+ * (in jiffies) */
+ u16 nr; /* session NR state (receive) */
+ u16 ns; /* session NR state (send) */
+ struct sk_buff_head reorder_q; /* receive reorder queue */
+ struct pppol2tp_ioc_stats stats;
+ struct hlist_node hlist; /* Hash list node */
+};
+
+/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
+ * all the associated sessions so that incoming packets can be demultiplexed to
+ * the right one.
+ */
+struct pppol2tp_tunnel
+{
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
+ /* hashed list of sessions,
+ * hashed by id */
+ int debug; /* bitmask of debug message
+ * categories */
+ char name[12]; /* "tunl xxxxx" */
+ struct pppol2tp_ioc_stats stats;
+
+ void (*old_sk_destruct)(struct sock *);
+
+ struct sock *sock; /* Parent socket */
+ struct list_head list; /* Keep a list of all open
+ * prepared sockets */
+
+ atomic_t ref_count;
+};
+
+/* Private data stored for received packets in the skb.
+ */
+struct pppol2tp_skb_cb {
+ u16 ns;
+ u16 nr;
+ u16 has_seq;
+ u16 length;
+ unsigned long expires;
+};
+
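+/* The per-packet L2TP info is kept in skb->cb[], after the struct
+ * inet_skb_parm that the IP layer stores there, so that the two do not
+ * clobber each other.
+ */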
+#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
+
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
+
+static atomic_t pppol2tp_tunnel_count;
+static atomic_t pppol2tp_session_count;
+static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL };
+static struct proto_ops pppol2tp_ops;
+static LIST_HEAD(pppol2tp_tunnel_list);
+static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock);
+
+/* Helpers to obtain tunnel/session contexts from sockets.
+ */
+static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
+{
+ struct pppol2tp_session *session;
+
+ if (sk == NULL)
+ return NULL;
+
+ session = (struct pppol2tp_session *)(sk->sk_user_data);
+ if (session == NULL)
+ return NULL;
+
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
+ return session;
+}
+
+static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
+{
+ struct pppol2tp_tunnel *tunnel;
+
+ if (sk == NULL)
+ return NULL;
+
+ tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
+ if (tunnel == NULL)
+ return NULL;
+
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ return tunnel;
+}
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ pppol2tp_tunnel_free(tunnel);
+}
+
+/* Session hash list.
+ * The session_id SHOULD be random according to RFC2661, but several
+ * L2TP implementations (Cisco and Microsoft) use incrementing
+ * session_ids. So we do a real hash on the session_id, rather than a
+ * simple bitmask.
+ */
+static inline struct hlist_head *
+pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
+{
+ unsigned long hash_val = (unsigned long) session_id;
+ return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
+}
+
+/* Lookup a session by id
+ */
+static struct pppol2tp_session *
+pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
+{
+ struct hlist_head *session_list =
+ pppol2tp_session_id_hash(tunnel, session_id);
+ struct pppol2tp_session *session;
+ struct hlist_node *walk;
+
+ read_lock(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, walk, session_list, hlist) {
+ if (session->tunnel_addr.s_session == session_id) {
+ read_unlock(&tunnel->hlist_lock);
+ return session;
+ }
+ }
+ read_unlock(&tunnel->hlist_lock);
+
+ return NULL;
+}
+
+/* Lookup a tunnel by id
+ */
+static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
+{
+ struct pppol2tp_tunnel *tunnel = NULL;
+
+ read_lock(&pppol2tp_tunnel_list_lock);
+ list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
+ if (tunnel->stats.tunnel_id == tunnel_id) {
+ read_unlock(&pppol2tp_tunnel_list_lock);
+ return tunnel;
+ }
+ }
+ read_unlock(&pppol2tp_tunnel_list_lock);
+
+ return NULL;
+}
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+/* Queue a skb in order. We come here only if the skb has an L2TP sequence
+ * number.
+ */
+static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
+{
+ struct sk_buff *skbp;
+ u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
+
+ spin_lock(&session->reorder_q.lock);
+ skb_queue_walk(&session->reorder_q, skbp) {
+ if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
+ __skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
+ session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
+ skb_queue_len(&session->reorder_q));
+ session->stats.rx_oos_packets++;
+ goto out;
+ }
+ }
+
+ __skb_queue_tail(&session->reorder_q, skb);
+
+out:
+ spin_unlock(&session->reorder_q.lock);
+}
+
+/* Dequeue a single skb.
+ */
+static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
+{
+ struct pppol2tp_tunnel *tunnel = session->tunnel;
+ int length = PPPOL2TP_SKB_CB(skb)->length;
+ struct sock *session_sock = NULL;
+
+ /* We're about to requeue the skb, so unlink it and return resources
+ * to its current owner (a socket receive buffer).
+ */
+ skb_unlink(skb, &session->reorder_q);
+ skb_orphan(skb);
+
+ tunnel->stats.rx_packets++;
+ tunnel->stats.rx_bytes += length;
+ session->stats.rx_packets++;
+ session->stats.rx_bytes += length;
+
+ if (PPPOL2TP_SKB_CB(skb)->has_seq) {
+ /* Bump our Nr */
+ session->nr++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated nr to %hu\n", session->name, session->nr);
+ }
+
+ /* If the socket is bound, send it in to PPP's input queue. Otherwise
+ * queue it on the session socket.
+ */
+ session_sock = session->sock;
+ if (session_sock->sk_state & PPPOX_BOUND) {
+ struct pppox_sock *po;
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv %d byte data frame, passing to ppp\n",
+ session->name, length);
+
+ /* We need to forget all info related to the L2TP packet
+ * gathered in the skb as we are going to reuse the same
+ * skb for the inner packet.
+ * Namely we need to:
+ * - reset xfrm (IPSec) information as it applies to
+ * the outer L2TP packet and not to the inner one
+ * - release the dst to force a route lookup on the inner
+ * IP packet since skb->dst currently points to the dst
+ * of the UDP tunnel
+ * - reset netfilter information as it doesn't apply
+ * to the inner packet either
+ */
+ secpath_reset(skb);
+ dst_release(skb->dst);
+ skb->dst = NULL;
+ nf_reset(skb);
+
+ po = pppox_sk(session_sock);
+ ppp_input(&po->chan, skb);
+ } else {
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: socket not bound\n", session->name);
+
+ /* Not bound. Nothing we can do, so discard. */
+ session->stats.rx_errors++;
+ kfree_skb(skb);
+ }
+
+ sock_put(session->sock);
+}
+
+/* Dequeue skbs from the session's reorder_q, subject to packet order.
+ * Skbs that have been in the queue for too long are simply discarded.
+ */
+static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
+{
+ struct sk_buff *skb;
+ struct sk_buff *tmp;
+
+ /* If the pkt at the head of the queue has the nr that we
+ * expect to send up next, dequeue it and any other
+ * in-sequence packets behind it.
+ */
+ spin_lock(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
+ if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %hu len %d discarded (too old), "
+ "waiting for %hu, reorder_q_len=%d\n",
+ session->name, PPPOL2TP_SKB_CB(skb)->ns,
+ PPPOL2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ __skb_unlink(skb, &session->reorder_q);
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (PPPOL2TP_SKB_CB(skb)->has_seq) {
+ if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: holding oos pkt %hu len %d, "
+ "waiting for %hu, reorder_q_len=%d\n",
+ session->name, PPPOL2TP_SKB_CB(skb)->ns,
+ PPPOL2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto out;
+ }
+ }
+ spin_unlock(&session->reorder_q.lock);
+ pppol2tp_recv_dequeue_skb(session, skb);
+ spin_lock(&session->reorder_q.lock);
+ }
+
+out:
+ spin_unlock(&session->reorder_q.lock);
+}
+
+/* Internal receive frame. Do the real work of receiving an L2TP data frame
+ * here. The skb is not on a list when we get here.
+ * Returns 0 if the packet was a data packet and was successfully passed on.
+ * Returns 1 if the packet was not a good data packet and could not be
+ * forwarded. All such packets are passed up to userspace to deal with.
+ */
+static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
+{
+ struct pppol2tp_session *session = NULL;
+ struct pppol2tp_tunnel *tunnel;
+ unsigned char *ptr;
+ u16 hdrflags;
+ u16 tunnel_id, session_id;
+ int length;
+ struct udphdr *uh;
+
+ tunnel = pppol2tp_sock_to_tunnel(sock);
+ if (tunnel == NULL)
+ goto error;
+
+ /* Short packet? */
+ if (skb->len < sizeof(struct udphdr)) {
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
+ goto error;
+ }
+
+ /* Point to L2TP header */
+ ptr = skb->data + sizeof(struct udphdr);
+
+ /* Get L2TP header flags */
+ hdrflags = ntohs(*(__be16*)ptr);
+
+ /* Trace packet contents, if enabled */
+ if (tunnel->debug & PPPOL2TP_MSG_DATA) {
+ printk(KERN_DEBUG "%s: recv: ", tunnel->name);
+
+ for (length = 0; length < 16; length++)
+ printk(" %02X", ptr[length]);
+ printk("\n");
+ }
+
+ /* Get length of L2TP packet */
+ uh = (struct udphdr *) skb_transport_header(skb);
+ length = ntohs(uh->len) - sizeof(struct udphdr);
+
+ /* Too short? */
+ if (length < 12) {
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length);
+ goto error;
+ }
+
+ /* If type is control packet, it is handled by userspace. */
+ if (hdrflags & L2TP_HDRFLAG_T) {
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv control packet, len=%d\n", tunnel->name, length);
+ goto error;
+ }
+
+ /* Skip flags */
+ ptr += 2;
+
+ /* If length is present, skip it */
+ if (hdrflags & L2TP_HDRFLAG_L)
+ ptr += 2;
+
+ /* Extract tunnel and session ID */
+ tunnel_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ session_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+
+ /* Find the session context */
+ session = pppol2tp_session_find(tunnel, session_id);
+ if (!session) {
+ /* Not found? Pass to userspace to deal with */
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: no socket found (%hu/%hu). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
+ goto error;
+ }
+ sock_hold(session->sock);
+
+ /* The ref count on the socket was increased by the above call since
+ * we now hold a pointer to the session. Take care to do sock_put()
+ * when exiting this function from now on...
+ */
+
+ /* Handle the optional sequence numbers. If we are the LAC,
+ * enable/disable sequence numbers under the control of the LNS. If
+ * no sequence numbers present but we were expecting them, discard
+ * frame.
+ */
+ if (hdrflags & L2TP_HDRFLAG_S) {
+ u16 ns, nr;
+ ns = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ nr = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+
+ /* Received a packet with sequence numbers. If we're the LAC,
+ * check whether we are sending sequence numbers and, if not,
+ * enable them, since the LNS controls this.
+ */
+ if ((!session->lns_mode) && (!session->send_seq)) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to enable seq numbers by LNS\n",
+ session->name);
+ session->send_seq = -1;
+ }
+
+ /* Store L2TP info in the skb */
+ PPPOL2TP_SKB_CB(skb)->ns = ns;
+ PPPOL2TP_SKB_CB(skb)->nr = nr;
+ PPPOL2TP_SKB_CB(skb)->has_seq = 1;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
+ session->name, ns, nr, session->nr);
+ } else {
+ /* No sequence numbers.
+ * If user has configured mandatory sequence numbers, discard.
+ */
+ if (session->recv_seq) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ goto discard;
+ }
+
+ /* If we're the LAC and we're sending sequence numbers, the
+ * LNS has requested that we no longer send sequence numbers.
+ * If we're the LNS and we're sending sequence numbers, the
+ * LAC is broken. Discard the frame.
+ */
+ if ((!session->lns_mode) && (session->send_seq)) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to disable seq numbers by LNS\n",
+ session->name);
+ session->send_seq = 0;
+ } else if (session->send_seq) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ goto discard;
+ }
+
+ /* Store L2TP info in the skb */
+ PPPOL2TP_SKB_CB(skb)->has_seq = 0;
+ }
+
+ /* If offset bit set, skip it. */
+ if (hdrflags & L2TP_HDRFLAG_O)
+ ptr += 2 + ntohs(*(__be16 *) ptr);
+
+ skb_pull(skb, ptr - skb->data);
+
+ /* Skip PPP header, if present. In testing, Microsoft L2TP clients
+ * don't send the PPP header (PPP header compression enabled), but
+ * other clients can include the header. So we cope with both cases
+ * here. The PPP header is always FF03 when using L2TP.
+ *
+ * Note that skb->data[] isn't dereferenced from a u16 ptr here since
+ * the field may be unaligned.
+ */
+ if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
+ skb_pull(skb, 2);
+
+ /* Prepare skb for adding to the session's reorder_q. Hold
+ * packets for max reorder_timeout or 1 second if not
+ * reordering.
+ */
+ PPPOL2TP_SKB_CB(skb)->length = length;
+ PPPOL2TP_SKB_CB(skb)->expires = jiffies +
+ (session->reorder_timeout ? session->reorder_timeout : HZ);
+
+ /* Add packet to the session's receive queue. Reordering is done here, if
+ * enabled. Saved L2TP protocol info is stored in skb->cb[].
+ */
+ if (PPPOL2TP_SKB_CB(skb)->has_seq) {
+ if (session->reorder_timeout != 0) {
+ /* Packet reordering enabled. Add skb to session's
+ * reorder queue, in order of ns.
+ */
+ pppol2tp_recv_queue_skb(session, skb);
+ } else {
+ /* Packet reordering disabled. Discard out-of-sequence
+ * packets
+ */
+ if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %hu len %d discarded, "
+ "waiting for %hu, reorder_q_len=%d\n",
+ session->name, PPPOL2TP_SKB_CB(skb)->ns,
+ PPPOL2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto discard;
+ }
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+ } else {
+ /* No sequence numbers. Add the skb to the tail of the
+ * reorder queue. This ensures that it will be
+ * delivered after all previous sequenced skbs.
+ */
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+
+ /* Try to dequeue as many skbs from reorder_q as we can. */
+ pppol2tp_recv_dequeue(session);
+
+ return 0;
+
+discard:
+ kfree_skb(skb);
+ sock_put(session->sock);
+
+ return 0;
+
+error:
+ return 1;
+}
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes:
+ * 0 : success.
+ * <0: error
+ * >0: skb should be passed up to userspace as UDP.
+ */
+static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pppol2tp_tunnel *tunnel;
+
+ tunnel = pppol2tp_sock_to_tunnel(sk);
+ if (tunnel == NULL)
+ goto pass_up;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: received %d bytes\n", tunnel->name, skb->len);
+
+ if (pppol2tp_recv_core(sk, skb))
+ goto pass_up;
+
+ return 0;
+
+pass_up:
+ return 1;
+}
+
+/* Receive message. This is the recvmsg for the PPPoL2TP socket.
+ */
+static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len,
+ int flags)
+{
+ int err;
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+
+ err = -EIO;
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+ msg->msg_namelen = 0;
+
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (skb) {
+ err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data,
+ skb->len);
+ if (err < 0)
+ goto do_skb_free;
+ err = skb->len;
+ }
+do_skb_free:
+ kfree_skb(skb);
+end:
+ return err;
+}
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* Tell how big L2TP headers are for a particular session. This
+ * depends on whether sequence numbers are being used.
+ */
+static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
+{
+ if (session->send_seq)
+ return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
+
+ return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+}
+
+/* Build an L2TP header for the session into the buffer provided.
+ */
+static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
+ void *buf)
+{
+ __be16 *bufp = buf;
+ u16 flags = L2TP_HDR_VER;
+
+ if (session->send_seq)
+ flags |= L2TP_HDRFLAG_S;
+
+ /* Setup L2TP header.
+ * FIXME: Can this ever be unaligned? Is direct dereferencing of
+ * 16-bit header fields safe here for all architectures?
+ */
+ *bufp++ = htons(flags);
+ *bufp++ = htons(session->tunnel_addr.d_tunnel);
+ *bufp++ = htons(session->tunnel_addr.d_session);
+ if (session->send_seq) {
+ *bufp++ = htons(session->ns);
+ *bufp++ = 0;
+ session->ns++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated ns to %hu\n", session->name, session->ns);
+ }
+}
+
+/* This is the sendmsg() handler for the PPPoL2TP session socket. We come here
+ * when a user application does a sendmsg() on the session socket. L2TP and
+ * PPP headers must be inserted into the user's data.
+ */
+static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t total_len)
+{
+ static const unsigned char ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet;
+ __wsum csum = 0;
+ struct sk_buff *skb;
+ int error;
+ int hdr_len;
+ struct pppol2tp_session *session;
+ struct pppol2tp_tunnel *tunnel;
+ struct udphdr *uh;
+
+ error = -ENOTCONN;
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto error;
+
+ /* Get session and tunnel contexts */
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto error;
+
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto error;
+
+ /* What header length is configured for this session? */
+ hdr_len = pppol2tp_l2tp_header_len(session);
+
+ /* Allocate a socket buffer */
+ error = -ENOMEM;
+ skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + hdr_len +
+ sizeof(ppph) + total_len,
+ 0, GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ /* Reserve space for headers. */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+
+ /* Build UDP header */
+ inet = inet_sk(session->tunnel_sock);
+ uh = (struct udphdr *) skb->data;
+ uh->source = inet->sport;
+ uh->dest = inet->dport;
+ uh->len = htons(hdr_len + sizeof(ppph) + total_len);
+ uh->check = 0;
+ skb_put(skb, sizeof(struct udphdr));
+
+ /* Build L2TP header */
+ pppol2tp_build_l2tp_header(session, skb->data);
+ skb_put(skb, hdr_len);
+
+ /* Add PPP header */
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+ error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error;
+ }
+ skb_put(skb, total_len);
+
+ /* Calculate UDP checksum if configured to do so */
+ if (session->tunnel_sock->sk_no_check != UDP_CSUM_NOXMIT)
+ csum = udp_csum_outgoing(sk, skb);
+
+ /* Debug */
+ if (session->send_seq)
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes, ns=%hu\n", session->name,
+ total_len, session->ns - 1);
+ else
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes\n", session->name, total_len);
+
+ if (session->debug & PPPOL2TP_MSG_DATA) {
+ int i;
+ unsigned char *datap = skb->data;
+
+ printk(KERN_DEBUG "%s: xmit:", session->name);
+ for (i = 0; i < total_len; i++) {
+ printk(" %02X", *datap++);
+ if (i == 15) {
+ printk(" ...");
+ break;
+ }
+ }
+ printk("\n");
+ }
+
+ /* Queue the packet to IP for output */
+ error = ip_queue_xmit(skb, 1);
+
+ /* Update stats */
+ if (error >= 0) {
+ tunnel->stats.tx_packets++;
+ tunnel->stats.tx_bytes += skb->len;
+ session->stats.tx_packets++;
+ session->stats.tx_bytes += skb->len;
+ } else {
+ tunnel->stats.tx_errors++;
+ session->stats.tx_errors++;
+ }
+
+error:
+ return error;
+}
+
+/* Transmit function called by generic PPP driver. Sends PPP frame
+ * over PPPoL2TP socket.
+ *
+ * This is almost the same as pppol2tp_sendmsg(), but rather than
+ * being called with a msghdr from userspace, it is called with a skb
+ * from the kernel.
+ *
+ * The supplied skb from ppp doesn't have enough headroom for the
+ * insertion of L2TP, UDP and IP headers so we need to allocate more
+ * headroom in the skb. This will create a cloned skb. But we must be
+ * careful in the error case because the caller will expect to free
+ * the skb it supplied, not our cloned skb. So we take care to always
+ * leave the original skb unfreed if we return an error.
+ */
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ static const u8 ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = (struct sock *) chan->private;
+ struct sock *sk_tun;
+ int hdr_len;
+ struct pppol2tp_session *session;
+ struct pppol2tp_tunnel *tunnel;
+ int rc;
+ int headroom;
+ int data_len = skb->len;
+ struct inet_sock *inet;
+ __wsum csum = 0;
+ struct sk_buff *skb2 = NULL;
+ struct udphdr *uh;
+
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto abort;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto abort;
+
+ sk_tun = session->tunnel_sock;
+ if (sk_tun == NULL)
+ goto abort;
+ tunnel = pppol2tp_sock_to_tunnel(sk_tun);
+ if (tunnel == NULL)
+ goto abort;
+
+ /* What header length is configured for this session? */
+ hdr_len = pppol2tp_l2tp_header_len(session);
+
+ /* Check that there's enough headroom in the skb to insert IP,
+ * UDP and L2TP and PPP headers. If not enough, expand it to
+ * make room. Note that a new skb (or a clone) is
+ * allocated. If we return an error from this point on, make
+ * sure we free the new skb but do not free the original skb
+ * since that is done by the caller for the error case.
+ */
+ headroom = NET_SKB_PAD + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + hdr_len + sizeof(ppph);
+ if (skb_headroom(skb) < headroom) {
+ skb2 = skb_realloc_headroom(skb, headroom);
+ if (skb2 == NULL)
+ goto abort;
+ } else
+ skb2 = skb;
+
+ /* Check that the socket has room */
+ if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf)
+ skb_set_owner_w(skb2, sk_tun);
+ else
+ goto discard;
+
+ /* Setup PPP header */
+ skb_push(skb2, sizeof(ppph));
+ skb2->data[0] = ppph[0];
+ skb2->data[1] = ppph[1];
+
+ /* Setup L2TP header */
+ skb_push(skb2, hdr_len);
+ pppol2tp_build_l2tp_header(session, skb2->data);
+
+ /* Setup UDP header */
+ inet = inet_sk(sk_tun);
+ skb_push(skb2, sizeof(struct udphdr));
+ skb_reset_transport_header(skb2);
+ uh = (struct udphdr *) skb2->data;
+ uh->source = inet->sport;
+ uh->dest = inet->dport;
+ uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len);
+ uh->check = 0;
+
+ /* Calculate UDP checksum if configured to do so */
+ if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
+ csum = udp_csum_outgoing(sk_tun, skb2);
+
+ /* Debug */
+ if (session->send_seq)
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %d bytes, ns=%hu\n", session->name,
+ data_len, session->ns - 1);
+ else
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %d bytes\n", session->name, data_len);
+
+ if (session->debug & PPPOL2TP_MSG_DATA) {
+ int i;
+ unsigned char *datap = skb2->data;
+
+ printk(KERN_DEBUG "%s: xmit:", session->name);
+ for (i = 0; i < data_len; i++) {
+ printk(" %02X", *datap++);
+ if (i == 31) {
+ printk(" ...");
+ break;
+ }
+ }
+ printk("\n");
+ }
+
+ /* Get routing info from the tunnel socket */
+ skb2->dst = sk_dst_get(sk_tun);
+
+ /* Queue the packet to IP for output */
+ rc = ip_queue_xmit(skb2, 1);
+
+ /* Update stats */
+ if (rc >= 0) {
+ tunnel->stats.tx_packets++;
+ tunnel->stats.tx_bytes += skb2->len;
+ session->stats.tx_packets++;
+ session->stats.tx_bytes += skb2->len;
+ } else {
+ tunnel->stats.tx_errors++;
+ session->stats.tx_errors++;
+ }
+
+ /* Free the original skb */
+ kfree_skb(skb);
+
+ return 1;
+
+discard:
+ /* Free the new skb. Caller will free original skb. */
+ if (skb2 != skb)
+ kfree_skb(skb2);
+abort:
+ return 0;
+}
+
+/*****************************************************************************
+ * Session (and tunnel control) socket create/destroy.
+ *****************************************************************************/
+
+/* When the tunnel UDP socket is closed, all the attached sockets need to go
+ * too.
+ */
+static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
+{
+ int hash;
+ struct hlist_node *walk;
+ struct hlist_node *tmp;
+ struct pppol2tp_session *session;
+ struct sock *sk;
+
+ if (tunnel == NULL)
+ BUG();
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing all sessions...\n", tunnel->name);
+
+ write_lock(&tunnel->hlist_lock);
+ for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
+again:
+ hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+ session = hlist_entry(walk, struct pppol2tp_session, hlist);
+
+ sk = session->sock;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing session\n", session->name);
+
+ hlist_del_init(&session->hlist);
+
+ /* Since we should hold the sock lock while
+ * doing any unbinding, we need to release the
+ * lock we're holding before taking that lock.
+ * Hold a reference to the sock so it doesn't
+ * disappear as we're jumping between locks.
+ */
+ sock_hold(sk);
+ write_unlock(&tunnel->hlist_lock);
+ lock_sock(sk);
+
+ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ pppox_unbind_sock(sk);
+ sk->sk_state = PPPOX_DEAD;
+ sk->sk_state_change(sk);
+ }
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&session->reorder_q);
+
+ release_sock(sk);
+ sock_put(sk);
+
+ /* Now restart from the beginning of this hash
+ * chain. We always remove a session from the
+ * list so we are guaranteed to make forward
+ * progress.
+ */
+ write_lock(&tunnel->hlist_lock);
+ goto again;
+ }
+ }
+ write_unlock(&tunnel->hlist_lock);
+}
+
+/* Really kill the tunnel.
+ * Come here only when all sessions have been cleared from the tunnel.
+ */
+static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
+{
+ /* Remove from socket list */
+ write_lock(&pppol2tp_tunnel_list_lock);
+ list_del_init(&tunnel->list);
+ write_unlock(&pppol2tp_tunnel_list_lock);
+
+ atomic_dec(&pppol2tp_tunnel_count);
+ kfree(tunnel);
+}
+
+/* Tunnel UDP socket destruct hook.
+ * The tunnel context is deleted only when all session sockets have been
+ * closed.
+ */
+static void pppol2tp_tunnel_destruct(struct sock *sk)
+{
+ struct pppol2tp_tunnel *tunnel;
+
+ tunnel = pppol2tp_sock_to_tunnel(sk);
+ if (tunnel == NULL)
+ goto end;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing...\n", tunnel->name);
+
+ /* Close all sessions */
+ pppol2tp_tunnel_closeall(tunnel);
+
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+ (udp_sk(sk))->encap_type = 0;
+ (udp_sk(sk))->encap_rcv = NULL;
+
+ /* Remove hooks into tunnel socket */
+ tunnel->sock = NULL;
+ sk->sk_destruct = tunnel->old_sk_destruct;
+ sk->sk_user_data = NULL;
+
+ /* Call original (UDP) socket destructor */
+ if (sk->sk_destruct != NULL)
+ (*sk->sk_destruct)(sk);
+
+ pppol2tp_tunnel_dec_refcount(tunnel);
+
+end:
+ return;
+}
+
+/* Really kill the session socket. (Called from sock_put() if
+ * refcnt == 0.)
+ */
+static void pppol2tp_session_destruct(struct sock *sk)
+{
+ struct pppol2tp_session *session = NULL;
+
+ if (sk->sk_user_data != NULL) {
+ struct pppol2tp_tunnel *tunnel;
+
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto out;
+
+ /* Don't use pppol2tp_sock_to_tunnel() here to
+ * get the tunnel context because the tunnel
+ * socket might have already been closed (its
+ * sk->sk_user_data will be NULL) so use the
+ * session's private tunnel ptr instead.
+ */
+ tunnel = session->tunnel;
+ if (tunnel != NULL) {
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ /* If session_id is zero, this is a null
+ * session context, which was created for a
+ * socket that is being used only to manage
+ * tunnels.
+ */
+ if (session->tunnel_addr.s_session != 0) {
+ /* Delete the session socket from the
+ * hash
+ */
+ write_lock(&tunnel->hlist_lock);
+ hlist_del_init(&session->hlist);
+ write_unlock(&tunnel->hlist_lock);
+
+ atomic_dec(&pppol2tp_session_count);
+ }
+
+ /* This will delete the tunnel context if this
+ * is the last session on the tunnel.
+ */
+ session->tunnel = NULL;
+ session->tunnel_sock = NULL;
+ pppol2tp_tunnel_dec_refcount(tunnel);
+ }
+ }
+
+ kfree(session);
+out:
+ return;
+}
+
+/* Called when the PPPoX socket (session) is closed.
+ */
+static int pppol2tp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ int error;
+
+ if (!sk)
+ return 0;
+
+ error = -EBADF;
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto error;
+
+ pppox_unbind_sock(sk);
+
+ /* Signal the death of the socket. */
+ sk->sk_state = PPPOX_DEAD;
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
+ release_sock(sk);
+
+ /* This will delete the session context via
+ * pppol2tp_session_destruct() if the socket's refcnt drops to
+ * zero.
+ */
+ sock_put(sk);
+
+ return 0;
+
+error:
+ release_sock(sk);
+ return error;
+}
+
+/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
+ * sockets attached to it.
+ */
+static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
+ int *error)
+{
+ int err;
+ struct socket *sock = NULL;
+ struct sock *sk;
+ struct pppol2tp_tunnel *tunnel;
+ struct sock *ret = NULL;
+
+ /* Get the tunnel UDP socket from the fd, which was opened by
+ * the userspace L2TP daemon.
+ */
+ err = -EBADF;
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
+ "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+ tunnel_id, fd, err);
+ goto err;
+ }
+
+ /* Quick sanity checks */
+ err = -ESOCKTNOSUPPORT;
+ if (sock->type != SOCK_DGRAM) {
+ PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
+ "tunl %hu: fd %d wrong type, got %d, expected %d\n",
+ tunnel_id, fd, sock->type, SOCK_DGRAM);
+ goto err;
+ }
+ err = -EAFNOSUPPORT;
+ if (sock->ops->family != AF_INET) {
+ PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
+ "tunl %hu: fd %d wrong family, got %d, expected %d\n",
+ tunnel_id, fd, sock->ops->family, AF_INET);
+ goto err;
+ }
+
+ err = -ENOTCONN;
+ sk = sock->sk;
+
+ /* Check if this socket has already been prepped */
+ tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
+ if (tunnel != NULL) {
+ /* User-data field already set */
+ err = -EBUSY;
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ /* This socket has already been prepped */
+ ret = tunnel->sock;
+ goto out;
+ }
+
+ /* This socket is available and needs prepping. Create a new tunnel
+ * context and init it.
+ */
+ sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
+ if (sk->sk_user_data == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ tunnel->magic = L2TP_TUNNEL_MAGIC;
+ sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
+
+ tunnel->stats.tunnel_id = tunnel_id;
+ tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
+
+ /* Hook on the tunnel socket destructor so that we can cleanup
+ * if the tunnel socket goes away.
+ */
+ tunnel->old_sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = &pppol2tp_tunnel_destruct;
+
+ tunnel->sock = sk;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ /* Misc init */
+ rwlock_init(&tunnel->hlist_lock);
+
+ /* Add tunnel to our list */
+ INIT_LIST_HEAD(&tunnel->list);
+ write_lock(&pppol2tp_tunnel_list_lock);
+ list_add(&tunnel->list, &pppol2tp_tunnel_list);
+ write_unlock(&pppol2tp_tunnel_list_lock);
+ atomic_inc(&pppol2tp_tunnel_count);
+
+ /* Bump the reference count. The tunnel context is deleted
+ * only when this drops to zero.
+ */
+ pppol2tp_tunnel_inc_refcount(tunnel);
+
+ /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
+ (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
+
+ ret = tunnel->sock;
+
+ *error = 0;
+out:
+ if (sock)
+ sockfd_put(sock);
+
+ return ret;
+
+err:
+ *error = err;
+ goto out;
+}
+
+static struct proto pppol2tp_sk_proto = {
+ .name = "PPPOL2TP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+/* socket() handler. Initialize a new struct sock.
+ */
+static int pppol2tp_create(struct socket *sock)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+
+ sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppol2tp_ops;
+
+ sk->sk_backlog_rcv = pppol2tp_recv_core;
+ sk->sk_protocol = PX_PROTO_OL2TP;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_destruct = pppol2tp_session_destruct;
+
+ error = 0;
+
+out:
+ return error;
+}
+
+/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+ */
+static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sock *tunnel_sock = NULL;
+ struct pppol2tp_session *session = NULL;
+ struct pppol2tp_tunnel *tunnel;
+ struct dst_entry *dst;
+ int error = 0;
+
+ lock_sock(sk);
+
+ error = -EINVAL;
+ if (sp->sa_protocol != PX_PROTO_OL2TP)
+ goto end;
+
+ /* Check for already bound sockets */
+ error = -EBUSY;
+ if (sk->sk_state & PPPOX_CONNECTED)
+ goto end;
+
+ /* We don't support rebinding anyway */
+ error = -EALREADY;
+ if (sk->sk_user_data)
+ goto end; /* socket is already attached */
+
+ /* Don't bind if s_tunnel is 0 */
+ error = -EINVAL;
+ if (sp->pppol2tp.s_tunnel == 0)
+ goto end;
+
+ /* Special case: prepare tunnel socket if s_session and
+ * d_session are 0. Otherwise look up tunnel using supplied
+ * tunnel id.
+ */
+ if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
+ tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd,
+ sp->pppol2tp.s_tunnel,
+ &error);
+ if (tunnel_sock == NULL)
+ goto end;
+
+ tunnel = tunnel_sock->sk_user_data;
+ } else {
+ tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel);
+
+ /* Error if we can't find the tunnel */
+ error = -ENOENT;
+ if (tunnel == NULL)
+ goto end;
+
+ tunnel_sock = tunnel->sock;
+ }
+
+ /* Check that this session doesn't already exist */
+ error = -EEXIST;
+ session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
+ if (session != NULL)
+ goto end;
+
+ /* Allocate and initialize a new session context. */
+ session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
+ if (session == NULL) {
+ error = -ENOMEM;
+ goto end;
+ }
+
+ skb_queue_head_init(&session->reorder_q);
+
+ session->magic = L2TP_SESSION_MAGIC;
+ session->owner = current->pid;
+ session->sock = sk;
+ session->tunnel = tunnel;
+ session->tunnel_sock = tunnel_sock;
+ session->tunnel_addr = sp->pppol2tp;
+ sprintf(&session->name[0], "sess %hu/%hu",
+ session->tunnel_addr.s_tunnel,
+ session->tunnel_addr.s_session);
+
+ session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
+ session->stats.session_id = session->tunnel_addr.s_session;
+
+ INIT_HLIST_NODE(&session->hlist);
+
+ /* Inherit debug options from tunnel */
+ session->debug = tunnel->debug;
+
+ /* Default MTU must allow space for UDP/L2TP/PPP
+ * headers.
+ */
+ session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+ dst = sk_dst_get(sk);
+ if (dst != NULL) {
+ u32 pmtu = dst_mtu(__sk_dst_get(sk));
+ if (pmtu != 0)
+ session->mtu = session->mru = pmtu -
+ PPPOL2TP_HEADER_OVERHEAD;
+ dst_release(dst);
+ }
+
+ /* Special case: if source & dest session_id == 0x0000, this socket is
+ * being created to manage the tunnel. Don't add the session to the
+ * session hash list, just set up the internal context for use by
+ * ioctl() and sockopt() handlers.
+ */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ error = 0;
+ sk->sk_user_data = session;
+ goto out_no_ppp;
+ }
+
+ /* Get tunnel context from the tunnel socket */
+ tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
+ if (tunnel == NULL) {
+ error = -EBADF;
+ goto end;
+ }
+
+ /* Right now, because we don't have a way to push the incoming skb's
+ * straight through the UDP layer, the only header we need to worry
+ * about is the L2TP header. This size is different depending on
+ * whether sequence numbers are enabled for the data channel.
+ */
+ po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+
+ po->chan.private = sk;
+ po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.mtu = session->mtu;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto end;
+
+ /* This is how we get the session context from the socket. */
+ sk->sk_user_data = session;
+
+ /* Add session to the tunnel's hash list */
+ write_lock(&tunnel->hlist_lock);
+ hlist_add_head(&session->hlist,
+ pppol2tp_session_id_hash(tunnel,
+ session->tunnel_addr.s_session));
+ write_unlock(&tunnel->hlist_lock);
+
+ atomic_inc(&pppol2tp_session_count);
+
+out_no_ppp:
+ pppol2tp_tunnel_inc_refcount(tunnel);
+ sk->sk_state = PPPOX_CONNECTED;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: created\n", session->name);
+
+end:
+ release_sock(sk);
+
+ if (error != 0)
+ PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
+ "%s: connect failed: %d\n",
+ session ? session->name : "session", error);
+
+ return error;
+}
+
+/* getname() support.
+ */
+static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer)
+{
+ int len = sizeof(struct sockaddr_pppol2tp);
+ struct sockaddr_pppol2tp sp;
+ int error = 0;
+ struct pppol2tp_session *session;
+
+ error = -ENOTCONN;
+ if (sock->sk->sk_state != PPPOX_CONNECTED)
+ goto end;
+
+ session = pppol2tp_sock_to_session(sock->sk);
+ if (session == NULL) {
+ error = -EBADF;
+ goto end;
+ }
+
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ memcpy(&sp.pppol2tp, &session->tunnel_addr,
+ sizeof(struct pppol2tp_addr));
+
+ memcpy(uaddr, &sp, len);
+
+ *usockaddr_len = len;
+
+ error = 0;
+
+end:
+ return error;
+}
+
+/****************************************************************************
+ * ioctl() handlers.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. However, in order to control kernel tunnel features, we allow
+ * userspace to create a special "tunnel" PPPoX socket which is used for
+ * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
+ * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
+ * calls.
+ ****************************************************************************/
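+
+/* Example (userspace sketch, not part of the driver): read per-session
+ * statistics from a connected PPPoL2TP session socket "fd", created as in
+ * the example at the top of this file. PPPIOCGL2TPSTATS and struct
+ * pppol2tp_ioc_stats come from the PPP / PPPoL2TP userspace headers.
+ *
+ * struct pppol2tp_ioc_stats stats;
+ *
+ * memset(&stats, 0, sizeof(stats));
+ * if (ioctl(fd, PPPIOCGL2TPSTATS, &stats) == 0)
+ *         printf("rx %llu pkts, tx %llu pkts\n",
+ *                (unsigned long long) stats.rx_packets,
+ *                (unsigned long long) stats.tx_packets);
+ */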
+
+/* Session ioctl helper.
+ */
+static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ int err = 0;
+ struct sock *sk = session->sock;
+ int val = (int) arg;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+ session->name, cmd, arg);
+
+ sock_hold(sk);
+
+ switch (cmd) {
+ case SIOCGIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+ ifr.ifr_mtu = session->mtu;
+ if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case SIOCSIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+
+ session->mtu = ifr.ifr_mtu;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (put_user(session->mru, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCSMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (get_user(val,(int __user *) arg))
+ break;
+
+ session->mru = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCGFLAGS:
+ err = -EFAULT;
+ if (put_user(session->flags, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get flags=%d\n", session->name, session->flags);
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ err = -EFAULT;
+ if (get_user(val, (int __user *) arg))
+ break;
+ session->flags = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set flags=%d\n", session->name, session->flags);
+ err = 0;
+ break;
+
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ if (copy_to_user((void __user *) arg, &session->stats,
+ sizeof(session->stats)))
+ break;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", session->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
+
+/* Tunnel ioctl helper.
+ *
+ * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
+ * specifies a session_id, the session ioctl handler is called. This allows an
+ * application to retrieve session stats via a tunnel socket.
+ */
+static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct sock *sk = tunnel->sock;
+ struct pppol2tp_ioc_stats stats_req;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
+ cmd, arg);
+
+ sock_hold(sk);
+
+ switch (cmd) {
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ if (copy_from_user(&stats_req, (void __user *) arg,
+ sizeof(stats_req))) {
+ err = -EFAULT;
+ break;
+ }
+ if (stats_req.session_id != 0) {
+ /* resend to session ioctl handler */
+ struct pppol2tp_session *session =
+ pppol2tp_session_find(tunnel, stats_req.session_id);
+ if (session != NULL)
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+ else
+ err = -EBADR;
+ break;
+ }
+#ifdef CONFIG_XFRM
+ tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
+#endif
+ if (copy_to_user((void __user *) arg, &tunnel->stats,
+ sizeof(tunnel->stats))) {
+ err = -EFAULT;
+ break;
+ }
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", tunnel->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
+
+/* Main ioctl() handler.
+ * Dispatch to tunnel or session helpers depending on the socket.
+ */
+static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct pppol2tp_session *session;
+ struct pppol2tp_tunnel *tunnel;
+ int err;
+
+ if (!sk)
+ return 0;
+
+ err = -EBADF;
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto end;
+
+ err = -ENOTCONN;
+ if ((sk->sk_user_data == NULL) ||
+ (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session's session_id is zero, treat ioctl as a
+ * tunnel ioctl
+ */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ err = -EBADF;
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto end;
+
+ err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
+ goto end;
+ }
+
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * setsockopt() / getsockopt() support.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. In order to control kernel tunnel features, we allow userspace to
+ * create a special "tunnel" PPPoX socket which is used for control only.
+ * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
+ * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
+ *****************************************************************************/
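+
+/* Example (userspace sketch, not part of the driver): enable transmit
+ * sequence numbers and set a 100ms receive reorder timeout on a connected
+ * session socket "fd". SOL_PPPOL2TP and the PPPOL2TP_SO_* option names are
+ * those handled below and come from the kernel socket / PPPoL2TP userspace
+ * headers.
+ *
+ * int on = 1;
+ * int timeout_ms = 100;
+ *
+ * setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ, &on, sizeof(on));
+ * setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_REORDERTO,
+ *            &timeout_ms, sizeof(timeout_ms));
+ */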
+
+/* Tunnel setsockopt() helper.
+ */
+static int pppol2tp_tunnel_setsockopt(struct sock *sk,
+ struct pppol2tp_tunnel *tunnel,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ tunnel->debug = val;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session setsockopt helper.
+ */
+static int pppol2tp_session_setsockopt(struct sock *sk,
+ struct pppol2tp_session *session,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->recv_seq = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set recv_seq=%d\n", session->name,
+ session->recv_seq);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->send_seq = val ? -1 : 0;
+ {
+ struct sock *ssk = session->sock;
+ struct pppox_sock *po = pppox_sk(ssk);
+ po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
+ PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+ }
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set send_seq=%d\n", session->name, session->send_seq);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->lns_mode = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set lns_mode=%d\n", session->name,
+ session->lns_mode);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ session->debug = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", session->name, session->debug);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ session->reorder_timeout = msecs_to_jiffies(val);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set reorder_timeout=%d\n", session->name,
+ session->reorder_timeout);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Main setsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session setsockopt
+ * handler, according to whether the PPPoL2TP socket is for a regular
+ * session or the special tunnel type.
+ */
+static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct pppol2tp_session *session = sk->sk_user_data;
+ struct pppol2tp_tunnel *tunnel;
+ int val;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel
+ */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ err = -EBADF;
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto end;
+
+ err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
+ } else
+ err = pppol2tp_session_setsockopt(sk, session, optname, val);
+
+end:
+ return err;
+}
+
+/* Tunnel getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_tunnel_getsockopt(struct sock *sk,
+ struct pppol2tp_tunnel *tunnel,
+ int optname, int __user *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ *val = tunnel->debug;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_session_getsockopt(struct sock *sk,
+ struct pppol2tp_session *session,
+ int optname, int __user *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ *val = session->recv_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get recv_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ *val = session->send_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get send_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ *val = session->lns_mode;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get lns_mode=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ *val = session->debug;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ *val = (int) jiffies_to_msecs(session->reorder_timeout);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get reorder_timeout=%d\n", session->name, *val);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ }
+
+ return err;
+}
+
+/* Main getsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session getsockopt
+ * handler, according to whether the PPPoX socket is for a regular session
+ * or the special tunnel type.
+ */
+static int pppol2tp_getsockopt(struct socket *sock, int level,
+ int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct pppol2tp_session *session = sk->sk_user_data;
+ struct pppol2tp_tunnel *tunnel;
+ int val, len;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+
+ if (get_user(len, (int __user *) optlen))
+ return -EFAULT;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ if (len < 0)
+ return -EINVAL;
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get the session context */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ err = -EBADF;
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto end;
+
+ err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
+ } else
+ err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+
+ err = -EFAULT;
+ if (put_user(len, (int __user *) optlen))
+ goto end;
+
+ if (copy_to_user((void __user *) optval, &val, len))
+ goto end;
+
+ err = 0;
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * /proc filesystem for debug
+ *****************************************************************************/
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+
+struct pppol2tp_seq_data {
+ struct pppol2tp_tunnel *tunnel; /* current tunnel */
+ struct pppol2tp_session *session; /* NULL means get first session in tunnel */
+};
+
+static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
+{
+ struct pppol2tp_session *session = NULL;
+ struct hlist_node *walk;
+ int found = 0;
+ int next = 0;
+ int i;
+
+ read_lock(&tunnel->hlist_lock);
+ for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
+ hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
+ if (curr == NULL) {
+ found = 1;
+ goto out;
+ }
+ if (session == curr) {
+ next = 1;
+ continue;
+ }
+ if (next) {
+ found = 1;
+ goto out;
+ }
+ }
+ }
+out:
+ read_unlock(&tunnel->hlist_lock);
+ if (!found)
+ session = NULL;
+
+ return session;
+}
+
+static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
+{
+ struct pppol2tp_tunnel *tunnel = NULL;
+
+ read_lock(&pppol2tp_tunnel_list_lock);
+ if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
+ goto out;
+ }
+ tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
+out:
+ read_unlock(&pppol2tp_tunnel_list_lock);
+
+ return tunnel;
+}
+
+static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+
+ if (!pos)
+ goto out;
+
+ BUG_ON(m->private == NULL);
+ pd = m->private;
+
+ if (pd->tunnel == NULL) {
+ if (!list_empty(&pppol2tp_tunnel_list))
+ pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
+ } else {
+ pd->session = next_session(pd->tunnel, pd->session);
+ if (pd->session == NULL) {
+ pd->tunnel = next_tunnel(pd->tunnel);
+ }
+ }
+
+ /* NULL tunnel and session indicates end of list */
+ if ((pd->tunnel == NULL) && (pd->session == NULL))
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void pppol2tp_seq_stop(struct seq_file *p, void *v)
+{
+ /* nothing to do */
+}
+
+static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_tunnel *tunnel = v;
+
+ seq_printf(m, "\nTUNNEL '%s', %c %d\n",
+ tunnel->name,
+ (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
+ atomic_read(&tunnel->ref_count) - 1);
+ seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+ tunnel->debug,
+ tunnel->stats.tx_packets, tunnel->stats.tx_bytes,
+ tunnel->stats.tx_errors,
+ tunnel->stats.rx_packets, tunnel->stats.rx_bytes,
+ tunnel->stats.rx_errors);
+}
+
+static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_session *session = v;
+
+ seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
+ "%04X/%04X %d %c\n",
+ session->name,
+ ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
+ ntohs(session->tunnel_addr.addr.sin_port),
+ session->tunnel_addr.s_tunnel,
+ session->tunnel_addr.s_session,
+ session->tunnel_addr.d_tunnel,
+ session->tunnel_addr.d_session,
+ session->sock->sk_state,
+ (session == session->sock->sk_user_data) ?
+ 'Y' : 'N');
+ seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
+ session->mtu, session->mru,
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->lns_mode ? "LNS" : "LAC",
+ session->debug,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+ session->nr, session->ns,
+ session->stats.tx_packets,
+ session->stats.tx_bytes,
+ session->stats.tx_errors,
+ session->stats.rx_packets,
+ session->stats.rx_bytes,
+ session->stats.rx_errors);
+}
+
+static int pppol2tp_seq_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
+ seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION name, addr/port src-tid/sid "
+ "dest-tid/sid state user-data-ok\n");
+ seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ /* Show the tunnel or session context.
+ */
+ if (pd->session == NULL)
+ pppol2tp_seq_tunnel_show(m, pd->tunnel);
+ else
+ pppol2tp_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static struct seq_operations pppol2tp_seq_ops = {
+ .start = pppol2tp_seq_start,
+ .next = pppol2tp_seq_next,
+ .stop = pppol2tp_seq_stop,
+ .show = pppol2tp_seq_show,
+};
+
+/* Called when our /proc file is opened. We allocate data for use when
+ * iterating our tunnel / session contexts and store it in the private
+ * data of the seq_file.
+ */
+static int pppol2tp_proc_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ struct pppol2tp_seq_data *pd;
+ int ret = 0;
+
+ ret = seq_open(file, &pppol2tp_seq_ops);
+ if (ret < 0)
+ goto out;
+
+ m = file->private_data;
+
+ /* Allocate and fill our proc_data for access later */
+ ret = -ENOMEM;
+ m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL);
+ if (m->private == NULL)
+ goto out;
+
+ pd = m->private;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/* Called when /proc file access completes.
+ */
+static int pppol2tp_proc_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = (struct seq_file *)file->private_data;
+
+ kfree(m->private);
+ m->private = NULL;
+
+ return seq_release(inode, file);
+}
+
+static struct file_operations pppol2tp_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pppol2tp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = pppol2tp_proc_release,
+};
+
+static struct proc_dir_entry *pppol2tp_proc;
+
+#endif /* CONFIG_PROC_FS */
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static struct proto_ops pppol2tp_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppol2tp_release,
+ .bind = sock_no_bind,
+ .connect = pppol2tp_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pppol2tp_getname,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = pppol2tp_setsockopt,
+ .getsockopt = pppol2tp_getsockopt,
+ .sendmsg = pppol2tp_sendmsg,
+ .recvmsg = pppol2tp_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+};
+
+static struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+ .ioctl = pppol2tp_ioctl
+};
+
+static int __init pppol2tp_init(void)
+{
+ int err;
+
+ err = proto_register(&pppol2tp_sk_proto, 0);
+ if (err)
+ goto out;
+ err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
+ if (err)
+ goto out_unregister_pppol2tp_proto;
+
+#ifdef CONFIG_PROC_FS
+ pppol2tp_proc = create_proc_entry("pppol2tp", 0, proc_net);
+ if (!pppol2tp_proc) {
+ err = -ENOMEM;
+ goto out_unregister_pppox_proto;
+ }
+ pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
+#endif /* CONFIG_PROC_FS */
+ printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
+ PPPOL2TP_DRV_VERSION);
+
+out:
+ return err;
+
+out_unregister_pppox_proto:
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+out_unregister_pppol2tp_proto:
+ proto_unregister(&pppol2tp_sk_proto);
+ goto out;
+}
+
+static void __exit pppol2tp_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("pppol2tp", proc_net);
+#endif
+ proto_unregister(&pppol2tp_sk_proto);
+}
+
+module_init(pppol2tp_init);
+module_exit(pppol2tp_exit);
+
+MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, "
+ "James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("PPP over L2TP over UDP");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PPPOL2TP_DRV_VERSION);
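For reference, a minimal userspace sketch of driving the getsockopt() path added above (illustrative only: it assumes SOL_PPPOL2TP and PPPOL2TP_SO_DEBUG are exported through <linux/if_pppol2tp.h> by this series, and that the descriptor is already connect()ed as a PX_PROTO_OL2TP session socket; a session id of 0 at connect time would make the same call act on the tunnel instead):

/* Hypothetical userspace sketch: read the per-session debug mask back
 * from a connected PPPoL2TP socket. */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_pppol2tp.h>

static int read_session_debug(int pppol2tp_fd)
{
	int debug = 0;
	socklen_t len = sizeof(debug);

	if (getsockopt(pppol2tp_fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
		       &debug, &len) < 0) {
		perror("getsockopt(SOL_PPPOL2TP)");
		return -1;
	}
	printf("session debug mask: %#x\n", debug);
	return 0;
}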
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index ad94358ece8..451486b32f2 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -690,9 +690,9 @@ static int lan_saa9730_rx(struct net_device *dev)
lp->stats.rx_packets++;
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *) pData,
- len, 0);
+ len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
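The same mechanical conversion repeats in the driver hunks that follow: eth_copy_and_sum() never used its trailing checksum argument, so each call becomes a plain copy into the skb's linear data area. A minimal sketch of the receive copy-break pattern being converted, assuming only <linux/skbuff.h> (the function name here is illustrative, not taken from any one driver):

#include <linux/skbuff.h>

/* Illustrative only: skb_copy_to_linear_data(skb, src, len) is a
 * memcpy() into skb->data, which is exactly what
 * eth_copy_and_sum(skb, src, len, 0) already did. */
static void rx_copybreak_example(struct sk_buff *skb, const void *src,
				 unsigned int len)
{
	skb_reserve(skb, 2);			/* align the IP header */
	skb_copy_to_linear_data(skb, src, len);	/* was eth_copy_and_sum(..., 0) */
	skb_put(skb, len);			/* account for the copied bytes */
}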
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 2106becf699..384b4685e97 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -320,7 +320,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
skb_put(skb, len);
/* Copy out of kseg1 to avoid silly cache flush. */
- eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
+ skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
skb->protocol = eth_type_trans(skb, dev);
/* We don't want to receive our own packets */
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index bc8de48da31..ec2ad9f0efa 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -548,7 +548,7 @@ static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
if (skb) {
skb_reserve(skb, NET_IP_ALIGN);
- eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+ skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
*sk_buff = skb;
sis190_give_to_asic(desc, rx_buf_sz);
ret = 0;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 786d4b9c07e..f2e10196720 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1456,7 +1456,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
pci_dma_sync_single_for_cpu(np->pci_dev,
np->rx_info[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
np->rx_info[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index a123ea87893..b77ab6e8fd3 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -777,7 +777,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
{
skb_reserve(skb,2);
skb_put(skb,totlen);
- eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
+ skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
p->stats.rx_packets++;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 791e081fdc1..f1548c03332 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -853,10 +853,9 @@ static int lance_rx( struct net_device *dev )
skb_reserve( skb, 2 ); /* 16 byte align */
skb_put( skb, pkt_len ); /* Make room */
-// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
PKTBUF_ADDR(head),
- pkt_len, 0);
+ pkt_len);
skb->protocol = eth_type_trans( skb, dev );
netif_rx( skb );
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 2ad8d58dee3..b3e0158def4 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -860,7 +860,7 @@ static void bigmac_rx(struct bigmac *bp)
sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
- eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);
+ skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
sbus_dma_sync_single_for_device(bp->bigmac_sdev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index e1f912d0404..c8ba534c17b 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1313,7 +1313,7 @@ static void rx_poll(unsigned long data)
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
desc->frag[0].addr,
np->rx_buf_sz,
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 42722530ab2..053b7cb0d94 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -549,9 +549,9 @@ static void lance_rx_dvma(struct net_device *dev)
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [entry][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index fa70e0b78af..1b65ae8a1c7 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -439,8 +439,8 @@ static void qe_rx(struct sunqe *qep)
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
- len, 0);
+ skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
qep->dev->last_rx = jiffies;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 2f3184184ad..3245f16baab 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.77"
-#define DRV_MODULE_RELDATE "May 31, 2007"
+#define DRV_MODULE_VERSION "3.78"
+#define DRV_MODULE_RELDATE "July 11, 2007"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -721,6 +721,44 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
return ret;
}
+static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+{
+ u32 phy;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 ephy;
+
+ if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
+ tg3_writephy(tp, MII_TG3_EPHY_TEST,
+ ephy | MII_TG3_EPHY_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
+ if (enable)
+ phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
+ else
+ phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
+ tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
+ }
+ tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
+ }
+ } else {
+ phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
+ MII_TG3_AUXCTL_SHDWSEL_MISC;
+ if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
+ !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
+ if (enable)
+ phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ else
+ phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ phy |= MII_TG3_AUXCTL_MISC_WREN;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+ }
+ }
+}
+
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
u32 val;
@@ -1045,23 +1083,11 @@ out:
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 phy_reg;
-
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
-
- if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
- u32 phy_reg2;
-
- tg3_writephy(tp, MII_TG3_EPHY_TEST,
- phy_reg | MII_TG3_EPHY_SHADOW_EN);
- /* Enable auto-MDIX */
- if (!tg3_readphy(tp, 0x10, &phy_reg2))
- tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
- tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
- }
}
+ tg3_phy_toggle_automdix(tp, 1);
tg3_phy_set_wirespeed(tp);
return 0;
}
@@ -1162,6 +1188,19 @@ static void tg3_frob_aux_power(struct tg3 *tp)
}
}
+static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+{
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
+ return 1;
+ else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
+ if (speed != SPEED_10)
+ return 1;
+ } else if (speed == SPEED_10)
+ return 1;
+
+ return 0;
+}
+
static int tg3_setup_phy(struct tg3 *, int);
#define RESET_KIND_SHUTDOWN 0
@@ -1320,9 +1359,17 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
else
mac_mode = MAC_MODE_PORT_MODE_MII;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
- !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
- mac_mode |= MAC_MODE_LINK_POLARITY;
+ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5700) {
+ u32 speed = (tp->tg3_flags &
+ TG3_FLAG_WOL_SPEED_100MB) ?
+ SPEED_100 : SPEED_10;
+ if (tg3_5700_link_polarity(tp, speed))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
} else {
mac_mode = MAC_MODE_PORT_MODE_TBI;
}
@@ -1990,15 +2037,12 @@ relink:
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
- tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
- if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
- (current_link_up == 1 &&
- tp->link_config.active_speed == SPEED_10))
- tp->mac_mode |= MAC_MODE_LINK_POLARITY;
- } else {
- if (current_link_up == 1)
+ if (current_link_up == 1 &&
+ tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
}
/* ??? Without this setting Netgear GA302T PHY does not
@@ -2639,6 +2683,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
}
out:
@@ -2698,10 +2745,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
else
current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
- tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
- tw32_f(MAC_MODE, tp->mac_mode);
- udelay(40);
-
tp->hw_status->status =
(SD_STATUS_UPDATED |
(tp->hw_status->status & ~SD_STATUS_LINK_CHG));
@@ -3512,9 +3555,9 @@ static inline int tg3_irq_sync(struct tg3 *tp)
*/
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
+ spin_lock_bh(&tp->lock);
if (irq_sync)
tg3_irq_quiesce(tp);
- spin_lock_bh(&tp->lock);
}
static inline void tg3_full_unlock(struct tg3 *tp)
@@ -6444,6 +6487,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
udelay(40);
@@ -8805,7 +8852,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
return 0;
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
+ MAC_MODE_PORT_INT_LPBACK;
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else
@@ -8824,19 +8873,18 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
phytest | MII_TG3_EPHY_SHADOW_EN);
if (!tg3_readphy(tp, 0x1b, &phy))
tg3_writephy(tp, 0x1b, phy & ~0x20);
- if (!tg3_readphy(tp, 0x10, &phy))
- tg3_writephy(tp, 0x10, phy & ~0x4000);
tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
}
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
} else
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+ tg3_phy_toggle_automdix(tp, 0);
+
tg3_writephy(tp, MII_BMCR, val);
udelay(40);
- mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_LINK_POLARITY;
+ mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -8849,8 +8897,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
udelay(10);
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
- mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
+ mac_mode |= MAC_MODE_LINK_POLARITY;
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
@@ -9116,10 +9167,10 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
__tg3_set_rx_mode(dev);
- tg3_full_unlock(tp);
-
if (netif_running(dev))
tg3_netif_start(tp);
+
+ tg3_full_unlock(tp);
}
#endif
@@ -9410,11 +9461,13 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
case FLASH_5755VENDOR_ATMEL_FLASH_1:
case FLASH_5755VENDOR_ATMEL_FLASH_2:
case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ case FLASH_5755VENDOR_ATMEL_FLASH_5:
tp->nvram_jedecnum = JEDEC_ATMEL;
tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
tp->tg3_flags2 |= TG3_FLG2_FLASH;
tp->nvram_pagesize = 264;
- if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1)
+ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
tp->nvram_size = (protect ? 0x3e200 : 0x80000);
else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
tp->nvram_size = (protect ? 0x1f200 : 0x40000);
@@ -11944,12 +11997,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
* checksumming.
*/
if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
- dev->features |= NETIF_F_HW_CSUM;
- else
- dev->features |= NETIF_F_IP_CSUM;
- dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_IPV6_CSUM;
+
tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bd9f4f428e5..d84e75e7365 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1467,6 +1467,7 @@
#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003
+#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003
#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003
#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002
#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
@@ -1642,6 +1643,11 @@
#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */
+#define MII_TG3_AUXCTL_MISC_WREN 0x8000
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
+#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+
#define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */
#define MII_TG3_AUX_STAT_LPASS 0x0004
#define MII_TG3_AUX_STAT_SPDMASK 0x0700
@@ -1667,6 +1673,9 @@
#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
#define MII_TG3_EPHY_SHADOW_EN 0x80
+#define MII_TG3_EPHYTST_MISCCTRL 0x10 /* 5906 EPHY misc ctrl shadow register */
+#define MII_TG3_EPHYTST_MISCCTRL_MDIX 0x4000
+
#define MII_TG3_TEST1 0x1e
#define MII_TG3_TEST1_TRIM_EN 0x0010
#define MII_TG3_TEST1_CRC_EN 0x8000
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index ea896777bca..53efd6694e7 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -197,8 +197,8 @@ int tulip_poll(struct net_device *dev, int *budget)
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
@@ -420,8 +420,8 @@ static int tulip_rx(struct net_device *dev)
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 38f3b99716b..5824f6a3549 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1232,7 +1232,7 @@ static int netdev_rx(struct net_device *dev)
pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 2470b1ee33c..37e35cd277a 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1208,7 +1208,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
goto out;
}
skb_reserve(skb, 2);
- eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
+ skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index f6417292737..f984fbde8b2 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -1242,8 +1242,8 @@ xircom_rx(struct net_device *dev)
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a2c6caaaae9..62b2b300501 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -432,6 +432,7 @@ static void tun_setup(struct net_device *dev)
init_waitqueue_head(&tun->read_wait);
tun->owner = -1;
+ tun->group = -1;
SET_MODULE_OWNER(dev);
dev->open = tun_net_open;
@@ -467,8 +468,11 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
return -EBUSY;
/* Check permissions */
- if (tun->owner != -1 &&
- current->euid != tun->owner && !capable(CAP_NET_ADMIN))
+ if (((tun->owner != -1 &&
+ current->euid != tun->owner) ||
+ (tun->group != -1 &&
+ current->egid != tun->group)) &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
}
else if (__dev_get_by_name(ifr->ifr_name))
@@ -610,6 +614,13 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
break;
+ case TUNSETGROUP:
+ /* Set group of the device */
+ tun->group = (gid_t) arg;
+
+ DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
+ break;
+
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
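For reference, a minimal userspace sketch of the new TUNSETGROUP ioctl added in this hunk (illustrative only: TUNSETIFF and TUNSETPERSIST are existing ioctls, TUNSETGROUP is assumed to be exported through <linux/if_tun.h> by this series, and creating the device still requires CAP_NET_ADMIN):

/* Hypothetical sketch: create a persistent tap device and hand it to a
 * group, so group members can attach later without CAP_NET_ADMIN,
 * mirroring the existing TUNSETOWNER behaviour. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int make_group_tap(const char *name, gid_t gid)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
	    ioctl(fd, TUNSETGROUP, gid) < 0 ||
	    ioctl(fd, TUNSETPERSIST, 1) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}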
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 15b2fb8aa49..df524548d53 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1703,7 +1703,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 86e90c59d55..76752d84a30 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -255,7 +255,7 @@ static void catc_rx_done(struct urb *urb)
if (!(skb = dev_alloc_skb(pkt_len)))
return;
- eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0);
+ skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, catc->netdev);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 60d29440f31..524dc5f5e46 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -635,7 +635,7 @@ static void kaweth_usb_receive(struct urb *urb)
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0);
+ skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
skb_put(skb, pkt_len);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index adea290a9d5..565f6cc185c 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1492,9 +1492,9 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
- pkt_len, 0);
+ pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(rp->pdev,
rp->rx_skbuff_dma[entry],
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ce9230b2f63..c8b5c227193 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1011,7 +1011,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
} else {
skb->dev = dev;
skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
- eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0);
+ skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
wl3501_receive(this, skb->data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index f2a90a7fa2d..870c5393c21 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1137,7 +1137,7 @@ static int yellowfin_rx(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header */
- eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
yp->rx_buf_sz,