Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib_cm.c')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  50
1 file changed, 23 insertions(+), 27 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 076a0bbb63d..08b4676a382 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -56,13 +56,6 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
-struct ipoib_cm_id {
- struct ib_cm_id *id;
- int flags;
- u32 remote_qpn;
- u32 remote_mtu;
-};
-
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -155,8 +148,8 @@ partial_error:
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
- for (; i >= 0; --i)
- ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
+ for (; i > 0; --i)
+ ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
return NULL;
@@ -288,7 +281,6 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
rep.private_data_len = sizeof data;
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
- rep.target_ack_delay = 20; /* FIXME */
rep.srq = 1;
rep.qp_num = qp->qp_num;
rep.starting_psn = psn;
@@ -309,6 +301,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
return -ENOMEM;
p->dev = dev;
p->id = cm_id;
+ cm_id->context = p;
+ p->state = IPOIB_CM_RX_LIVE;
+ p->jiffies = jiffies;
+ INIT_LIST_HEAD(&p->list);
+
p->qp = ipoib_cm_create_rx_qp(dev, p);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
@@ -320,24 +317,24 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
if (ret)
goto err_modify;
+ spin_lock_irq(&priv->lock);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+ /* Add this entry to passive ids list head, but do not re-add it
+ * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
+ p->jiffies = jiffies;
+ if (p->state == IPOIB_CM_RX_LIVE)
+ list_move(&p->list, &priv->cm.passive_ids);
+ spin_unlock_irq(&priv->lock);
+
ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
if (ret) {
ipoib_warn(priv, "failed to send REP: %d\n", ret);
- goto err_rep;
+ if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+ ipoib_warn(priv, "unable to move qp to error state\n");
}
-
- cm_id->context = p;
- p->jiffies = jiffies;
- p->state = IPOIB_CM_RX_LIVE;
- spin_lock_irq(&priv->lock);
- if (list_empty(&priv->cm.passive_ids))
- queue_delayed_work(ipoib_workqueue,
- &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
- list_add(&p->list, &priv->cm.passive_ids);
- spin_unlock_irq(&priv->lock);
return 0;
-err_rep:
err_modify:
ib_destroy_qp(p->qp);
err_qp:
@@ -754,9 +751,9 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
p->mtu = be32_to_cpu(data->mtu);
- if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
- ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
- p->mtu, priv->dev->mtu);
+ if (p->mtu <= IPOIB_ENCAP_LEN) {
+ ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+ p->mtu, IPOIB_ENCAP_LEN);
return -EINVAL;
}
@@ -1150,7 +1147,6 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.skb_task);
- struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned mtu = priv->mcast_mtu;
@@ -1164,7 +1160,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
dev_kfree_skb_any(skb);
spin_lock_irq(&priv->tx_lock);