Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/mad.c                  | 10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c        | 38
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h        |  4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h           |  1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 28
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c            |  6
6 files changed, 61 insertions, 26 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index d393b504bf2..c82f47a66e4 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -665,7 +665,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
struct ib_wc mad_wc;
struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
- if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
+ /*
+ * Directed route handling starts if the initial LID routed part of
+ * a request or the ending LID routed part of a response is empty.
+ * If we are at the start of the LID routed part, don't update the
+ * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
+ */
+ if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
+ IB_LID_PERMISSIVE &&
+ !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
ret = -EINVAL;
printk(KERN_ERR PFX "Invalid directed route\n");
goto out;
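
Read on its own, the condition added above can be summarized as follows. This is a minimal sketch, not part of the patch: it reuses the struct ib_smp fields and the ib_get_smp_direction() helper that the hunk itself relies on, and the wrapper function name is hypothetical. For a request (direction bit clear) the initial LID routed part is described by dr_slid; for a response (direction bit set) the ending LID routed part is described by dr_dlid. A permissive LID there means directed route handling starts at this node, so hop_ptr/hop_cnt must not be updated yet (IB spec Vol 1, section 14.2.2), and only then does smi_handle_dr_smp_send() get the final say:

    /* hypothetical helper, for illustration only */
    static int dr_smp_starts_here(struct ib_smp *smp)
    {
            /*
             * request:  look at the initial LID routed part (dr_slid)
             * response: look at the ending LID routed part (dr_dlid)
             */
            return (ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
                   IB_LID_PERMISSIVE;
    }
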
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f9b9b93dc50..2825615ce81 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1029,25 +1029,6 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
dev_lim->uar_scratch_entry_sz = size;
- mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
- dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
- mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
- dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
- mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
- dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
- mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
- dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz);
- mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
- dev_lim->reserved_mrws, dev_lim->reserved_mtts);
- mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
- dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
- mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
- dev_lim->max_pds, dev_lim->reserved_mgms);
- mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
- dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
-
- mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
-
if (mthca_is_memfree(dev)) {
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
dev_lim->max_srq_sz = 1 << field;
@@ -1093,6 +1074,25 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
}
+ mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
+ dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
+ mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+ dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
+ mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
+ dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
+ mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
+ dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz);
+ mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
+ dev_lim->reserved_mrws, dev_lim->reserved_mtts);
+ mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
+ dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
+ mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
+ dev_lim->max_pds, dev_lim->reserved_mgms);
+ mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+ dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
+
+ mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
+
out:
mthca_free_mailbox(dev, mailbox);
return err;
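
The mthca_cmd.c change is purely a reordering: the block of mthca_dbg() calls moves below the mthca_is_memfree() branch, because both arms of that branch fill in more dev_lim fields after the point where the messages used to be printed (max_srq_sz in the mem-free arm, mpt_entry_sz in the other, as visible in the surrounding context). Logging first and overriding afterwards reports values the driver never uses. A tiny standalone illustration of the pitfall, with made-up numbers and none of the real driver code:

    #include <stdio.h>

    struct dev_lim { int max_srq_sz; };

    int main(void)
    {
            struct dev_lim lim = { .max_srq_sz = 1 << 10 }; /* value parsed from the mailbox */
            int memfree = 1;                                /* pretend this is a mem-free HCA */

            /* printing here would report 1024 even though the driver keeps 65536 */
            if (memfree)
                    lim.max_srq_sz = 1 << 16;               /* mem-free path overrides the limit */

            printf("Max SRQ WQEs: %d\n", lim.max_srq_sz);   /* log only after all overrides */
            return 0;
    }
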
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 2a165fd06e5..e481037288d 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -53,8 +53,8 @@
#define DRV_NAME "ib_mthca"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "0.06"
-#define DRV_RELDATE "June 23, 2005"
+#define DRV_VERSION "0.07"
+#define DRV_RELDATE "February 13, 2006"
enum {
MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index e0a5412b7e6..2f85a9a831b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -78,6 +78,7 @@ enum {
IPOIB_FLAG_SUBINTERFACE = 4,
IPOIB_MCAST_RUN = 5,
IPOIB_STOP_REAPER = 6,
+ IPOIB_MCAST_STARTED = 7,
IPOIB_MAX_BACKOFF_SECONDS = 16,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ccaa0c38707..a2408d7ec59 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -533,8 +533,10 @@ void ipoib_mcast_join_task(void *dev_ptr)
}
if (!priv->broadcast) {
- priv->broadcast = ipoib_mcast_alloc(dev, 1);
- if (!priv->broadcast) {
+ struct ipoib_mcast *broadcast;
+
+ broadcast = ipoib_mcast_alloc(dev, 1);
+ if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
@@ -544,10 +546,11 @@ void ipoib_mcast_join_task(void *dev_ptr)
return;
}
- memcpy(priv->broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ spin_lock_irq(&priv->lock);
+ memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
sizeof (union ib_gid));
+ priv->broadcast = broadcast;
- spin_lock_irq(&priv->lock);
__ipoib_mcast_add(dev, priv->broadcast);
spin_unlock_irq(&priv->lock);
}
@@ -601,6 +604,10 @@ int ipoib_mcast_start_thread(struct net_device *dev)
queue_work(ipoib_workqueue, &priv->mcast_task);
mutex_unlock(&mcast_mutex);
+ spin_lock_irq(&priv->lock);
+ set_bit(IPOIB_MCAST_STARTED, &priv->flags);
+ spin_unlock_irq(&priv->lock);
+
return 0;
}
@@ -611,6 +618,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
+ spin_lock_irq(&priv->lock);
+ clear_bit(IPOIB_MCAST_STARTED, &priv->flags);
+ spin_unlock_irq(&priv->lock);
+
mutex_lock(&mcast_mutex);
clear_bit(IPOIB_MCAST_RUN, &priv->flags);
cancel_delayed_work(&priv->mcast_task);
@@ -693,6 +704,14 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
*/
spin_lock(&priv->lock);
+ if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
+ !priv->broadcast ||
+ !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
+ ++priv->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
mcast = __ipoib_mcast_find(dev, mgid);
if (!mcast) {
/* Let's create a new send only group now */
@@ -754,6 +773,7 @@ out:
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
}
+unlock:
spin_unlock(&priv->lock);
}
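
Taken together, the ipoib changes establish a small protocol around priv->lock: the broadcast group pointer is only published while the lock is held, and the send path refuses work until the multicast thread has set IPOIB_MCAST_STARTED and the broadcast group exists. A condensed sketch of that ordering follows; it is not the patch itself. It reuses the real field names (priv->lock, priv->flags, priv->broadcast, priv->stats), but the two function names are hypothetical and the bodies are heavily trimmed:

    /* assumes the declarations in ipoib.h */
    static void join_sketch(struct ipoib_dev_priv *priv,
                            struct ipoib_mcast *broadcast)
    {
            spin_lock_irq(&priv->lock);
            priv->broadcast = broadcast;    /* publish only under priv->lock */
            spin_unlock_irq(&priv->lock);
    }

    static void send_sketch(struct ipoib_dev_priv *priv, struct sk_buff *skb)
    {
            spin_lock(&priv->lock);
            /* drop traffic until ipoib_mcast_start_thread() has set
             * IPOIB_MCAST_STARTED and the broadcast group has been published */
            if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) || !priv->broadcast) {
                    ++priv->stats.tx_dropped;
                    dev_kfree_skb_any(skb);
                    spin_unlock(&priv->lock);
                    return;
            }
            /* ... look up or create the mcast group and queue/send the skb ... */
            spin_unlock(&priv->lock);
    }
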
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2d2d4ac3525..960dae5c87d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1155,6 +1155,12 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
spin_lock_irq(target->scsi_host->host_lock);
+ if (target->state == SRP_TARGET_DEAD ||
+ target->state == SRP_TARGET_REMOVED) {
+ scmnd->result = DID_BAD_TARGET << 16;
+ goto out;
+ }
+
if (scmnd->host_scribble == (void *) -1L)
goto out;
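
The ib_srp.c hunk adds an early bail-out: once the target has gone to SRP_TARGET_DEAD or SRP_TARGET_REMOVED, task-management requests are completed immediately with DID_BAD_TARGET instead of being posted on a connection that no longer exists. Below is a minimal sketch of that guard, not the driver function: the helper name is hypothetical, the state and result constants are the real ones, and like the code above it is meant to run under target->scsi_host->host_lock:

    /* hypothetical helper mirroring the new check in srp_send_tsk_mgmt() */
    static int srp_target_gone(struct srp_target_port *target,
                               struct scsi_cmnd *scmnd)
    {
            if (target->state == SRP_TARGET_DEAD ||
                target->state == SRP_TARGET_REMOVED) {
                    scmnd->result = DID_BAD_TARGET << 16;
                    return 1;   /* caller unlocks and returns without sending an IU */
            }
            return 0;
    }
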