path: root/drivers/infiniband/core
author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2008-08-08 16:21:02 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2008-08-08 16:21:02 -0400
commit     e4ddcb0a6bf04d53ce77b4eb87bbbb32c4261d11 (patch)
tree       d27d2fea50a384d97aa2d0cf5c8657c916f761d4 /drivers/infiniband/core
parent     f2afa7711f8585ffc088ba538b9a510e0d5dca12 (diff)
parent     6e86841d05f371b5b9b86ce76c02aaee83352298 (diff)
Merge commit 'v2.6.27-rc1' into for-linus
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/cm.c        72
-rw-r--r--  drivers/infiniband/core/cma.c       99
-rw-r--r--  drivers/infiniband/core/iwcm.c       3
-rw-r--r--  drivers/infiniband/core/sa_query.c   3
-rw-r--r--  drivers/infiniband/core/ucm.c       10
-rw-r--r--  drivers/infiniband/core/ucma.c      11
6 files changed, 148 insertions, 50 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 55738eead3b..922d35f4fc0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -44,6 +44,7 @@
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
+#include <linux/kdev_t.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
@@ -162,8 +163,8 @@ struct cm_port {
struct cm_device {
struct list_head list;
- struct ib_device *device;
- struct kobject dev_obj;
+ struct ib_device *ib_device;
+ struct device *device;
u8 ack_delay;
struct cm_port *port[0];
};
@@ -339,7 +340,7 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
{
av->port = port;
av->pkey_index = wc->pkey_index;
- ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
+ ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
grh, &av->ah_attr);
}
@@ -353,7 +354,7 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
read_lock_irqsave(&cm.device_lock, flags);
list_for_each_entry(cm_dev, &cm.device_list, list) {
- if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
+ if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
&p, NULL)) {
port = cm_dev->port[p-1];
break;
@@ -364,13 +365,13 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
if (!port)
return -EINVAL;
- ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
+ ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
be16_to_cpu(path->pkey), &av->pkey_index);
if (ret)
return ret;
av->port = port;
- ib_init_ah_from_path(cm_dev->device, port->port_num, path,
+ ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
return 0;
@@ -1515,7 +1516,7 @@ static int cm_req_handler(struct cm_work *work)
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
+ cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
@@ -1550,7 +1551,7 @@ static int cm_req_handler(struct cm_work *work)
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
- ib_get_cached_gid(work->port->cm_dev->device,
+ ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num, 0, &work->path[0].sgid);
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
&work->path[0].sgid, sizeof work->path[0].sgid,
@@ -2950,7 +2951,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
- cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
+ cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3578,7 +3579,7 @@ static void cm_get_ack_delay(struct cm_device *cm_dev)
{
struct ib_device_attr attr;
- if (ib_query_device(cm_dev->device, &attr))
+ if (ib_query_device(cm_dev->ib_device, &attr))
cm_dev->ack_delay = 0; /* acks will rely on packet life time */
else
cm_dev->ack_delay = attr.local_ca_ack_delay;
@@ -3618,18 +3619,6 @@ static struct kobj_type cm_port_obj_type = {
.release = cm_release_port_obj
};
-static void cm_release_dev_obj(struct kobject *obj)
-{
- struct cm_device *cm_dev;
-
- cm_dev = container_of(obj, struct cm_device, dev_obj);
- kfree(cm_dev);
-}
-
-static struct kobj_type cm_dev_obj_type = {
- .release = cm_release_dev_obj
-};
-
struct class cm_class = {
.name = "infiniband_cm",
};
@@ -3640,7 +3629,7 @@ static int cm_create_port_fs(struct cm_port *port)
int i, ret;
ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
- &port->cm_dev->dev_obj,
+ &port->cm_dev->device->kobj,
"%d", port->port_num);
if (ret) {
kfree(port);
@@ -3676,7 +3665,7 @@ static void cm_remove_port_fs(struct cm_port *port)
kobject_put(&port->port_obj);
}
-static void cm_add_one(struct ib_device *device)
+static void cm_add_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
@@ -3691,26 +3680,27 @@ static void cm_add_one(struct ib_device *device)
int ret;
u8 i;
- if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
return;
cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
- device->phys_port_cnt, GFP_KERNEL);
+ ib_device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
return;
- cm_dev->device = device;
+ cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev);
- ret = kobject_init_and_add(&cm_dev->dev_obj, &cm_dev_obj_type,
- &cm_class.subsys.kobj, "%s", device->name);
- if (ret) {
+ cm_dev->device = device_create_drvdata(&cm_class, &ib_device->dev,
+ MKDEV(0, 0), NULL,
+ "%s", ib_device->name);
+ if (!cm_dev->device) {
kfree(cm_dev);
return;
}
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
- for (i = 1; i <= device->phys_port_cnt; i++) {
+ for (i = 1; i <= ib_device->phys_port_cnt; i++) {
port = kzalloc(sizeof *port, GFP_KERNEL);
if (!port)
goto error1;
@@ -3723,7 +3713,7 @@ static void cm_add_one(struct ib_device *device)
if (ret)
goto error1;
- port->mad_agent = ib_register_mad_agent(device, i,
+ port->mad_agent = ib_register_mad_agent(ib_device, i,
IB_QPT_GSI,
&reg_req,
0,
@@ -3733,11 +3723,11 @@ static void cm_add_one(struct ib_device *device)
if (IS_ERR(port->mad_agent))
goto error2;
- ret = ib_modify_port(device, i, 0, &port_modify);
+ ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
goto error3;
}
- ib_set_client_data(device, &cm_client, cm_dev);
+ ib_set_client_data(ib_device, &cm_client, cm_dev);
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
@@ -3753,14 +3743,14 @@ error1:
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
while (--i) {
port = cm_dev->port[i-1];
- ib_modify_port(device, port->port_num, 0, &port_modify);
+ ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
- kobject_put(&cm_dev->dev_obj);
+ device_unregister(cm_dev->device);
}
-static void cm_remove_one(struct ib_device *device)
+static void cm_remove_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
@@ -3770,7 +3760,7 @@ static void cm_remove_one(struct ib_device *device)
unsigned long flags;
int i;
- cm_dev = ib_get_client_data(device, &cm_client);
+ cm_dev = ib_get_client_data(ib_device, &cm_client);
if (!cm_dev)
return;
@@ -3778,14 +3768,14 @@ static void cm_remove_one(struct ib_device *device)
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
- for (i = 1; i <= device->phys_port_cnt; i++) {
+ for (i = 1; i <= ib_device->phys_port_cnt; i++) {
port = cm_dev->port[i-1];
- ib_modify_port(device, port->port_num, 0, &port_modify);
+ ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
flush_workqueue(cm.wq);
cm_remove_port_fs(port);
}
- kobject_put(&cm_dev->dev_obj);
+ device_unregister(cm_dev->device);
}
static int __init ib_cm_init(void)
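The cm.c hunks above replace the hand-rolled kobject embedded in struct cm_device with a class device obtained from device_create_drvdata(). As a rough illustration of that registration/teardown pattern -- a minimal sketch only, using hypothetical example_class/example_dev/example_register names that are not part of this patch -- the conversion boils down to:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>

/* example_class is assumed to have been class_register()ed at module init. */
static struct class example_class = {
	.name = "example",
};

struct example_dev {
	struct device *device;	/* shows up under /sys/class/example/<name> */
};

/* Minimal sketch of the conversion in cm_add_one(): create a named class
 * device instead of initializing an embedded kobject by hand. */
static struct example_dev *example_register(struct device *parent,
					    const char *name)
{
	struct example_dev *edev;

	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return NULL;

	edev->device = device_create_drvdata(&example_class, parent,
					     MKDEV(0, 0), NULL, "%s", name);
	if (IS_ERR(edev->device)) {
		kfree(edev);
		return NULL;
	}
	return edev;
}

/* Teardown mirrors the cm_remove_one() side: one device_unregister() call
 * replaces the old kobject_put()-driven release. */
static void example_unregister(struct example_dev *edev)
{
	device_unregister(edev->device);
	kfree(edev);
}

device_create_drvdata() hangs the new device off the given class and parent, so the child port kobjects in cm_create_port_fs() can anchor to cm_dev->device->kobj and the whole sysfs subtree is torn down with device_unregister().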
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ae11d5cc74d..e980ff3335d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -168,6 +168,12 @@ struct cma_work {
struct rdma_cm_event event;
};
+struct cma_ndev_work {
+ struct work_struct work;
+ struct rdma_id_private *id;
+ struct rdma_cm_event event;
+};
+
union cma_ip_addr {
struct in6_addr ip6;
struct {
@@ -914,7 +920,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_cm_event event;
int ret = 0;
- if (cma_disable_callback(id_priv, CMA_CONNECT))
+ if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
+ cma_disable_callback(id_priv, CMA_CONNECT)) ||
+ (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
+ cma_disable_callback(id_priv, CMA_DISCONNECT)))
return 0;
memset(&event, 0, sizeof event);
@@ -950,6 +959,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
case IB_CM_TIMEWAIT_EXIT:
+ event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
+ break;
case IB_CM_MRA_RECEIVED:
/* ignore event */
goto out;
@@ -1598,6 +1609,30 @@ out:
kfree(work);
}
+static void cma_ndev_work_handler(struct work_struct *_work)
+{
+ struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
+ struct rdma_id_private *id_priv = work->id;
+ int destroy = 0;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state == CMA_DESTROYING ||
+ id_priv->state == CMA_DEVICE_REMOVAL)
+ goto out;
+
+ if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
+ cma_exch(id_priv, CMA_DESTROYING);
+ destroy = 1;
+ }
+
+out:
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_deref_id(id_priv);
+ if (destroy)
+ rdma_destroy_id(&id_priv->id);
+ kfree(work);
+}
+
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2723,6 +2758,65 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_leave_multicast);
+static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
+{
+ struct rdma_dev_addr *dev_addr;
+ struct cma_ndev_work *work;
+
+ dev_addr = &id_priv->id.route.addr.dev_addr;
+
+ if ((dev_addr->src_dev == ndev) &&
+ memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
+ printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
+ ndev->name, &id_priv->id);
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ INIT_WORK(&work->work, cma_ndev_work_handler);
+ work->id = id_priv;
+ work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
+ atomic_inc(&id_priv->refcount);
+ queue_work(cma_wq, &work->work);
+ }
+
+ return 0;
+}
+
+static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
+ void *ctx)
+{
+ struct net_device *ndev = (struct net_device *)ctx;
+ struct cma_device *cma_dev;
+ struct rdma_id_private *id_priv;
+ int ret = NOTIFY_DONE;
+
+ if (dev_net(ndev) != &init_net)
+ return NOTIFY_DONE;
+
+ if (event != NETDEV_BONDING_FAILOVER)
+ return NOTIFY_DONE;
+
+ if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
+ return NOTIFY_DONE;
+
+ mutex_lock(&lock);
+ list_for_each_entry(cma_dev, &dev_list, list)
+ list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+ ret = cma_netdev_change(ndev, id_priv);
+ if (ret)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&lock);
+ return ret;
+}
+
+static struct notifier_block cma_nb = {
+ .notifier_call = cma_netdev_callback
+};
+
static void cma_add_one(struct ib_device *device)
{
struct cma_device *cma_dev;
@@ -2831,6 +2925,7 @@ static int cma_init(void)
ib_sa_register_client(&sa_client);
rdma_addr_register_client(&addr_client);
+ register_netdevice_notifier(&cma_nb);
ret = ib_register_client(&cma_client);
if (ret)
@@ -2838,6 +2933,7 @@ static int cma_init(void)
return 0;
err:
+ unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
destroy_workqueue(cma_wq);
@@ -2847,6 +2943,7 @@ err:
static void cma_cleanup(void)
{
ib_unregister_client(&cma_client);
+ unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
destroy_workqueue(cma_wq);
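The new cma.c code registers a netdevice notifier so that a bonding failover on a bond master can be surfaced to RDMA CM consumers as RDMA_CM_EVENT_ADDR_CHANGE. A minimal, stand-alone sketch of that notifier pattern (hypothetical example_* names, not from this patch) looks roughly like:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Minimal sketch of the notifier pattern added to cma.c: react only to a
 * bonding failover reported on a bonding master interface. */
static int example_netdev_callback(struct notifier_block *self,
				   unsigned long event, void *ctx)
{
	struct net_device *ndev = ctx;	/* ctx is the net_device in this kernel generation */

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	printk(KERN_INFO "bonding failover seen on %s\n", ndev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_callback
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_nb);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

As in cma_netdev_callback() above, everything other than NETDEV_BONDING_FAILOVER on a bonding master is ignored with NOTIFY_DONE; the real handler then walks the cma device/id lists and queues cma_ndev_work to deliver the address-change event from workqueue context.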
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 81c9195b512..8f9509e1ebf 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -942,8 +942,7 @@ static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
case IW_CM_STATE_CONN_RECV:
case IW_CM_STATE_ESTABLISHED:
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
- qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE|
+ qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE|
IB_ACCESS_REMOTE_READ;
ret = 0;
break;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1341de793e5..7863a50d56f 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1064,7 +1064,8 @@ static void ib_sa_remove_one(struct ib_device *device)
for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
ib_unregister_mad_agent(sa_dev->port[i].agent);
- kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+ if (sa_dev->port[i].sm_ah)
+ kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
}
kfree(sa_dev);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 9494005d1c9..e603736682b 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -43,7 +43,6 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/mutex.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
@@ -1154,11 +1153,18 @@ static unsigned int ib_ucm_poll(struct file *filp,
return mask;
}
+/*
+ * ib_ucm_open() does not need the BKL:
+ *
+ * - no global state is referred to;
+ * - there is no ioctl method to race against;
+ * - no further module initialization is required for open to work
+ * after the device is registered.
+ */
static int ib_ucm_open(struct inode *inode, struct file *filp)
{
struct ib_ucm_file *file;
- cycle_kernel_lock();
file = kmalloc(sizeof(*file), GFP_KERNEL);
if (!file)
return -ENOMEM;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 195f97302fe..b41dd26bbfa 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -38,7 +38,6 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1149,6 +1148,14 @@ static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
return mask;
}
+/*
+ * ucma_open() does not need the BKL:
+ *
+ * - no global state is referred to;
+ * - there is no ioctl method to race against;
+ * - no further module initialization is required for open to work
+ * after the device is registered.
+ */
static int ucma_open(struct inode *inode, struct file *filp)
{
struct ucma_file *file;
@@ -1157,7 +1164,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
if (!file)
return -ENOMEM;
- lock_kernel();
INIT_LIST_HEAD(&file->event_list);
INIT_LIST_HEAD(&file->ctx_list);
init_waitqueue_head(&file->poll_wait);
@@ -1165,7 +1171,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
filp->private_data = file;
file->filp = filp;
- unlock_kernel();
return 0;
}
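The ucm.c and ucma.c hunks drop cycle_kernel_lock()/lock_kernel() from the open() methods, relying on the fact that open only touches freshly allocated per-file state. A minimal sketch of that BKL-free open() pattern (hypothetical example_file/example_open names; the file_operations wiring is assumed and not shown):

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_file {
	struct mutex lock;		/* protects this file's private state only */
	struct list_head event_list;
};

/* Minimal sketch of the BKL-free open(): nothing global is touched, so no
 * lock_kernel()/cycle_kernel_lock() is needed around the allocation. */
static int example_open(struct inode *inode, struct file *filp)
{
	struct example_file *file;

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	mutex_init(&file->lock);
	INIT_LIST_HEAD(&file->event_list);
	filp->private_data = file;
	return 0;
}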