Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r--  drivers/infiniband/core/mad.c | 117
1 file changed, 58 insertions(+), 59 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a72bcea46ff..5ed141ebd1c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
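(For reference: the kmem_cache_t typedef was removed in this kernel generation in favor of the bare struct tag; the slab calls themselves are unchanged. A minimal sketch of how this cache is set up, assuming the 2.6.20-era kmem_cache_create() signature, which still took constructor and destructor arguments:)

    static struct kmem_cache *ib_mad_cache;

    static int __init ib_mad_init_module(void)
    {
        ib_mad_cache = kmem_cache_create("ib_mad",
                                         sizeof(struct ib_mad_private),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
                                         NULL,
                                         NULL); /* dtor; dropped in later kernels */
        if (!ib_mad_cache)
            return -ENOMEM;
        return 0;
    }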
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv,
u8 mgmt_class);
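(These prototype changes are the core of the 2.6.20 workqueue conversion: a handler no longer receives an opaque data pointer but the work item itself, and recovers its context with container_of(). The generic pattern, with illustrative names:)

    struct my_agent {
        struct work_struct local_work;
        /* ... */
    };

    static void my_handler(struct work_struct *work)
    {
        /* recover the object that embeds the work item */
        struct my_agent *agent =
            container_of(work, struct my_agent, local_work);

        /* use agent... */
    }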
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
- INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+ INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
- INIT_WORK(&mad_agent_priv->local_work, local_completions,
- mad_agent_priv);
+ INIT_WORK(&mad_agent_priv->local_work, local_completions);
atomic_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);
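(timed_work correspondingly changes type from struct work_struct to struct delayed_work; that structure change lives in mad_priv.h, outside this diff. Arming the timeout then goes through queue_delayed_work(), roughly as mad.c does elsewhere:)

    /* run timeout_sends() on the port workqueue after 'delay' jiffies */
    queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
                       &mad_agent_priv->timed_work, delay);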
@@ -999,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_agent = mad_send_wr->send_buf.mad_agent;
sge = mad_send_wr->sg_list;
- sge[0].addr = dma_map_single(mad_agent->device->dma_device,
- mad_send_wr->send_buf.mad,
- sge[0].length,
- DMA_TO_DEVICE);
- pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
-
- sge[1].addr = dma_map_single(mad_agent->device->dma_device,
- ib_get_payload(mad_send_wr),
- sge[1].length,
- DMA_TO_DEVICE);
- pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+ sge[0].addr = ib_dma_map_single(mad_agent->device,
+ mad_send_wr->send_buf.mad,
+ sge[0].length,
+ DMA_TO_DEVICE);
+ mad_send_wr->header_mapping = sge[0].addr;
+
+ sge[1].addr = ib_dma_map_single(mad_agent->device,
+ ib_get_payload(mad_send_wr),
+ sge[1].length,
+ DMA_TO_DEVICE);
+ mad_send_wr->payload_mapping = sge[1].addr;
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1027,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
}
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
if (ret) {
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- sge[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- sge[1].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->header_mapping,
+ sge[0].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->payload_mapping,
+ sge[1].length, DMA_TO_DEVICE);
}
return ret;
}
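(The ib_dma_* wrappers were added in 2.6.20 so that devices such as ipath, which do DMA on kernel virtual addresses, can interpose their own mapping ops; for ordinary hardware they reduce to the old calls. Approximately, from ib_verbs.h of that era:)

    static inline u64 ib_dma_map_single(struct ib_device *dev,
                                        void *cpu_addr, size_t size,
                                        enum dma_data_direction direction)
    {
        if (dev->dma_ops)
            return dev->dma_ops->map_single(dev, cpu_addr,
                                            size, direction);
        return dma_map_single(dev->dma_device, cpu_addr,
                              size, direction);
    }

(A matching ib_dma_mapping_error(dev, addr) replaces dma_mapping_error() for checking the returned handle.)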
@@ -1851,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
mad_list);
recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
- dma_unmap_single(port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(port_priv->device,
+ recv->header.mapping,
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
/* Setup MAD receive work completion from "normal" work completion */
recv->header.wc = *wc;
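(The pci_unmap_addr() accessors disappear because the companion mad_priv.h change, not shown in this diff, turns the mapping members into plain u64 fields: DECLARE_PCI_UNMAP_ADDR() can compile to nothing on some configurations, while ib_dma_unmap_single() always needs the handle back. The resulting shape, approximately:)

    struct ib_mad_private_header {
        struct ib_mad_list_head mad_list;
        struct ib_mad_recv_wc recv_wc;
        struct ib_wc wc;
        u64 mapping;    /* was DECLARE_PCI_UNMAP_ADDR(mapping) */
    };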
@@ -2081,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
qp_info = send_queue->qp_info;
retry:
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+ mad_send_wr->header_mapping,
+ mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+ mad_send_wr->payload_mapping,
+ mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
queued_send_wr = NULL;
spin_lock_irqsave(&send_queue->lock, flags);
list_del(&mad_list->list);
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
/*
* IB MAD completion callback
*/
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
{
struct ib_mad_port_private *port_priv;
struct ib_wc wc;
- port_priv = (struct ib_mad_port_private *)data;
+ port_priv = container_of(work, struct ib_mad_port_private, work);
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
}
EXPORT_SYMBOL(ib_cancel_mad);
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
struct ib_wc wc;
struct ib_mad_send_wc mad_send_wc;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv =
+ container_of(work, struct ib_mad_agent_private, local_work);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
return ret;
}
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags, delay;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+ timed_work.work);
mad_send_wc.vendor_err = 0;
spin_lock_irqsave(&mad_agent_priv->lock, flags);
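(Note the nested member in this container_of(): a delayed-work handler is handed the work_struct embedded inside struct delayed_work, so the offset is taken through timed_work.work. Later kernels add a to_delayed_work() helper for the first hop; here it is spelled out. The equivalent two-step form:)

    /* struct delayed_work { struct work_struct work; struct timer_list timer; }; */
    struct delayed_work *dwork =
        container_of(work, struct delayed_work, work);
    struct ib_mad_agent_private *mad_agent_priv =
        container_of(dwork, struct ib_mad_agent_private, timed_work);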
@@ -2527,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
break;
}
}
- sg_list.addr = dma_map_single(qp_info->port_priv->
- device->dma_device,
- &mad_priv->grh,
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
- pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+ sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+ &mad_priv->grh,
+ sizeof *mad_priv -
+ sizeof mad_priv->header,
+ DMA_FROM_DEVICE);
+ mad_priv->header.mapping = sg_list.addr;
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
@@ -2548,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&mad_priv->header,
- mapping),
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ mad_priv->header.mapping,
+ sizeof *mad_priv -
+ sizeof mad_priv->header,
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, mad_priv);
printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
break;
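(The mapped length, sizeof *mad_priv - sizeof mad_priv->header, covers only the GRH and MAD payload; the driver-private header stays outside the DMA region. Neither the old nor the new code checks the returned handle, but with these wrappers a check would look like this sketch, which is illustrative and not part of the patch:)

    sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
                                     &mad_priv->grh,
                                     sizeof *mad_priv -
                                     sizeof mad_priv->header,
                                     DMA_FROM_DEVICE);
    if (ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr)) {
        kmem_cache_free(ib_mad_cache, mad_priv);
        ret = -ENOMEM;
        break;
    }
    mad_priv->header.mapping = sg_list.addr;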
@@ -2585,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
/* Remove from posted receive MAD list */
list_del(&mad_list->list);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ recv->header.mapping,
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, recv);
}
@@ -2799,7 +2798,7 @@ static int ib_mad_port_open(struct ib_device *device,
ret = -ENOMEM;
goto error8;
}
- INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+ INIT_WORK(&port_priv->work, ib_mad_completion_handler);
spin_lock_irqsave(&ib_mad_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_mad_port_list);
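(With no context pointer stored in the work item, the queueing side stays trivial: the CQ event callback queues the port's work item and ib_mad_completion_handler() recovers port_priv through the embedding, as shown above. Approximately:)

    static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
    {
        struct ib_mad_port_private *port_priv = cq->cq_context;

        queue_work(port_priv->wq, &port_priv->work);
    }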