author | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2009-03-27 16:14:38 +0100
---|---|---
committer | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2009-03-27 16:14:38 +0100
commit | b92efa9abffc4a634cd2e7a0f81f8aa6310d67c9 (patch) |
tree | 9847508d9b8d4e585f90db4a453bfbc3700c997e /drivers/infiniband |
parent | a16fffdd8eb95ebab7dc22414896fe6493951e0e (diff) |
parent | be0ea69674ed95e1e98cb3687a241badc756d228 (diff) |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into avr32-arch
Diffstat (limited to 'drivers/infiniband')
62 files changed, 1188 insertions, 787 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f1e82a92e61..5130fc55b8e 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
     unsigned long flags;
     int ret = 0;

-    service_mask = service_mask ? service_mask :
-                   __constant_cpu_to_be64(~0ULL);
+    service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
     service_id &= service_mask;
     if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
         (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
     spin_lock_irqsave(&cm.lock, flags);
     if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
         cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+        cm_id->service_mask = ~cpu_to_be64(0);
     } else {
         cm_id->service_id = service_id;
         cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
         goto error1;
     }
     cm_id->service_id = param->service_id;
-    cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+    cm_id->service_mask = ~cpu_to_be64(0);
     cm_id_priv->timeout_ms = cm_convert_to_ms(
                     param->primary_path->packet_life_time) * 2 +
                  cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
     cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
     cm_id_priv->id.context = listen_cm_id_priv->id.context;
     cm_id_priv->id.service_id = req_msg->service_id;
-    cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+    cm_id_priv->id.service_mask = ~cpu_to_be64(0);

     cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
     cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
         goto out;

     cm_id->service_id = param->service_id;
-    cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+    cm_id->service_mask = ~cpu_to_be64(0);
     cm_id_priv->timeout_ms = param->timeout_ms;
     cm_id_priv->max_cm_retries = param->max_cm_retries;
     ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
     cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
     cm_id_priv->id.context = cur_cm_id_priv->id.context;
     cm_id_priv->id.service_id = sidr_req_msg->service_id;
-    cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+    cm_id_priv->id.service_mask = ~cpu_to_be64(0);

     cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
     cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
     rwlock_init(&cm.device_lock);
     spin_lock_init(&cm.lock);
     cm.listen_service_table = RB_ROOT;
-    cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+    cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
     cm.remote_id_table = RB_ROOT;
     cm.remote_qp_table = RB_ROOT;
     cm.remote_sidr_table = RB_ROOT;
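The cm.c hunks above replace `__constant_cpu_to_be64(~0ULL)` with `~cpu_to_be64(0)`: an all-ones word looks the same in either byte order, so complementing a byte-swapped zero yields the identical constant, and `cpu_to_be64()` already folds to a compile-time constant when given one. A standalone sketch of the equivalence, using glibc's `htobe64()` as a userspace stand-in for the kernel helper (the stand-in is an assumption of this sketch, not part of the patch):

```c
/* Userspace sketch: htobe64() stands in for the kernel's cpu_to_be64().
 * The all-ones pattern is invariant under byte swapping, so complementing
 * a byte-swapped zero gives the same constant on any endianness. */
#include <assert.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t new_style = ~htobe64(0);     /* ~cpu_to_be64(0) in the patch */
    uint64_t old_style = htobe64(~0ULL);  /* __constant_cpu_to_be64(~0ULL) */

    assert(new_style == old_style);
    printf("service_mask = 0x%016llx\n", (unsigned long long)new_style);
    return 0;
}
```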
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index aec9c7af825..7e63c08f697 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,17 +44,17 @@
 #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */

-#define CM_REQ_ATTR_ID      __constant_htons(0x0010)
-#define CM_MRA_ATTR_ID      __constant_htons(0x0011)
-#define CM_REJ_ATTR_ID      __constant_htons(0x0012)
-#define CM_REP_ATTR_ID      __constant_htons(0x0013)
-#define CM_RTU_ATTR_ID      __constant_htons(0x0014)
-#define CM_DREQ_ATTR_ID     __constant_htons(0x0015)
-#define CM_DREP_ATTR_ID     __constant_htons(0x0016)
-#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
-#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
-#define CM_LAP_ATTR_ID      __constant_htons(0x0019)
-#define CM_APR_ATTR_ID      __constant_htons(0x001A)
+#define CM_REQ_ATTR_ID      cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID      cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID      cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID      cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID      cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID     cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID     cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID      cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID      cpu_to_be16(0x001A)

 enum cm_msg_sequence {
     CM_MSG_SEQUENCE_REQ,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7913b804311..d1fba415333 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -193,7 +193,7 @@ void ib_dealloc_device(struct ib_device *device)
     BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

-    ib_device_unregister_sysfs(device);
+    kobject_put(&device->dev.kobj);
 }
 EXPORT_SYMBOL(ib_dealloc_device);
@@ -348,6 +348,8 @@ void ib_unregister_device(struct ib_device *device)
     mutex_unlock(&device_mutex);

+    ib_device_unregister_sysfs(device);
+
     spin_lock_irqsave(&device->client_data_lock, flags);
     list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
         kfree(context);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5c54fc2350b..de922a04ca2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     mad_agent_priv->agent.context = context;
     mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
     mad_agent_priv->agent.port_num = port_num;
+    spin_lock_init(&mad_agent_priv->lock);
+    INIT_LIST_HEAD(&mad_agent_priv->send_list);
+    INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+    INIT_LIST_HEAD(&mad_agent_priv->done_list);
+    INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+    INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
+    INIT_LIST_HEAD(&mad_agent_priv->local_list);
+    INIT_WORK(&mad_agent_priv->local_work, local_completions);
+    atomic_set(&mad_agent_priv->refcount, 1);
+    init_completion(&mad_agent_priv->comp);

     spin_lock_irqsave(&port_priv->reg_lock, flags);
     mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
     spin_unlock_irqrestore(&port_priv->reg_lock, flags);

-    spin_lock_init(&mad_agent_priv->lock);
-    INIT_LIST_HEAD(&mad_agent_priv->send_list);
-    INIT_LIST_HEAD(&mad_agent_priv->wait_list);
-    INIT_LIST_HEAD(&mad_agent_priv->done_list);
-    INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-    INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
-    INIT_LIST_HEAD(&mad_agent_priv->local_list);
-    INIT_WORK(&mad_agent_priv->local_work, local_completions);
-    atomic_set(&mad_agent_priv->refcount, 1);
-    init_completion(&mad_agent_priv->comp);
-
     return &mad_agent_priv->agent;

 error4:
@@ -743,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         break;
     case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
         kmem_cache_free(ib_mad_cache, mad_priv);
-        kfree(local);
-        ret = 1;
-        goto out;
+        break;
     case IB_MAD_RESULT_SUCCESS:
         /* Treat like an incoming receive MAD */
         port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
@@ -756,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                         &mad_priv->mad.mad);
         }
         if (!port_priv || !recv_mad_agent) {
+            /*
+             * No receiving agent so drop packet and
+             * generate send completion.
+             */
             kmem_cache_free(ib_mad_cache, mad_priv);
-            kfree(local);
-            ret = 0;
-            goto out;
+            break;
         }
         local->mad_priv = mad_priv;
         local->recv_mad_agent = recv_mad_agent;
@@ -2356,7 +2355,7 @@ static void local_completions(struct work_struct *work)
     struct ib_mad_local_private *local;
     struct ib_mad_agent_private *recv_mad_agent;
     unsigned long flags;
-    int recv = 0;
+    int free_mad;
     struct ib_wc wc;
     struct ib_mad_send_wc mad_send_wc;
@@ -2370,14 +2369,15 @@ static void local_completions(struct work_struct *work)
                    completion_list);
         list_del(&local->completion_list);
         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+        free_mad = 0;
         if (local->mad_priv) {
             recv_mad_agent = local->recv_mad_agent;
             if (!recv_mad_agent) {
                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+                free_mad = 1;
                 goto local_send_completion;
             }
-            recv = 1;
             /*
              * Defined behavior is to complete response
              * before request
@@ -2422,7 +2422,7 @@ local_send_completion:
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
         atomic_dec(&mad_agent_priv->refcount);
-        if (!recv)
+        if (free_mad)
             kmem_cache_free(ib_mad_cache, local->mad_priv);
         kfree(local);
     }
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 3af2b84cd83..57a3c6f947b 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
         goto bad;
     }

-    if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+    if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
         if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
             rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
             goto bad;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7863a50d56f..1865049e80f 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -395,6 +395,8 @@ static void update_sm_ah(struct work_struct *work)
     }

     spin_lock_irq(&port->ah_lock);
+    if (port->sm_ah)
+        kref_put(&port->sm_ah->ref, free_sm_ah);
     port->sm_ah = new_ah;
     spin_unlock_irq(&port->ah_lock);
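The reordering in mad.c above is a lifetime fix: every lock, list head, and the refcount of the new agent are now initialized *before* the agent is linked into `port_priv->agent_list`, so a concurrent reader that finds the agent under `reg_lock` can never observe uninitialized fields. A minimal userspace sketch of the same publish-after-init pattern (the struct and names here are illustrative, not the ib_mad API):

```c
/* Sketch: fully initialize an object before publishing it on a shared
 * list. Illustrative types only, not the kernel data structures. */
#include <pthread.h>
#include <stdlib.h>

struct agent {
    pthread_mutex_t lock;   /* per-object state, set up first */
    int refcount;
    struct agent *next;     /* list linkage, published last   */
};

static struct agent *agent_list;                     /* shared list */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct agent *register_agent(void)
{
    struct agent *a = calloc(1, sizeof(*a));
    if (!a)
        return NULL;

    /* 1. initialize everything while the object is still private */
    pthread_mutex_init(&a->lock, NULL);
    a->refcount = 1;

    /* 2. only then make it visible to other threads */
    pthread_mutex_lock(&list_lock);
    a->next = agent_list;
    agent_list = a;
    pthread_mutex_unlock(&list_lock);
    return a;
}
```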
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b43f7d3682d..5c04cfb54cb 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -66,11 +66,6 @@ struct port_table_attribute {
     int index;
 };

-static inline int ibdev_is_alive(const struct ib_device *dev)
-{
-    return dev->reg_state == IB_DEV_REGISTERED;
-}
-
 static ssize_t port_attr_show(struct kobject *kobj,
                   struct attribute *attr, char *buf)
 {
@@ -80,8 +75,6 @@ static ssize_t port_attr_show(struct kobject *kobj,
     if (!port_attr->show)
         return -EIO;
-    if (!ibdev_is_alive(p->ibdev))
-        return -ENODEV;

     return port_attr->show(p, port_attr, buf);
 }
@@ -562,9 +555,6 @@ static ssize_t show_node_type(struct device *device,
 {
     struct ib_device *dev = container_of(device, struct ib_device, dev);

-    if (!ibdev_is_alive(dev))
-        return -ENODEV;
-
     switch (dev->node_type) {
     case RDMA_NODE_IB_CA:   return sprintf(buf, "%d: CA\n", dev->node_type);
     case RDMA_NODE_RNIC:    return sprintf(buf, "%d: RNIC\n", dev->node_type);
@@ -581,9 +571,6 @@ static ssize_t show_sys_image_guid(struct device *device,
     struct ib_device_attr attr;
     ssize_t ret;

-    if (!ibdev_is_alive(dev))
-        return -ENODEV;
-
     ret = ib_query_device(dev, &attr);
     if (ret)
         return ret;
@@ -600,9 +587,6 @@ static ssize_t show_node_guid(struct device *device,
 {
     struct ib_device *dev = container_of(device, struct ib_device, dev);

-    if (!ibdev_is_alive(dev))
-        return -ENODEV;
-
     return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
                be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
@@ -848,6 +832,9 @@ void ib_device_unregister_sysfs(struct ib_device *device)
     struct kobject *p, *t;
     struct ib_port *port;

+    /* Hold kobject until ib_dealloc_device() */
+    kobject_get(&device->dev.kobj);
+
     list_for_each_entry_safe(p, t, &device->port_list, entry) {
         list_del(&p->entry);
         port = container_of(p, struct ib_port, kobj);
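The sysfs.c/device.c pair of changes moves to plain reference counting: `ib_device_unregister_sysfs()` now pins the device kobject with `kobject_get()` before tearing down the attribute tree, and `ib_dealloc_device()` drops that pin with `kobject_put()`, so the device is freed only when the last user is gone. That is also why the `ibdev_is_alive()` checks can disappear: once unregistration removes the attributes, nothing can invoke a show method on a dead device. A small get/put sketch of the idea (illustrative names, not the kobject API; single-threaded, so the counter is a plain int):

```c
/* Sketch of get/put lifetime management: whoever drops the last
 * reference frees the object, so teardown and release are decoupled. */
#include <stdio.h>
#include <stdlib.h>

struct obj {
    int refcount;
};

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static void obj_put(struct obj *o)
{
    if (--o->refcount == 0) {
        printf("last reference dropped, freeing\n");
        free(o);
    }
}

int main(void)
{
    struct obj *dev = malloc(sizeof(*dev));
    dev->refcount = 1;  /* creation reference                     */

    obj_get(dev);       /* unregister path pins the object...     */
    obj_put(dev);       /* ...and later releases its pin          */
    obj_put(dev);       /* dealloc drops the final reference      */
    return 0;
}
```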
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 113f3c03c5b..7d79aa361e2 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -76,7 +76,6 @@ static irqreturn_t c2_interrupt(int irq, void *dev_id);
 static void c2_tx_timeout(struct net_device *netdev);
 static int c2_change_mtu(struct net_device *netdev, int new_mtu);
 static void c2_reset(struct c2_port *c2_port);
-static struct net_device_stats *c2_get_stats(struct net_device *netdev);

 static struct pci_device_id c2_pci_table[] = {
     { PCI_DEVICE(0x18b8, 0xb001) },
@@ -349,7 +348,7 @@ static void c2_tx_clean(struct c2_port *c2_port)
                     elem->hw_desc + C2_TXP_ADDR);
             __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
                      elem->hw_desc + C2_TXP_FLAGS);
-            c2_port->netstats.tx_dropped++;
+            c2_port->netdev->stats.tx_dropped++;
             break;
         } else {
             __raw_writew(0,
@@ -457,7 +456,7 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
              elem->hw_desc + C2_RXP_FLAGS);

     pr_debug("packet dropped\n");
-    c2_port->netstats.rx_dropped++;
+    c2_port->netdev->stats.rx_dropped++;
 }

 static void c2_rx_interrupt(struct net_device *netdev)
@@ -532,8 +531,8 @@ static void c2_rx_interrupt(struct net_device *netdev)
         netif_rx(skb);

         netdev->last_rx = jiffies;
-        c2_port->netstats.rx_packets++;
-        c2_port->netstats.rx_bytes += buflen;
+        netdev->stats.rx_packets++;
+        netdev->stats.rx_bytes += buflen;
     }

     /* Save where we left off */
@@ -797,8 +796,8 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
              elem->hw_desc + C2_TXP_FLAGS);

-    c2_port->netstats.tx_packets++;
-    c2_port->netstats.tx_bytes += maplen;
+    netdev->stats.tx_packets++;
+    netdev->stats.tx_bytes += maplen;

     /* Loop thru additional data fragments and queue them */
     if (skb_shinfo(skb)->nr_frags) {
@@ -823,8 +822,8 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
             __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                      elem->hw_desc + C2_TXP_FLAGS);

-            c2_port->netstats.tx_packets++;
-            c2_port->netstats.tx_bytes += maplen;
+            netdev->stats.tx_packets++;
+            netdev->stats.tx_bytes += maplen;
         }
     }

@@ -845,13 +844,6 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     return NETDEV_TX_OK;
 }

-static struct net_device_stats *c2_get_stats(struct net_device *netdev)
-{
-    struct c2_port *c2_port = netdev_priv(netdev);
-
-    return &c2_port->netstats;
-}
-
 static void c2_tx_timeout(struct net_device *netdev)
 {
     struct c2_port *c2_port = netdev_priv(netdev);
@@ -880,6 +872,16 @@ static int c2_change_mtu(struct net_device *netdev, int new_mtu)
     return ret;
 }

+static const struct net_device_ops c2_netdev = {
+    .ndo_open            = c2_up,
+    .ndo_stop            = c2_down,
+    .ndo_start_xmit      = c2_xmit_frame,
+    .ndo_tx_timeout      = c2_tx_timeout,
+    .ndo_change_mtu      = c2_change_mtu,
+    .ndo_set_mac_address = eth_mac_addr,
+    .ndo_validate_addr   = eth_validate_addr,
+};
+
 /* Initialize network device */
 static struct net_device *c2_devinit(struct c2_dev *c2dev,
                      void __iomem * mmio_addr)
@@ -894,12 +896,7 @@ static struct net_device *c2_devinit(struct c2_dev *c2dev,
     SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

-    netdev->open = c2_up;
-    netdev->stop = c2_down;
-    netdev->hard_start_xmit = c2_xmit_frame;
-    netdev->get_stats = c2_get_stats;
-    netdev->tx_timeout = c2_tx_timeout;
-    netdev->change_mtu = c2_change_mtu;
+    netdev->netdev_ops = &c2_netdev;
     netdev->watchdog_timeo = C2_TX_TIMEOUT;
     netdev->irq = c2dev->pcidev->irq;
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index d12a24a84fd..f7ff66f9836 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -369,8 +369,6 @@ struct c2_port {
     unsigned long mem_size;

     u32 rx_buf_size;
-
-    struct net_device_stats netstats;
 };

 /*
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 5119d650818..f1948fad85d 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -708,26 +708,27 @@ static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
 {
-    int ret = 0;
-
     if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
         return -EINVAL;

     netdev->mtu = new_mtu;

     /* TODO: Tell rnic about new rmda interface mtu */
-    return ret;
+    return 0;
 }

+static const struct net_device_ops c2_pseudo_netdev_ops = {
+    .ndo_open          = c2_pseudo_up,
+    .ndo_stop          = c2_pseudo_down,
+    .ndo_start_xmit    = c2_pseudo_xmit_frame,
+    .ndo_change_mtu    = c2_pseudo_change_mtu,
+    .ndo_validate_addr = eth_validate_addr,
+};
+
 static void setup(struct net_device *netdev)
 {
-    netdev->open = c2_pseudo_up;
-    netdev->stop = c2_pseudo_down;
-    netdev->hard_start_xmit = c2_pseudo_xmit_frame;
-    netdev->get_stats = NULL;
-    netdev->tx_timeout = NULL;
-    netdev->set_mac_address = NULL;
-    netdev->change_mtu = c2_pseudo_change_mtu;
+    netdev->netdev_ops = &c2_pseudo_netdev_ops;
+    netdev->watchdog_timeo = 0;
     netdev->type = ARPHRD_ETHER;
     netdev->mtu = 1500;
@@ -735,7 +736,6 @@ static void setup(struct net_device *netdev)
     netdev->addr_len = ETH_ALEN;
     netdev->tx_queue_len = 0;
     netdev->flags |= IFF_NOARP;
-    return;
 }

 static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
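Both amso1100 files above are part of the net_device_ops conversion: the per-device function pointers (`netdev->open`, `->stop`, `->hard_start_xmit`, ...) move into a single shared, const ops table referenced through `netdev->netdev_ops`, and the driver-private stats copy is dropped in favor of the stats embedded in `struct net_device`. The ops-table pattern in isolation, with simplified stand-in types rather than the real netdev structures:

```c
/* Sketch of the ops-table refactor: one const table of function
 * pointers shared by all instances, instead of per-instance fields. */
#include <stdio.h>

struct device;

struct device_ops {
    int (*open)(struct device *);
    int (*stop)(struct device *);
};

struct device {
    const struct device_ops *ops;  /* replaces per-device pointers */
};

static int my_open(struct device *d) { (void)d; puts("open"); return 0; }
static int my_stop(struct device *d) { (void)d; puts("stop"); return 0; }

/* read-only and shared: less memory per device, harder to corrupt */
static const struct device_ops my_ops = {
    .open = my_open,
    .stop = my_stop,
};

int main(void)
{
    struct device dev = { .ops = &my_ops };
    dev.ops->open(&dev);
    dev.ops->stop(&dev);
    return 0;
}
```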
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 4dcf08b3fd8..a4a82bff710 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -450,7 +450,7 @@ static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
     if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
         return 0;

-    if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
+    if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
         Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
         return 0;
@@ -701,6 +701,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
     u32 stag_idx;
     u32 wptr;

+    if (rdev_p->flags)
+        return -EIO;
+
     stag_state = stag_state > 0;
     stag_idx = (*stag) >> 8;
@@ -938,6 +941,23 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
     if (!rdev_p->t3cdev_p)
         rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
     rdev_p->t3cdev_p->ulp = (void *) rdev_p;
+
+    err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
+                    &(rdev_p->fw_info));
+    if (err) {
+        printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
+               __func__, rdev_p->t3cdev_p, err);
+        goto err1;
+    }
+    if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
+        printk(KERN_ERR MOD "fatal firmware version mismatch: "
+               "need version %u but adapter has version %u\n",
+               CXIO_FW_MAJ,
+               G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
+        err = -EINVAL;
+        goto err1;
+    }
+
     err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
                     &(rdev_p->rnic_info));
     if (err) {
@@ -1204,11 +1224,12 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
         }

         /* incoming SEND with no receive posted failures */
-        if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
+        if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
             Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
             ret = -1;
             goto skip_cqe;
         }
+        BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
         goto proc_cqe;
     }
@@ -1223,6 +1244,13 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
          * then we complete this with TPT_ERR_MSN and mark the wq in
          * error.
          */
+
+        if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
+            wq->error = 1;
+            ret = -1;
+            goto skip_cqe;
+        }
+
         if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
             wq->error = 1;
             hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
@@ -1277,6 +1305,7 @@ proc_cqe:
             cxio_hal_pblpool_free(wq->rdev,
                 wq->rq[Q_PTR2IDX(wq->rq_rptr,
                 wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
+        BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
         wq->rq_rptr++;
     }
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 656fe47bc84..094a66d1480 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -61,6 +61,8 @@
 #define T3_MAX_DEV_NAME_LEN 32

+#define CXIO_FW_MAJ 7
+
 struct cxio_hal_ctrl_qp {
     u32 wptr;
     u32 rptr;
@@ -108,6 +110,9 @@ struct cxio_rdev {
     struct gen_pool *pbl_pool;
     struct gen_pool *rqt_pool;
     struct list_head entry;
+    struct ch_embedded_info fw_info;
+    u32 flags;
+#define CXIO_ERROR_FATAL 1
 };

 static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 04618f7bfbb..ff9be1a1310 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -604,6 +604,12 @@ struct t3_cqe {
 #define CQE_STATUS(x)  (G_CQE_STATUS(be32_to_cpu((x).header)))
 #define CQE_OPCODE(x)  (G_CQE_OPCODE(be32_to_cpu((x).header)))

+#define CQE_SEND_OPCODE(x)( \
+    (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
+    (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
+    (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
+    (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
+
 #define CQE_LEN(x)     (be32_to_cpu((x).len))

 /* used for RQ completion processing */
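The new `CQE_SEND_OPCODE()` macro above widens the old `CQE_OPCODE(*cqe) == T3_SEND` test so that all four SEND flavours (plain, with solicited event, with invalidate, and both) take the same receive-queue paths. A compact way to express the same predicate, shown here with placeholder opcode values standing in for the `T3_*` constants in cxio_wr.h:

```c
/* Sketch: classify every SEND variant with one predicate. The enum
 * values are placeholders, not the driver's real encodings. */
enum t3_opcode {
    T3_SEND,
    T3_SEND_WITH_SE,
    T3_SEND_WITH_INV,
    T3_SEND_WITH_SE_INV,
    T3_READ_RESP,
};

int is_send_opcode(enum t3_opcode op)
{
    switch (op) {
    case T3_SEND:
    case T3_SEND_WITH_SE:
    case T3_SEND_WITH_INV:
    case T3_SEND_WITH_SE_INV:
        return 1;
    default:
        return 0;
    }
}
```

One design note: the macro form in the patch evaluates `(x).header` up to four times; a helper like the one sketched evaluates its argument once, which matters if the argument ever has side effects.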
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 4489c89d671..37a4fc264a0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,13 +51,15 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
+static void iwch_err_handler(struct t3cdev *, u32, u32);

 struct cxgb3_client t3c_client = {
     .name = "iw_cxgb3",
     .add = open_rnic_dev,
     .remove = close_rnic_dev,
     .handlers = t3c_handlers,
-    .redirect = iwch_ep_redirect
+    .redirect = iwch_ep_redirect,
+    .err_handler = iwch_err_handler
 };

 static LIST_HEAD(dev_list);
@@ -160,6 +162,17 @@ static void close_rnic_dev(struct t3cdev *tdev)
     mutex_unlock(&dev_mutex);
 }

+static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+{
+    struct cxio_rdev *rdev = tdev->ulp;
+
+    if (status == OFFLOAD_STATUS_DOWN)
+        rdev->flags = CXIO_ERROR_FATAL;
+
+    return;
+
+}
+
 static int __init iwch_init_module(void)
 {
     int err;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 44e936e48a3..8699947aaf6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1678,6 +1678,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
     struct iwch_ep *ep = ctx;

+    if (state_read(&ep->com) != FPDU_MODE)
+        return CPL_RET_BUF_DONE;
+
     PDBG("%s ep %p\n", __func__, ep);
     skb_pull(skb, sizeof(struct cpl_rdma_terminate));
     PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 7b67a677172..743c5d8b880 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -179,11 +179,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
     case TPT_ERR_BOUND:
     case TPT_ERR_INVALIDATE_SHARED_MR:
     case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
-        printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-               "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
-               CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-               CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-               CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
         (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
         post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
         break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 19661b2f040..c758fbd5847 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
     if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
         plen = 4;
         wqe->write.sgl[0].stag = wr->ex.imm_data;
-        wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
-        wqe->write.num_sgle = __constant_cpu_to_be32(0);
+        wqe->write.sgl[0].len = cpu_to_be32(0);
+        wqe->write.num_sgle = cpu_to_be32(0);
         *flit_cnt = 6;
     } else {
         plen = 0;
@@ -195,15 +195,12 @@ static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
     return 0;
 }

-/*
- * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
- */
 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
                 u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
     int i;
     struct iwch_mr *mhp;
-    u32 offset;
+    u64 offset;
     for (i = 0; i < num_sgle; i++) {

         mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
@@ -235,8 +232,8 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
             return -EINVAL;
         }
         offset = sg_list[i].addr - mhp->attr.va_fbo;
-        offset += ((u32) mhp->attr.va_fbo) %
-                  (1UL << (12 + mhp->attr.page_size));
+        offset += mhp->attr.va_fbo &
+                  ((1UL << (12 + mhp->attr.page_size)) - 1);
         pbl_addr[i] = ((mhp->attr.pbl_addr -
                 rhp->rdev.rnic_info.pbl_base) >> 3) +
                 (offset >> (12 + mhp->attr.page_size));
@@ -266,8 +263,8 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
         wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

         /* to in the WQE == the offset into the page */
-        wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
-                (1UL << (12 + page_size[i])));
+        wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
+                ((1UL << (12 + page_size[i])) - 1));

         /* pbl_addr is the adapters address in the PBL */
         wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
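The iwch_qp.c hunks rest on the identity that, for a power-of-two page size, `addr % (1UL << s)` equals `addr & ((1UL << s) - 1)`; beyond that, masking `va_fbo` directly (instead of first truncating it to `u32`) keeps all the significant bits, and `offset` grows to `u64` so it cannot wrap on large regions. A tiny self-check of both points:

```c
/* Sketch: power-of-two modulo as a mask, and why a 32-bit offset
 * silently wraps once a region passes 4 GiB. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    unsigned int shift = 12;            /* 4 KiB pages */
    uint64_t va_fbo = 0x123456789abcULL;

    /* modulo by a power of two == masking by (power - 1) */
    assert(va_fbo % (1ULL << shift) ==
           (va_fbo & ((1ULL << shift) - 1)));

    /* the old u32 offset wraps for offsets beyond 4 GiB */
    uint64_t offset64 = 5ULL << 30;     /* 5 GiB into the region   */
    uint32_t offset32 = (uint32_t)offset64; /* old code's type     */
    assert((uint64_t)offset32 != offset64); /* value was corrupted */
    return 0;
}
```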
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 2f4c28a3027..97e4b231cdc 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -196,7 +196,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
     if (h_ret != H_SUCCESS) {
         ehca_err(device, "hipz_h_alloc_resource_cq() failed "
-             "h_ret=%li device=%p", h_ret, device);
+             "h_ret=%lli device=%p", h_ret, device);
         cq = ERR_PTR(ehca2ib_return_code(h_ret));
         goto create_cq_exit2;
     }
@@ -232,7 +232,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
         if (h_ret < H_SUCCESS) {
             ehca_err(device, "hipz_h_register_rpage_cq() failed "
-                 "ehca_cq=%p cq_num=%x h_ret=%li counter=%i "
+                 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
                  "act_pages=%i", my_cq, my_cq->cq_number,
                  h_ret, counter, param.act_pages);
             cq = ERR_PTR(-EINVAL);
@@ -244,7 +244,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
             if ((h_ret != H_SUCCESS) || vpage) {
                 ehca_err(device, "Registration of pages not "
                      "complete ehca_cq=%p cq_num=%x "
-                     "h_ret=%li", my_cq, my_cq->cq_number,
+                     "h_ret=%lli", my_cq, my_cq->cq_number,
                      h_ret);
                 cq = ERR_PTR(-EAGAIN);
                 goto create_cq_exit4;
@@ -252,7 +252,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
         } else {
             if (h_ret != H_PAGE_REGISTERED) {
                 ehca_err(device, "Registration of page failed "
-                     "ehca_cq=%p cq_num=%x h_ret=%li "
+                     "ehca_cq=%p cq_num=%x h_ret=%lli "
                      "counter=%i act_pages=%i",
                      my_cq, my_cq->cq_number,
                      h_ret, counter, param.act_pages);
@@ -266,7 +266,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
     gal = my_cq->galpas.kernel;
     cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
-    ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+    ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
          my_cq, my_cq->cq_number, cqx_fec);

     my_cq->ib_cq.cqe = my_cq->nr_of_entries =
@@ -307,7 +307,7 @@ create_cq_exit3:
     h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
     if (h_ret != H_SUCCESS)
         ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
-             "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret);
+             "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);

 create_cq_exit2:
     write_lock_irqsave(&ehca_cq_idr_lock, flags);
@@ -355,7 +355,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
     h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
     if (h_ret == H_R_STATE) {
         /* cq in err: read err data and destroy it forcibly */
-        ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err "
+        ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
              "state. Try to delete it forcibly.",
              my_cq, cq_num, my_cq->ipz_cq_handle.handle);
         ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
         h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
         if (h_ret == H_SUCCESS)
             ehca_dbg(device, "cq_num=%x deleted successfully.",
                  cq_num);
     }
     if (h_ret != H_SUCCESS) {
-        ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li "
+        ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
             "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
         return ehca2ib_return_code(h_ret);
     }
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 46288220cfb..9209c5332df 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -393,7 +393,7 @@ int ehca_modify_port(struct ib_device *ibdev,
     hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
                   cap, props->init_type, port_modify_mask);
     if (hret != H_SUCCESS) {
-        ehca_err(&shca->ib_device, "Modify port failed h_ret=%li",
+        ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
              hret);
         ret = -EINVAL;
     }
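The long run of ehca hunks that follows is one mechanical change: every `%lx`/`%li` applied to a `u64`/`s64` becomes `%llx`/`%lli`, matching the kernel's switch of 64-bit powerpc `u64` to `unsigned long long`. Portable userspace code usually sidesteps the same problem with the `<inttypes.h>` macros; a short sketch:

```c
/* Sketch: printing 64-bit integers portably. PRIx64 expands to the
 * right conversion for the platform's uint64_t; the %lx -> %llx churn
 * below does the equivalent by hand for the kernel's u64. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t handle = 0xdeadbeefcafef00dULL;

    printf("handle=%" PRIx64 "\n", handle);               /* portable */
    printf("handle=%llx\n", (unsigned long long)handle);  /* explicit */
    return 0;
}
```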
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3128a5090db..99bcbd7ffb0 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -99,7 +99,7 @@ static void print_error_data(struct ehca_shca *shca, void *data,
             return;

         ehca_err(&shca->ib_device,
-             "QP 0x%x (resource=%lx) has errors.",
+             "QP 0x%x (resource=%llx) has errors.",
              qp->ib_qp.qp_num, resource);
         break;
     }
@@ -108,21 +108,21 @@ static void print_error_data(struct ehca_shca *shca, void *data,
         struct ehca_cq *cq = (struct ehca_cq *)data;

         ehca_err(&shca->ib_device,
-             "CQ 0x%x (resource=%lx) has errors.",
+             "CQ 0x%x (resource=%llx) has errors.",
              cq->cq_number, resource);
         break;
     }
     default:
         ehca_err(&shca->ib_device,
-             "Unknown error type: %lx on %s.",
+             "Unknown error type: %llx on %s.",
              type, shca->ib_device.name);
         break;
     }

-    ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
+    ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
     ehca_err(&shca->ib_device, "EHCA ----- error data begin "
          "---------------------------------------------------");
-    ehca_dmp(rblock, length, "resource=%lx", resource);
+    ehca_dmp(rblock, length, "resource=%llx", resource);
     ehca_err(&shca->ib_device, "EHCA ----- error data end "
          "----------------------------------------------------");

@@ -152,7 +152,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
     if (ret == H_R_STATE)
         ehca_err(&shca->ib_device,
-             "No error data is available: %lx.", resource);
+             "No error data is available: %llx.", resource);
     else if (ret == H_SUCCESS) {
         int length;

@@ -164,7 +164,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
         print_error_data(shca, data, rblock, length);
     } else
         ehca_err(&shca->ib_device,
-             "Error data could not be fetched: %lx", resource);
+             "Error data could not be fetched: %llx", resource);

     ehca_free_fw_ctrlblock(rblock);

@@ -514,7 +514,7 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
     struct ehca_cq *cq;

     eqe_value = eqe->entry;
-    ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+    ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
     if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
         ehca_dbg(&shca->ib_device, "Got completion event");
         token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
@@ -603,7 +603,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
             ret = hipz_h_eoi(eq->ist);
             if (ret != H_SUCCESS)
                 ehca_err(&shca->ib_device,
-                     "bad return code EOI -rc = %ld\n", ret);
+                     "bad return code EOI -rc = %lld\n", ret);
             ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
         }
         if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c7b8a506af6..368311ce332 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -304,7 +304,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
     h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
     if (h_ret != H_SUCCESS) {
-        ehca_gen_err("Cannot query device properties. h_ret=%li",
+        ehca_gen_err("Cannot query device properties. h_ret=%lli",
                  h_ret);
         ret = -EPERM;
         goto sense_attributes1;
@@ -391,7 +391,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
     port = (struct hipz_query_port *)rblock;
     h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
     if (h_ret != H_SUCCESS) {
-        ehca_gen_err("Cannot query port properties. h_ret=%li",
+        ehca_gen_err("Cannot query port properties. h_ret=%lli",
                  h_ret);
         ret = -EPERM;
         goto sense_attributes1;
@@ -682,7 +682,7 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 {
     struct ehca_shca *shca = dev->driver_data;

-    return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
+    return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
index e3ef0264ccc..120aedf9f98 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -88,7 +88,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     if (h_ret != H_SUCCESS)
         ehca_err(ibqp->device,
              "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
-             "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+             "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);

     return ehca2ib_return_code(h_ret);
 }
@@ -125,7 +125,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
     if (h_ret != H_SUCCESS)
         ehca_err(ibqp->device,
              "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
-             "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+             "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);

     return ehca2ib_return_code(h_ret);
 }
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f974367cad4..72f83f7df61 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -204,7 +204,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
     }

     if ((size == 0) ||
         (((u64)iova_start + size) < (u64)iova_start)) {
-        ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
+        ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
              size, iova_start);
         ib_mr = ERR_PTR(-EINVAL);
         goto reg_phys_mr_exit0;
@@ -309,8 +309,8 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
     }

     if (length == 0 || virt + length < virt) {
-        ehca_err(pd->device, "bad input values: length=%lx "
-             "virt_base=%lx", length, virt);
+        ehca_err(pd->device, "bad input values: length=%llx "
+             "virt_base=%llx", length, virt);
         ib_mr = ERR_PTR(-EINVAL);
         goto reg_user_mr_exit0;
     }
@@ -373,7 +373,7 @@ reg_user_mr_fallback:
               &e_mr->ib.ib_mr.rkey);
     if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
         ehca_warn(pd->device, "failed to register mr "
-              "with hwpage_size=%lx", hwpage_size);
+              "with hwpage_size=%llx", hwpage_size);
         ehca_info(pd->device, "try to register mr with "
               "kpage_size=%lx", PAGE_SIZE);
         /*
@@ -509,7 +509,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
         goto rereg_phys_mr_exit1;
     if ((new_size == 0) ||
         (((u64)iova_start + new_size) < (u64)iova_start)) {
-        ehca_err(mr->device, "bad input values: new_size=%lx "
+        ehca_err(mr->device, "bad input values: new_size=%llx "
              "iova_start=%p", new_size, iova_start);
         ret = -EINVAL;
         goto rereg_phys_mr_exit1;
@@ -580,8 +580,8 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
     h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
     if (h_ret != H_SUCCESS) {
-        ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p "
-             "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+        ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
+             "hca_hndl=%llx mr_hndl=%llx lkey=%x",
              h_ret, mr, shca->ipz_hca_handle.handle,
              e_mr->ipz_mr_handle.handle, mr->lkey);
         ret = ehca2ib_return_code(h_ret);
@@ -630,8 +630,8 @@ int ehca_dereg_mr(struct ib_mr *mr)
     /* TODO: BUSY: MR still has bound window(s) */
     h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
     if (h_ret != H_SUCCESS) {
-        ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p "
-             "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+        ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
+             "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
              h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
              e_mr->ipz_mr_handle.handle, mr->lkey);
         ret = ehca2ib_return_code(h_ret);
@@ -671,8 +671,8 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
     h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
                      e_pd->fw_pd, &hipzout);
     if (h_ret != H_SUCCESS) {
-        ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li "
-             "shca=%p hca_hndl=%lx mw=%p",
+        ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
+             "shca=%p hca_hndl=%llx mw=%p",
              h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
         ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
         goto alloc_mw_exit1;
@@ -713,8 +713,8 @@ int ehca_dealloc_mw(struct ib_mw *mw)
     h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
     if (h_ret != H_SUCCESS) {
-        ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p "
-             "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
+        ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
+             "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
              h_ret, shca, mw, mw->rkey,
              shca->ipz_hca_handle.handle,
             e_mw->ipz_mw_handle.handle);
         return ehca2ib_return_code(h_ret);
@@ -840,7 +840,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
         goto map_phys_fmr_exit0;
     if (iova % e_fmr->fmr_page_size) {
         /* only whole-numbered pages */
-        ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
+        ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
              iova, e_fmr->fmr_page_size);
         ret = -EINVAL;
         goto map_phys_fmr_exit0;
@@ -878,7 +878,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
 map_phys_fmr_exit0:
     if (ret)
         ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
-             "iova=%lx", ret, fmr, page_list, list_len, iova);
+             "iova=%llx", ret, fmr, page_list, list_len, iova);
     return ret;
 } /* end ehca_map_phys_fmr() */
@@ -964,8 +964,8 @@ int ehca_dealloc_fmr(struct ib_fmr *fmr)
     h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
     if (h_ret != H_SUCCESS) {
-        ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%li e_fmr=%p "
-             "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
e_fmr=%p " + "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x", h_ret, e_fmr, shca->ipz_hca_handle.handle, e_fmr->ipz_mr_handle.handle, fmr->lkey); ret = ehca2ib_return_code(h_ret); @@ -1007,8 +1007,8 @@ int ehca_reg_mr(struct ehca_shca *shca, (u64)iova_start, size, hipz_acl, e_pd->fw_pd, &hipzout); if (h_ret != H_SUCCESS) { - ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%li " - "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle); + ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli " + "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle); ret = ehca2ib_return_code(h_ret); goto ehca_reg_mr_exit0; } @@ -1033,9 +1033,9 @@ int ehca_reg_mr(struct ehca_shca *shca, ehca_reg_mr_exit1: h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); if (h_ret != H_SUCCESS) { - ehca_err(&shca->ib_device, "h_ret=%li shca=%p e_mr=%p " - "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " - "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%i", + ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p " + "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x " + "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i", h_ret, shca, e_mr, iova_start, size, acl, e_pd, hipzout.lkey, pginfo, pginfo->num_kpages, pginfo->num_hwpages, ret); @@ -1045,8 +1045,8 @@ ehca_reg_mr_exit1: ehca_reg_mr_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " - "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " - "num_kpages=%lx num_hwpages=%lx", + "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " + "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; @@ -1116,8 +1116,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, */ if (h_ret != H_SUCCESS) { ehca_err(&shca->ib_device, "last " - "hipz_reg_rpage_mr failed, h_ret=%li " - "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx" + "hipz_reg_rpage_mr failed, h_ret=%lli " + "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx" " lkey=%x", h_ret, e_mr, i, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle, @@ -1128,8 +1128,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, ret = 0; } else if (h_ret != H_PAGE_REGISTERED) { ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " - "h_ret=%li e_mr=%p i=%x lkey=%x hca_hndl=%lx " - "mr_hndl=%lx", h_ret, e_mr, i, + "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx " + "mr_hndl=%llx", h_ret, e_mr, i, e_mr->ib.ib_mr.lkey, shca->ipz_hca_handle.handle, e_mr->ipz_mr_handle.handle); @@ -1145,7 +1145,7 @@ ehca_reg_mr_rpages_exit1: ehca_reg_mr_rpages_exit0: if (ret) ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p " - "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr, + "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, pginfo, pginfo->num_kpages, pginfo->num_hwpages); return ret; } /* end ehca_reg_mr_rpages() */ @@ -1184,7 +1184,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); if (ret) { ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " - "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx " + "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx " "kpage=%p", e_mr, pginfo, pginfo->type, pginfo->num_kpages, pginfo->num_hwpages, kpage); goto ehca_rereg_mr_rereg1_exit1; @@ -1205,13 +1205,13 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, * (MW bound or MR is shared) */ ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " - "(Rereg1), h_ret=%li e_mr=%p", h_ret, e_mr); + "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr); *pginfo = pginfo_save; ret = -EAGAIN; } else if 
     } else if ((u64 *)hipzout.vaddr != iova_start) {
         ehca_err(&shca->ib_device, "PHYP changed iova_start in "
-             "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
-             "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
+             "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
+             "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
              hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
              e_mr->ib.ib_mr.lkey, hipzout.lkey);
         ret = -EFAULT;
@@ -1235,7 +1235,7 @@ ehca_rereg_mr_rereg1_exit1:
 ehca_rereg_mr_rereg1_exit0:
     if ( ret && (ret != -EAGAIN) )
         ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
-             "pginfo=%p num_kpages=%lx num_hwpages=%lx",
+             "pginfo=%p num_kpages=%llx num_hwpages=%llx",
              ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
              pginfo->num_hwpages);
     return ret;
@@ -1263,7 +1263,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
         (e_mr->num_hwpages > MAX_RPAGES) ||
         (pginfo->num_hwpages > e_mr->num_hwpages)) {
         ehca_dbg(&shca->ib_device, "Rereg3 case, "
-             "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
+             "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
              pginfo->num_hwpages, e_mr->num_hwpages);
         rereg_1_hcall = 0;
         rereg_3_hcall = 1;
@@ -1295,7 +1295,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
         h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
         if (h_ret != H_SUCCESS) {
             ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-                 "h_ret=%li e_mr=%p hca_hndl=%lx mr_hndl=%lx "
+                 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
                  "mr->lkey=%x",
                  h_ret, e_mr, shca->ipz_hca_handle.handle,
                  e_mr->ipz_mr_handle.handle,
@@ -1328,8 +1328,8 @@ int ehca_rereg_mr(struct ehca_shca *shca,
 ehca_rereg_mr_exit0:
     if (ret)
         ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-             "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
-             "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+             "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
+             "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
              "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
              acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
              rereg_1_hcall, rereg_3_hcall);
@@ -1371,8 +1371,8 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
          * FMRs are not shared and no MW bound to FMRs
          */
         ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
-             "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx "
-             "mr_hndl=%lx lkey=%x lkey_out=%x",
+             "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
+             "mr_hndl=%llx lkey=%x lkey_out=%x",
              h_ret, e_fmr, shca->ipz_hca_handle.handle,
              e_fmr->ipz_mr_handle.handle,
              e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
@@ -1383,7 +1383,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
         h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
         if (h_ret != H_SUCCESS) {
             ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-                 "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+                 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
                  "lkey=%x",
                  h_ret, e_fmr, shca->ipz_hca_handle.handle,
                  e_fmr->ipz_mr_handle.handle,
@@ -1447,9 +1447,9 @@ int ehca_reg_smr(struct ehca_shca *shca,
                     (u64)iova_start, hipz_acl, e_pd->fw_pd,
                     &hipzout);
     if (h_ret != H_SUCCESS) {
-        ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
+        ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
              "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
-             "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+             "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
              h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
              shca->ipz_hca_handle.handle,
              e_origmr->ipz_mr_handle.handle,
@@ -1527,7 +1527,7 @@ int ehca_reg_internal_maxmr(
               &e_mr->ib.ib_mr.rkey);
     if (ret) {
         ehca_err(&shca->ib_device, "reg of internal max MR failed, "
-             "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
+             "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
              "num_hwpages=%x", e_mr, iova_start, size_maxmr,
              num_kpages, num_hwpages);
         goto ehca_reg_internal_maxmr_exit1;
@@ -1573,8 +1573,8 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
                     (u64)iova_start, hipz_acl, e_pd->fw_pd,
                     &hipzout);
     if (h_ret != H_SUCCESS) {
-        ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
-             "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+        ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
+             "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
              h_ret, e_origmr, shca->ipz_hca_handle.handle,
              e_origmr->ipz_mr_handle.handle,
              e_origmr->ib.ib_mr.lkey);
@@ -1651,28 +1651,28 @@ int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
     /* check first buffer */
     if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
         ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
-                 "pbuf->addr=%lx pbuf->size=%lx",
+                 "pbuf->addr=%llx pbuf->size=%llx",
                  iova_start, pbuf->addr, pbuf->size);
         return -EINVAL;
     }
     if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
         (num_phys_buf > 1)) {
-        ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
-                 "pbuf->size=%lx", pbuf->addr, pbuf->size);
+        ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
+                 "pbuf->size=%llx", pbuf->addr, pbuf->size);
         return -EINVAL;
     }
     for (i = 0; i < num_phys_buf; i++) {
         if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
-            ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
-                     "pbuf->size=%lx",
+            ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
+                     "pbuf->size=%llx",
                      i, pbuf->addr, pbuf->size);
             return -EINVAL;
         }
         if (((i > 0) && /* not 1st */
              (i < (num_phys_buf - 1)) && /* not last */
              (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
-            ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+            ehca_gen_err("bad size, i=%x pbuf->size=%llx",
                      i, pbuf->size);
             return -EINVAL;
         }
@@ -1705,7 +1705,7 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
     page = page_list;
     for (i = 0; i < list_len; i++) {
         if (*page % e_fmr->fmr_page_size) {
-            ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
+            ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
                      "fmr_page_size=%x", i, *page, page, e_fmr,
                      e_fmr->fmr_page_size);
             return -EINVAL;
@@ -1743,9 +1743,9 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
                  (pginfo->next_hwpage *
                   pginfo->hwpage_size));
             if ( !(*kpage) ) {
-                ehca_gen_err("pgaddr=%lx "
-                         "chunk->page_list[i]=%lx "
-                         "i=%x next_hwpage=%lx",
+                ehca_gen_err("pgaddr=%llx "
+                         "chunk->page_list[i]=%llx "
+                         "i=%x next_hwpage=%llx",
                          pgaddr, (u64)sg_dma_address(
                              &chunk->page_list[i]),
                          i, pginfo->next_hwpage);
@@ -1795,11 +1795,11 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
     for (t = start_idx; t <= end_idx; t++) {
         u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
         if (ehca_debug_level >= 3)
-            ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+            ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
                      *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
         if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
-            ehca_gen_err("uncontiguous page found pgaddr=%lx "
-                     "prev_pgaddr=%lx page_list_i=%x",
+            ehca_gen_err("uncontiguous page found pgaddr=%llx "
+                     "prev_pgaddr=%llx page_list_i=%x",
                      pgaddr, *prev_pgaddr, t);
             return -EINVAL;
         }
@@ -1833,7 +1833,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                      << PAGE_SHIFT );
                 *kpage = phys_to_abs(pgaddr);
                 if ( !(*kpage) ) {
-                    ehca_gen_err("pgaddr=%lx i=%x",
+                    ehca_gen_err("pgaddr=%llx i=%x",
                              pgaddr, i);
                     ret = -EFAULT;
                     return ret;
                 }
@@ -1846,8 +1846,8 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                 if (pginfo->hwpage_cnt) {
                     ehca_gen_err(
                         "invalid alignment "
-                        "pgaddr=%lx i=%x "
-                        "mr_pgsize=%lx",
+                        "pgaddr=%llx i=%x "
+                        "mr_pgsize=%llx",
                         pgaddr, i,
                         pginfo->hwpage_size);
                     ret = -EFAULT;
@@ -1866,8 +1866,8 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                 if (ehca_debug_level >= 3) {
                     u64 val = *(u64 *)abs_to_virt(
                         phys_to_abs(pgaddr));
-                    ehca_gen_dbg("kpage=%lx chunk_page=%lx "
-                             "value=%016lx",
+                    ehca_gen_dbg("kpage=%llx chunk_page=%llx "
+                             "value=%016llx",
                              *kpage, pgaddr, val);
                 }
                 prev_pgaddr = pgaddr;
@@ -1944,9 +1944,9 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
             if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
                 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
                 ehca_gen_err("kpage_cnt >= num_kpages, "
-                         "kpage_cnt=%lx num_kpages=%lx "
-                         "hwpage_cnt=%lx "
-                         "num_hwpages=%lx i=%x",
+                         "kpage_cnt=%llx num_kpages=%llx "
+                         "hwpage_cnt=%llx "
+                         "num_hwpages=%llx i=%x",
                          pginfo->kpage_cnt,
                          pginfo->num_kpages,
                          pginfo->hwpage_cnt,
@@ -1957,8 +1957,8 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
                 (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
                 (pginfo->next_hwpage * pginfo->hwpage_size));
             if ( !(*kpage) && pbuf->addr ) {
-                ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
-                         "next_hwpage=%lx", pbuf->addr,
+                ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
+                         "next_hwpage=%llx", pbuf->addr,
                          pbuf->size, pginfo->next_hwpage);
                 return -EFAULT;
             }
@@ -1996,8 +1996,8 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
         *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
                      pginfo->next_hwpage * pginfo->hwpage_size);
         if ( !(*kpage) ) {
-            ehca_gen_err("*fmrlist=%lx fmrlist=%p "
-                     "next_listelem=%lx next_hwpage=%lx",
+            ehca_gen_err("*fmrlist=%llx fmrlist=%p "
+                     "next_listelem=%llx next_hwpage=%llx",
                      *fmrlist, fmrlist,
                      pginfo->u.fmr.next_listelem,
                      pginfo->next_hwpage);
@@ -2025,7 +2025,7 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
                   ~(pginfo->hwpage_size - 1));
             if (prev + pginfo->u.fmr.fmr_pgsize != p) {
                 ehca_gen_err("uncontiguous fmr pages "
-                         "found prev=%lx p=%lx "
+                         "found prev=%llx p=%llx "
                          "idx=%x", prev, p, i + j);
                 return -EINVAL;
             }
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index f161cf173db..00c10815971 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -331,7 +331,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
         if (cnt == (nr_q_pages - 1)) {  /* last page! */
             if (h_ret != expected_hret) {
                 ehca_err(ib_dev, "hipz_qp_register_rpage() "
-                     "h_ret=%li", h_ret);
+                     "h_ret=%lli", h_ret);
                 ret = ehca2ib_return_code(h_ret);
                 goto init_qp_queue1;
             }
@@ -345,7 +345,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
         } else {
             if (h_ret != H_PAGE_REGISTERED) {
                 ehca_err(ib_dev, "hipz_qp_register_rpage() "
-                     "h_ret=%li", h_ret);
+                     "h_ret=%lli", h_ret);
                 ret = ehca2ib_return_code(h_ret);
                 goto init_qp_queue1;
             }
@@ -709,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
     h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
     if (h_ret != H_SUCCESS) {
-        ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
+        ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
              h_ret);
         ret = ehca2ib_return_code(h_ret);
         goto create_qp_exit1;
@@ -1010,7 +1010,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
                 mqpcb, my_qp->galpas.kernel);
     if (hret != H_SUCCESS) {
         ehca_err(pd->device, "Could not modify SRQ to INIT "
-             "ehca_qp=%p qp_num=%x h_ret=%li",
+             "ehca_qp=%p qp_num=%x h_ret=%lli",
              my_qp, my_qp->real_qp_num, hret);
         goto create_srq2;
     }
@@ -1024,7 +1024,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
                 mqpcb, my_qp->galpas.kernel);
     if (hret != H_SUCCESS) {
         ehca_err(pd->device, "Could not enable SRQ "
-             "ehca_qp=%p qp_num=%x h_ret=%li",
+             "ehca_qp=%p qp_num=%x h_ret=%lli",
              my_qp, my_qp->real_qp_num, hret);
         goto create_srq2;
     }
@@ -1038,7 +1038,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
                 mqpcb, my_qp->galpas.kernel);
     if (hret != H_SUCCESS) {
         ehca_err(pd->device, "Could not modify SRQ to RTR "
-             "ehca_qp=%p qp_num=%x h_ret=%li",
+             "ehca_qp=%p qp_num=%x h_ret=%lli",
              my_qp, my_qp->real_qp_num, hret);
         goto create_srq2;
     }
@@ -1078,7 +1078,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
                        &bad_send_wqe_p, NULL, 2);
     if (h_ret != H_SUCCESS) {
         ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
-             " ehca_qp=%p qp_num=%x h_ret=%li",
+             " ehca_qp=%p qp_num=%x h_ret=%lli",
              my_qp, qp_num, h_ret);
         return ehca2ib_return_code(h_ret);
     }
@@ -1134,7 +1134,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
     if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
         ehca_gen_err("Invalid offset for calculating left cqes "
-                "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
+                "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
         return -EFAULT;
     }
@@ -1168,7 +1168,7 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
                            &send_wqe_p, &recv_wqe_p, 4);
         if (h_ret != H_SUCCESS) {
             ehca_err(&shca->ib_device, "disable_and_get_wqe() "
-                 "failed ehca_qp=%p qp_num=%x h_ret=%li",
+                 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
                  my_qp, qp_num, h_ret);
             return ehca2ib_return_code(h_ret);
         }
@@ -1261,7 +1261,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 mqpcb, my_qp->galpas.kernel);
     if (h_ret != H_SUCCESS) {
         ehca_err(ibqp->device, "hipz_h_query_qp() failed "
-             "ehca_qp=%p qp_num=%x h_ret=%li",
+             "ehca_qp=%p qp_num=%x h_ret=%lli",
              my_qp, ibqp->qp_num, h_ret);
         ret = ehca2ib_return_code(h_ret);
         goto modify_qp_exit1;
@@ -1690,7 +1690,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
     if (h_ret != H_SUCCESS) {
         ret = ehca2ib_return_code(h_ret);
-        ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
+        ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
              "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
         goto modify_qp_exit2;
     }
@@ -1723,7 +1723,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
             ret = ehca2ib_return_code(h_ret);
             ehca_err(ibqp->device, "ENABLE in context of "
"RESET_2_INIT failed! Maybe you didn't get " - "a LID h_ret=%li ehca_qp=%p qp_num=%x", + "a LID h_ret=%lli ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num); goto modify_qp_exit2; } @@ -1909,7 +1909,7 @@ int ehca_query_qp(struct ib_qp *qp, if (h_ret != H_SUCCESS) { ret = ehca2ib_return_code(h_ret); ehca_err(qp->device, "hipz_h_query_qp() failed " - "ehca_qp=%p qp_num=%x h_ret=%li", + "ehca_qp=%p qp_num=%x h_ret=%lli", my_qp, qp->qp_num, h_ret); goto query_qp_exit1; } @@ -2074,7 +2074,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, if (h_ret != H_SUCCESS) { ret = ehca2ib_return_code(h_ret); - ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li " + ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli " "ehca_qp=%p qp_num=%x", h_ret, my_qp, my_qp->real_qp_num); } @@ -2108,7 +2108,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) if (h_ret != H_SUCCESS) { ret = ehca2ib_return_code(h_ret); ehca_err(srq->device, "hipz_h_query_qp() failed " - "ehca_qp=%p qp_num=%x h_ret=%li", + "ehca_qp=%p qp_num=%x h_ret=%lli", my_qp, my_qp->real_qp_num, h_ret); goto query_srq_exit1; } @@ -2179,7 +2179,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); if (h_ret != H_SUCCESS) { - ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li " + ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli " "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num); return ehca2ib_return_code(h_ret); } diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index c7112686782..5a3d96f84c7 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c @@ -822,7 +822,7 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq, offset = qmap->next_wqe_idx * ipz_queue->qe_size; wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); if (!wqe) { - ehca_err(cq->device, "Invalid wqe offset=%#lx on " + ehca_err(cq->device, "Invalid wqe offset=%#llx on " "qp_num=%#x", offset, my_qp->real_qp_num); return nr; } diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c index 706d97ad555..c568b28f4e2 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c @@ -46,11 +46,11 @@ #include "ehca_iverbs.h" #include "hcp_if.h" -#define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002) -#define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004) -#define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008) +#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002) +#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004) +#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008) -#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) +#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) /** * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue @@ -85,7 +85,7 @@ u64 ehca_define_sqp(struct ehca_shca *shca, if (ret != H_SUCCESS) { ehca_err(&shca->ib_device, - "Can't define AQP1 for port %x. h_ret=%li", + "Can't define AQP1 for port %x. 
h_ret=%lli", port, ret); return ret; } diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h index 21f7d06f14a..f09914cccf5 100644 --- a/drivers/infiniband/hw/ehca/ehca_tools.h +++ b/drivers/infiniband/hw/ehca/ehca_tools.h @@ -116,7 +116,7 @@ extern int ehca_debug_level; unsigned char *deb = (unsigned char *)(adr); \ for (x = 0; x < l; x += 16) { \ printk(KERN_INFO "EHCA_DMP:%s " format \ - " adr=%p ofs=%04x %016lx %016lx\n", \ + " adr=%p ofs=%04x %016llx %016llx\n", \ __func__, ##args, deb, x, \ *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ deb += 16; \ diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c index e43ed8f8a0c..3cb688d2913 100644 --- a/drivers/infiniband/hw/ehca/ehca_uverbs.c +++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c @@ -114,7 +114,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas, physical = galpas->user.fw_handle; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical); + ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical); /* VM_IO | VM_RESERVED are set by remap_pfn_range() */ ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, vma->vm_page_prot); diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index 415d3a465de..d0ab0c0d5e9 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c @@ -226,7 +226,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, u32 *eq_ist) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; u64 allocate_controls; /* resource type */ @@ -249,7 +249,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, *eq_ist = (u32)outs[5]; if (ret == H_NOT_ENOUGH_RESOURCES) - ehca_gen_err("Not enough resource - ret=%li ", ret); + ehca_gen_err("Not enough resource - ret=%lli ", ret); return ret; } @@ -270,7 +270,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, struct ehca_alloc_cq_parms *param) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, adapter_handle.handle, /* r4 */ @@ -287,7 +287,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); if (ret == H_NOT_ENOUGH_RESOURCES) - ehca_gen_err("Not enough resources. ret=%li", ret); + ehca_gen_err("Not enough resources. ret=%lli", ret); return ret; } @@ -297,7 +297,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, { u64 ret; u64 allocate_controls, max_r10_reg, r11, r12; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; allocate_controls = EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type) @@ -362,7 +362,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]); if (ret == H_NOT_ENOUGH_RESOURCES) - ehca_gen_err("Not enough resources. ret=%li", ret); + ehca_gen_err("Not enough resources. 
ret=%lli", ret); return ret; } @@ -454,7 +454,7 @@ u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle, const u64 count) { if (count != 1) { - ehca_gen_err("Ppage counter=%lx", count); + ehca_gen_err("Ppage counter=%llx", count); return H_PARAMETER; } return hipz_h_register_rpage(adapter_handle, @@ -489,7 +489,7 @@ u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle, const struct h_galpa gal) { if (count != 1) { - ehca_gen_err("Page counter=%lx", count); + ehca_gen_err("Page counter=%llx", count); return H_PARAMETER; } @@ -508,7 +508,7 @@ u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle, const struct h_galpa galpa) { if (count > 1) { - ehca_gen_err("Page counter=%lx", count); + ehca_gen_err("Page counter=%llx", count); return H_PARAMETER; } @@ -525,7 +525,7 @@ u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle, int dis_and_get_function_code) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, adapter_handle.handle, /* r4 */ @@ -548,7 +548,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle, struct h_galpa gal) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_MODIFY_QP, outs, adapter_handle.handle, /* r4 */ qp_handle.handle, /* r5 */ @@ -557,7 +557,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle, 0, 0, 0, 0, 0); if (ret == H_NOT_ENOUGH_RESOURCES) - ehca_gen_err("Insufficient resources ret=%li", ret); + ehca_gen_err("Insufficient resources ret=%lli", ret); return ret; } @@ -579,7 +579,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle, struct ehca_qp *qp) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = hcp_galpas_dtor(&qp->galpas); if (ret) { @@ -593,7 +593,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle, qp->ipz_qp_handle.handle, /* r6 */ 0, 0, 0, 0, 0, 0); if (ret == H_HARDWARE) - ehca_gen_err("HCA not operational. ret=%li", ret); + ehca_gen_err("HCA not operational. ret=%lli", ret); ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, adapter_handle.handle, /* r4 */ @@ -601,7 +601,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle, 0, 0, 0, 0, 0); if (ret == H_RESOURCE) - ehca_gen_err("Resource still in use. ret=%li", ret); + ehca_gen_err("Resource still in use. ret=%lli", ret); return ret; } @@ -625,7 +625,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle, u32 * bma_qp_nr) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs, adapter_handle.handle, /* r4 */ @@ -636,7 +636,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle, *bma_qp_nr = (u32)outs[1]; if (ret == H_ALIAS_EXIST) - ehca_gen_err("AQP1 already exists. ret=%li", ret); + ehca_gen_err("AQP1 already exists. ret=%lli", ret); return ret; } @@ -658,7 +658,7 @@ u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle, 0, 0); if (ret == H_NOT_ENOUGH_RESOURCES) - ehca_gen_err("Not enough resources. ret=%li", ret); + ehca_gen_err("Not enough resources. 
ret=%lli", ret); return ret; } @@ -697,7 +697,7 @@ u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle, 0, 0, 0, 0); if (ret == H_RESOURCE) - ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret); + ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret); return ret; } @@ -719,7 +719,7 @@ u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle, 0, 0, 0, 0, 0); if (ret == H_RESOURCE) - ehca_gen_err("Resource in use. ret=%li ", ret); + ehca_gen_err("Resource in use. ret=%lli ", ret); return ret; } @@ -733,7 +733,7 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle, struct ehca_mr_hipzout_parms *outparms) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, adapter_handle.handle, /* r4 */ @@ -774,9 +774,9 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle, if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { ehca_gen_err("logical_address_of_page not on a 4k boundary " - "adapter_handle=%lx mr=%p mr_handle=%lx " + "adapter_handle=%llx mr=%p mr_handle=%llx " "pagesize=%x queue_type=%x " - "logical_address_of_page=%lx count=%lx", + "logical_address_of_page=%llx count=%llx", adapter_handle.handle, mr, mr->ipz_mr_handle.handle, pagesize, queue_type, logical_address_of_page, count); @@ -794,7 +794,7 @@ u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle, struct ehca_mr_hipzout_parms *outparms) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_QUERY_MR, outs, adapter_handle.handle, /* r4 */ @@ -828,7 +828,7 @@ u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle, struct ehca_mr_hipzout_parms *outparms) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs, adapter_handle.handle, /* r4 */ @@ -855,7 +855,7 @@ u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle, struct ehca_mr_hipzout_parms *outparms) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs, adapter_handle.handle, /* r4 */ @@ -877,7 +877,7 @@ u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle, struct ehca_mw_hipzout_parms *outparms) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, adapter_handle.handle, /* r4 */ @@ -895,7 +895,7 @@ u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle, struct ehca_mw_hipzout_parms *outparms) { u64 ret; - u64 outs[PLPAR_HCALL9_BUFSIZE]; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; ret = ehca_plpar_hcall9(H_QUERY_MW, outs, adapter_handle.handle, /* r4 */ diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 69c0ce321b4..cb9daa6ac02 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -2715,7 +2715,7 @@ static void ipath_hol_signal_up(struct ipath_devdata *dd) * to prevent HoL blocking, then start the HoL timer that * periodically continues, then stop procs, so they can detect * link down if they want, and do something about it. - * Timer may already be running, so use __mod_timer, not add_timer. + * Timer may already be running, so use mod_timer, not add_timer. 
*/ void ipath_hol_down(struct ipath_devdata *dd) { @@ -2724,7 +2724,7 @@ void ipath_hol_down(struct ipath_devdata *dd) dd->ipath_hol_next = IPATH_HOL_DOWNCONT; dd->ipath_hol_timer.expires = jiffies + msecs_to_jiffies(ipath_hol_timeout_ms); - __mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires); + mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires); } /* @@ -2763,7 +2763,7 @@ void ipath_hol_event(unsigned long opaque) else { dd->ipath_hol_timer.expires = jiffies + msecs_to_jiffies(ipath_hol_timeout_ms); - __mod_timer(&dd->ipath_hol_timer, + mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires); } } diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c index dc37277f1c8..fc7181985e8 100644 --- a/drivers/infiniband/hw/ipath/ipath_eeprom.c +++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c @@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd) "0x%x, not 0x%x\n", csum, ifp->if_csum); goto done; } - if (*(__be64 *) ifp->if_guid == 0ULL || - *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) { + if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || + *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { ipath_dev_err(dd, "Invalid GUID %llx from flash; " "ignoring\n", *(unsigned long long *) ifp->if_guid); diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 64aeefbd2a5..077879c0bdb 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c @@ -455,7 +455,7 @@ static void init_shadow_tids(struct ipath_devdata *dd) if (!addrs) { ipath_dev_err(dd, "failed to allocate shadow dma handle " "array, no expected sends!\n"); - vfree(dd->ipath_pageshadow); + vfree(pages); dd->ipath_pageshadow = NULL; return; } diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index 17a12319747..16a702d4601 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c @@ -37,10 +37,10 @@ #include "ipath_verbs.h" #include "ipath_common.h" -#define IB_SMP_UNSUP_VERSION __constant_htons(0x0004) -#define IB_SMP_UNSUP_METHOD __constant_htons(0x0008) -#define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C) -#define IB_SMP_INVALID_FIELD __constant_htons(0x001C) +#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004) +#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008) +#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C) +#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C) static int reply(struct ib_smp *smp) { @@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp, return recv_subn_get_pkeytable(smp, ibdev); } -#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) -#define IB_PMA_PORT_SAMPLES_CONTROL __constant_htons(0x0010) -#define IB_PMA_PORT_SAMPLES_RESULT __constant_htons(0x0011) -#define IB_PMA_PORT_COUNTERS __constant_htons(0x0012) -#define IB_PMA_PORT_COUNTERS_EXT __constant_htons(0x001D) -#define IB_PMA_PORT_SAMPLES_RESULT_EXT __constant_htons(0x001E) +#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) +#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010) +#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011) +#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012) +#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D) +#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E) struct ib_perf { u8 base_version; @@ -884,19 +884,19 @@ struct ib_pma_portcounters { __be32 port_rcv_packets; } __attribute__ ((packed)); -#define 
IB_PMA_SEL_SYMBOL_ERROR __constant_htons(0x0001) -#define IB_PMA_SEL_LINK_ERROR_RECOVERY __constant_htons(0x0002) -#define IB_PMA_SEL_LINK_DOWNED __constant_htons(0x0004) -#define IB_PMA_SEL_PORT_RCV_ERRORS __constant_htons(0x0008) -#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS __constant_htons(0x0010) -#define IB_PMA_SEL_PORT_XMIT_DISCARDS __constant_htons(0x0040) -#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS __constant_htons(0x0200) -#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS __constant_htons(0x0400) -#define IB_PMA_SEL_PORT_VL15_DROPPED __constant_htons(0x0800) -#define IB_PMA_SEL_PORT_XMIT_DATA __constant_htons(0x1000) -#define IB_PMA_SEL_PORT_RCV_DATA __constant_htons(0x2000) -#define IB_PMA_SEL_PORT_XMIT_PACKETS __constant_htons(0x4000) -#define IB_PMA_SEL_PORT_RCV_PACKETS __constant_htons(0x8000) +#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001) +#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002) +#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004) +#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008) +#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010) +#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040) +#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200) +#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400) +#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800) +#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000) +#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000) +#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000) +#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000) struct ib_pma_portcounters_ext { u8 reserved; @@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext { __be64 port_multicast_rcv_packets; } __attribute__ ((packed)); -#define IB_PMA_SELX_PORT_XMIT_DATA __constant_htons(0x0001) -#define IB_PMA_SELX_PORT_RCV_DATA __constant_htons(0x0002) -#define IB_PMA_SELX_PORT_XMIT_PACKETS __constant_htons(0x0004) -#define IB_PMA_SELX_PORT_RCV_PACKETS __constant_htons(0x0008) -#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS __constant_htons(0x0010) -#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS __constant_htons(0x0020) -#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS __constant_htons(0x0040) -#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS __constant_htons(0x0080) +#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001) +#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002) +#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004) +#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008) +#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010) +#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020) +#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040) +#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080) static int recv_pma_get_classportinfo(struct ib_perf *pmp) { @@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp) pmp->status |= IB_SMP_INVALID_FIELD; /* Indicate AllPortSelect is valid (only one port anyway) */ - p->cap_mask = __constant_cpu_to_be16(1 << 8); + p->cap_mask = cpu_to_be16(1 << 8); p->base_version = 1; p->class_version = 1; /* @@ -951,12 +951,11 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp) * We support 5 counters which only count the mandatory quantities. 
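 * (Encoding assumed from COUNTER_MASK below: the CounterSelect word
 * packs ten 3-bit fields, with field n occupying bits starting at
 * (9 - n) * 3, so COUNTER_MASK(1, n) reports quantity 1 for counter n
 * and COUNTER_MASK0_9 advertises the five mandatory counters.)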
*/ #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) -#define COUNTER_MASK0_9 \ - __constant_cpu_to_be32(COUNTER_MASK(1, 0) | \ - COUNTER_MASK(1, 1) | \ - COUNTER_MASK(1, 2) | \ - COUNTER_MASK(1, 3) | \ - COUNTER_MASK(1, 4)) +#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \ + COUNTER_MASK(1, 1) | \ + COUNTER_MASK(1, 2) | \ + COUNTER_MASK(1, 3) | \ + COUNTER_MASK(1, 4)) static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp, struct ib_device *ibdev, u8 port) @@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp, status = dev->pma_sample_status; p->sample_status = cpu_to_be16(status); /* 64 bits */ - p->extended_width = __constant_cpu_to_be32(0x80000000); + p->extended_width = cpu_to_be32(0x80000000); for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 : cpu_to_be64( @@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp, pmp->status |= IB_SMP_INVALID_FIELD; if (cntrs.symbol_error_counter > 0xFFFFUL) - p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF); + p->symbol_error_counter = cpu_to_be16(0xFFFF); else p->symbol_error_counter = cpu_to_be16((u16)cntrs.symbol_error_counter); @@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp, else p->link_downed_counter = (u8)cntrs.link_downed_counter; if (cntrs.port_rcv_errors > 0xFFFFUL) - p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF); + p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16) cntrs.port_rcv_errors); if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) - p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF); + p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); if (cntrs.port_xmit_discards > 0xFFFFUL) - p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF); + p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)cntrs.port_xmit_discards); @@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp, p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | cntrs.excessive_buffer_overrun_errors; if (cntrs.vl15_dropped > 0xFFFFUL) - p->vl15_dropped = __constant_cpu_to_be16(0xFFFF); + p->vl15_dropped = cpu_to_be16(0xFFFF); else p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); if (cntrs.port_xmit_data > 0xFFFFFFFFUL) - p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF); + p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); if (cntrs.port_rcv_data > 0xFFFFFFFFUL) - p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF); + p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) - p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF); + p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_packets = cpu_to_be32((u32)cntrs.port_xmit_packets); if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) - p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF); + p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_packets = cpu_to_be32((u32) cntrs.port_rcv_packets); diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 9170710b950..79b3dbc9717 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c @@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct 
ipath_ibdev *dev, struct ipath_ib_header *hdr, /* Signal completion event if the solicited bit is set. */ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & - __constant_cpu_to_be32(1 << 23)) != 0); + cpu_to_be32(1 << 23)) != 0); break; case OP(RDMA_WRITE_FIRST): diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c index 8e255adf5d9..4b069859085 100644 --- a/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_sdma.c @@ -781,10 +781,10 @@ retry: descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0]; descqp -= 2; /* SDmaLastDesc */ - descqp[0] |= __constant_cpu_to_le64(1ULL << 11); + descqp[0] |= cpu_to_le64(1ULL << 11); if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) { /* SDmaIntReq */ - descqp[0] |= __constant_cpu_to_le64(1ULL << 15); + descqp[0] |= cpu_to_le64(1ULL << 15); } /* Commit writes to memory and advance the tail on the chip */ diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c index 82cc588b8bf..22e60998f1a 100644 --- a/drivers/infiniband/hw/ipath/ipath_uc.c +++ b/drivers/infiniband/hw/ipath/ipath_uc.c @@ -419,7 +419,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, /* Signal completion event if the solicited bit is set. */ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & - __constant_cpu_to_be32(1 << 23)) != 0); + cpu_to_be32(1 << 23)) != 0); break; case OP(RDMA_WRITE_FIRST): diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 91c74cc797a..6076cb61bf6 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c @@ -370,7 +370,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) */ ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && ah_attr->dlid != IPATH_PERMISSIVE_LID ? - __constant_cpu_to_be32(IPATH_MULTICAST_QPN) : + cpu_to_be32(IPATH_MULTICAST_QPN) : cpu_to_be32(wqe->wr.wr.ud.remote_qpn); ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); /* @@ -573,7 +573,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, /* Signal completion event if the solicited bit is set. 
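 * (Bit 23 of the first BTH dword is the Solicited Event flag; masking
 * against a byte-swapped constant keeps bth[0] in wire order rather
 * than converting it on every received packet.)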
*/ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & - __constant_cpu_to_be32(1 << 23)) != 0); + cpu_to_be32(1 << 23)) != 0); bail:; } diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index 0190edc8044..855911e7396 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c @@ -209,20 +209,20 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages) mm = get_task_mm(current); if (!mm) - goto bail; + return; work = kmalloc(sizeof(*work), GFP_KERNEL); if (!work) goto bail_mm; - goto bail; - INIT_WORK(&work->work, user_pages_account); work->mm = mm; work->num_pages = num_pages; + schedule_work(&work->work); + return; + bail_mm: mmput(mm); -bail: return; } diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c index 82d9a0b5ca2..7bff4b9baa0 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c @@ -667,13 +667,13 @@ static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd, static inline __le64 ipath_sdma_make_first_desc0(__le64 descq) { - return descq | __constant_cpu_to_le64(1ULL << 12); + return descq | cpu_to_le64(1ULL << 12); } static inline __le64 ipath_sdma_make_last_desc0(__le64 descq) { /* last */ /* dma head */ - return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13); + return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13); } static inline __le64 ipath_sdma_make_desc1(u64 addr) @@ -763,7 +763,7 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd, if (ofs >= IPATH_SMALLBUF_DWORDS) { for (i = 0; i < pkt->naddr; i++) { dd->ipath_sdma_descq[dtail].qw[0] |= - __constant_cpu_to_le64(1ULL << 14); + cpu_to_le64(1ULL << 14); if (++dtail == dd->ipath_sdma_descq_cnt) dtail = 0; } diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index cdf0e6abd34..9289ab4b0ae 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1585,7 +1585,7 @@ static int ipath_query_port(struct ib_device *ibdev, u64 ibcstat; memset(props, 0, sizeof(*props)); - props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE); + props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); props->lmc = dd->ipath_lmc; props->sm_lid = dev->sm_lid; props->sm_sl = dev->sm_sl; diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 11e3f613df9..ae6cff4abff 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h @@ -86,11 +86,11 @@ #define IB_PMA_SAMPLE_STATUS_RUNNING 0x02 /* Mandatory IB performance counter select values. 
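 * (These remain compile-time constants: with a constant argument the
 * generic byteorder helpers fold to their __constant_ forms, which is
 * what makes this series' removal of the explicit __constant_* macros
 * safe in case labels and static initializers; likewise
 * ~cpu_to_be64(0) is all-ones in any byte order.)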
*/ -#define IB_PMA_PORT_XMIT_DATA __constant_htons(0x0001) -#define IB_PMA_PORT_RCV_DATA __constant_htons(0x0002) -#define IB_PMA_PORT_XMIT_PKTS __constant_htons(0x0003) -#define IB_PMA_PORT_RCV_PKTS __constant_htons(0x0004) -#define IB_PMA_PORT_XMIT_WAIT __constant_htons(0x0005) +#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001) +#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002) +#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003) +#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004) +#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005) struct ib_reth { __be64 vaddr; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 606f1e2ef28..19e68ab6616 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -147,7 +147,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) * Snoop SM MADs for port info and P_Key table sets, so we can * synthesize LID change and P_Key change events. */ -static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad) +static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, + u16 prev_lid) { struct ib_event event; @@ -157,6 +158,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad) if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { struct ib_port_info *pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; + u16 lid = be16_to_cpu(pinfo->lid); update_sm_ah(to_mdev(ibdev), port_num, be16_to_cpu(pinfo->sm_lid), @@ -165,12 +167,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad) event.device = ibdev; event.element.port_num = port_num; - if (pinfo->clientrereg_resv_subnetto & 0x80) + if (pinfo->clientrereg_resv_subnetto & 0x80) { event.event = IB_EVENT_CLIENT_REREGISTER; - else - event.event = IB_EVENT_LID_CHANGE; + ib_dispatch_event(&event); + } - ib_dispatch_event(&event); + if (prev_lid != lid) { + event.event = IB_EVENT_LID_CHANGE; + ib_dispatch_event(&event); + } } if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { @@ -228,8 +233,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { - u16 slid; + u16 slid, prev_lid = 0; int err; + struct ib_port_attr pattr; slid = in_wc ? 
in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); @@ -263,6 +269,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, } else return IB_MAD_RESULT_SUCCESS; + if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && + in_mad->mad_hdr.method == IB_MGMT_METHOD_SET && + in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && + !ib_query_port(ibdev, port_num, &pattr)) + prev_lid = pattr.lid; + err = mlx4_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY, mad_flags & IB_MAD_IGNORE_BKEY, @@ -271,7 +284,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_FAILURE; if (!out_mad->mad_hdr.status) { - smp_snoop(ibdev, port_num, in_mad); + smp_snoop(ibdev, port_num, in_mad, prev_lid); node_desc_override(ibdev, out_mad); } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 61588bd273b..2ccb9d31771 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -699,11 +699,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) struct mlx4_ib_dev *ibdev = ibdev_ptr; int p; + mlx4_ib_mad_cleanup(ibdev); + ib_unregister_device(&ibdev->ib_dev); + for (p = 1; p <= ibdev->num_ports; ++p) mlx4_CLOSE_PORT(dev, p); - mlx4_ib_mad_cleanup(ibdev); - ib_unregister_device(&ibdev->ib_dev); iounmap(ibdev->uar_map); mlx4_uar_free(dev, &ibdev->priv_uar); mlx4_pd_free(dev, ibdev->priv_pdn); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 39167a797f9..f385a24d31d 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -71,17 +71,17 @@ enum { }; static const __be32 mlx4_ib_opcode[] = { - [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND), - [IB_WR_LSO] = __constant_cpu_to_be32(MLX4_OPCODE_LSO), - [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM), - [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), - [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), - [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ), - [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), - [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), - [IB_WR_SEND_WITH_INV] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL), - [IB_WR_LOCAL_INV] = __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), - [IB_WR_FAST_REG_MR] = __constant_cpu_to_be32(MLX4_OPCODE_FMR), + [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), + [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), + [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), + [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), + [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), + [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), + [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), + [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), + [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), + [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), + [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), }; static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) @@ -1462,7 +1462,8 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) } static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, - struct mlx4_ib_qp *qp, unsigned *lso_seg_len) + struct mlx4_ib_qp *qp, unsigned 
*lso_seg_len, + __be32 *lso_hdr_sz) { unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); @@ -1479,12 +1480,8 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); - /* make sure LSO header is written before overwriting stamping */ - wmb(); - - wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | - wr->wr.ud.hlen); - + *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | + wr->wr.ud.hlen); *lso_seg_len = halign; return 0; } @@ -1518,6 +1515,9 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int uninitialized_var(stamp); int uninitialized_var(size); unsigned uninitialized_var(seglen); + __be32 dummy; + __be32 *lso_wqe; + __be32 uninitialized_var(lso_hdr_sz); int i; spin_lock_irqsave(&qp->sq.lock, flags); @@ -1525,6 +1525,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ind = qp->sq_next_wqe; for (nreq = 0; wr; ++nreq, wr = wr->next) { + lso_wqe = &dummy; + if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { err = -ENOMEM; *bad_wr = wr; @@ -1606,11 +1608,12 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, size += sizeof (struct mlx4_wqe_datagram_seg) / 16; if (wr->opcode == IB_WR_LSO) { - err = build_lso_seg(wqe, wr, qp, &seglen); + err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz); if (unlikely(err)) { *bad_wr = wr; goto out; } + lso_wqe = (__be32 *) wqe; wqe += seglen; size += seglen / 16; } @@ -1652,6 +1655,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, for (i = wr->num_sge - 1; i >= 0; --i, --dseg) set_data_seg(dseg, wr->sg_list + i); + /* + * Possibly overwrite stamping in cacheline with LSO + * segment only after making sure all data segments + * are written. + */ + wmb(); + *lso_wqe = lso_hdr_sz; + ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? MLX4_WQE_CTRL_FENCE : 0) | size; @@ -1686,7 +1697,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, stamp_send_wqe(qp, stamp, size * 16); ind = pad_wraparound(qp, ind); } - } out: diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 640449582ab..5648659ff0b 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -104,7 +104,8 @@ static void update_sm_ah(struct mthca_dev *dev, */ static void smp_snoop(struct ib_device *ibdev, u8 port_num, - struct ib_mad *mad) + struct ib_mad *mad, + u16 prev_lid) { struct ib_event event; @@ -114,6 +115,7 @@ static void smp_snoop(struct ib_device *ibdev, if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { struct ib_port_info *pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; + u16 lid = be16_to_cpu(pinfo->lid); mthca_update_rate(to_mdev(ibdev), port_num); update_sm_ah(to_mdev(ibdev), port_num, @@ -123,12 +125,15 @@ static void smp_snoop(struct ib_device *ibdev, event.device = ibdev; event.element.port_num = port_num; - if (pinfo->clientrereg_resv_subnetto & 0x80) + if (pinfo->clientrereg_resv_subnetto & 0x80) { event.event = IB_EVENT_CLIENT_REREGISTER; - else - event.event = IB_EVENT_LID_CHANGE; + ib_dispatch_event(&event); + } - ib_dispatch_event(&event); + if (prev_lid != lid) { + event.event = IB_EVENT_LID_CHANGE; + ib_dispatch_event(&event); + } } if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { @@ -196,6 +201,8 @@ int mthca_process_mad(struct ib_device *ibdev, int err; u8 status; u16 slid = in_wc ? 
in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); + u16 prev_lid = 0; + struct ib_port_attr pattr; /* Forward locally generated traps to the SM */ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && @@ -233,6 +240,12 @@ int mthca_process_mad(struct ib_device *ibdev, return IB_MAD_RESULT_SUCCESS; } else return IB_MAD_RESULT_SUCCESS; + if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && + in_mad->mad_hdr.method == IB_MGMT_METHOD_SET && + in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && + !ib_query_port(ibdev, port_num, &pattr)) + prev_lid = pattr.lid; err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY, @@ -252,7 +265,7 @@ int mthca_process_mad(struct ib_device *ibdev, } if (!out_mad->mad_hdr.status) { - smp_snoop(ibdev, port_num, in_mad); + smp_snoop(ibdev, port_num, in_mad, prev_lid); node_desc_override(ibdev, out_mad); } diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index b9611ade9ea..ca599767ffb 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 13a5bb1a7bc..04b12ad2339 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index a01b4488208..52425154acd 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -103,6 +103,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt); static void nes_disconnect_worker(struct work_struct *work); static int send_mpa_request(struct nes_cm_node *, struct sk_buff *); +static int send_mpa_reject(struct nes_cm_node *); static int send_syn(struct nes_cm_node *, u32, struct sk_buff *); static int send_reset(struct nes_cm_node *, struct sk_buff *); static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb); @@ -113,8 +114,7 @@ static void process_packet(struct nes_cm_node *, struct sk_buff *, static void active_open_err(struct nes_cm_node *, struct sk_buff *, int); static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int); static void cleanup_retrans_entry(struct nes_cm_node *); -static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *, - enum nes_cm_event_type); +static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *); static void free_retrans_entry(struct nes_cm_node *cm_node); static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, struct sk_buff *skb, int optionsize, int passive); @@ -124,6 +124,8 @@ static void cm_event_connected(struct nes_cm_event *); static void cm_event_connect_error(struct nes_cm_event *); static void cm_event_reset(struct nes_cm_event *); static void cm_event_mpa_req(struct nes_cm_event *); +static void cm_event_mpa_reject(struct nes_cm_event *); +static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node); static void print_core(struct nes_cm_core *core); @@ -196,7 +198,6 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node, */ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) { - int ret; if (!skb) { nes_debug(NES_DBG_CM, "skb set to NULL\n"); return -1; @@ -206,11 +207,27 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, cm_node->mpa_frame_size, SET_ACK); - ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); - if (ret < 0) - return ret; + return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); +} - return 0; + + +static int send_mpa_reject(struct nes_cm_node *cm_node) +{ + struct sk_buff *skb = NULL; + + skb = dev_alloc_skb(MAX_CM_BUFFER); + if (!skb) { + nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); + return -ENOMEM; + } + + /* send an MPA reject frame */ + form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, + cm_node->mpa_frame_size, SET_ACK | SET_FIN); + + cm_node->state = NES_CM_STATE_FIN_WAIT1; + return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); } @@ -218,14 +235,17 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) * recv_mpa - process a received TCP pkt, we are expecting an * IETF MPA frame */ -static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len) +static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type, + u32 len) { struct ietf_mpa_frame *mpa_frame; + *type = NES_MPA_REQUEST_ACCEPT; + /* assume req frame is in tcp data payload */ if (len < sizeof(struct ietf_mpa_frame)) { nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len); - return -1; + return -EINVAL; } mpa_frame = (struct ietf_mpa_frame *)buffer; @@ -234,14 +254,25 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len) if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) { nes_debug(NES_DBG_CM, "The received 
ietf buffer was not right" " complete (%x + %x != %x)\n", - cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len); - return -1; + cm_node->mpa_frame_size, + (u32)sizeof(struct ietf_mpa_frame), len); + return -EINVAL; + } + /* make sure it does not exceed the max size */ + if (len > MAX_CM_BUFFER) { + nes_debug(NES_DBG_CM, "The received ietf buffer was too large" + " (%x + %x != %x)\n", + cm_node->mpa_frame_size, + (u32)sizeof(struct ietf_mpa_frame), len); + return -EINVAL; } /* copy entire MPA frame to our cm_node's frame */ memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame), cm_node->mpa_frame_size); + if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) + *type = NES_MPA_REQUEST_REJECT; return 0; } @@ -380,7 +411,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); if (!new_send) - return -1; + return -ENOMEM; /* new_send->timetosend = currenttime */ new_send->retrycount = NES_DEFAULT_RETRYS; @@ -394,9 +425,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, if (type == NES_TIMER_TYPE_CLOSE) { new_send->timetosend += (HZ/10); - spin_lock_irqsave(&cm_node->recv_list_lock, flags); - list_add_tail(&new_send->list, &cm_node->recv_list); - spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); + if (cm_node->recv_entry) { + WARN_ON(1); + return -EINVAL; + } + cm_node->recv_entry = new_send; } if (type == NES_TIMER_TYPE_SEND) { @@ -435,24 +468,78 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, return ret; } +static void nes_retrans_expired(struct nes_cm_node *cm_node) +{ + switch (cm_node->state) { + case NES_CM_STATE_SYN_RCVD: + case NES_CM_STATE_CLOSING: + rem_ref_cm_node(cm_node->cm_core, cm_node); + break; + case NES_CM_STATE_LAST_ACK: + case NES_CM_STATE_FIN_WAIT1: + case NES_CM_STATE_MPAREJ_RCVD: + send_reset(cm_node, NULL); + break; + default: + create_event(cm_node, NES_CM_EVENT_ABORTED); + } +} + +static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node) +{ + struct nes_timer_entry *recv_entry = cm_node->recv_entry; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct nes_qp *nesqp; + unsigned long qplockflags; + + if (!recv_entry) + return; + nesqp = (struct nes_qp *)recv_entry->skb; + if (nesqp) { + spin_lock_irqsave(&nesqp->lock, qplockflags); + if (nesqp->cm_id) { + nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " + "refcount = %d: HIT A " + "NES_TIMER_TYPE_CLOSE with something " + "to do!!!\n", nesqp->hwqp.qp_id, cm_id, + atomic_read(&nesqp->refcount)); + nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; + nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; + nesqp->ibqp_state = IB_QPS_ERR; + spin_unlock_irqrestore(&nesqp->lock, qplockflags); + nes_cm_disconn(nesqp); + } else { + spin_unlock_irqrestore(&nesqp->lock, qplockflags); + nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " + "refcount = %d: HIT A " + "NES_TIMER_TYPE_CLOSE with nothing " + "to do!!!\n", nesqp->hwqp.qp_id, cm_id, + atomic_read(&nesqp->refcount)); + } + } else if (rem_node) { + /* TIME_WAIT state */ + rem_ref_cm_node(cm_node->cm_core, cm_node); + } + if (cm_node->cm_id) + cm_id->rem_ref(cm_id); + kfree(recv_entry); + cm_node->recv_entry = NULL; +} /** * nes_cm_timer_tick */ static void nes_cm_timer_tick(unsigned long pass) { - unsigned long flags, qplockflags; + unsigned long flags; unsigned long nexttimeout = jiffies + NES_LONG_TIME; - struct iw_cm_id *cm_id; struct nes_cm_node *cm_node; struct nes_timer_entry *send_entry, *recv_entry; - struct list_head 
*list_core, *list_core_temp; - struct list_head *list_node, *list_node_temp; + struct list_head *list_core_temp; + struct list_head *list_node; struct nes_cm_core *cm_core = g_cm_core; - struct nes_qp *nesqp; u32 settimer = 0; int ret = NETDEV_TX_OK; - enum nes_cm_node_state last_state; struct list_head timer_list; INIT_LIST_HEAD(&timer_list); @@ -461,7 +548,7 @@ static void nes_cm_timer_tick(unsigned long pass) list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) { cm_node = container_of(list_node, struct nes_cm_node, list); - if (!list_empty(&cm_node->recv_list) || (cm_node->send_entry)) { + if ((cm_node->recv_entry) || (cm_node->send_entry)) { add_ref_cm_node(cm_node); list_add(&cm_node->timer_entry, &timer_list); } @@ -471,54 +558,18 @@ static void nes_cm_timer_tick(unsigned long pass) list_for_each_safe(list_node, list_core_temp, &timer_list) { cm_node = container_of(list_node, struct nes_cm_node, timer_entry); - spin_lock_irqsave(&cm_node->recv_list_lock, flags); - list_for_each_safe(list_core, list_node_temp, - &cm_node->recv_list) { - recv_entry = container_of(list_core, - struct nes_timer_entry, list); - if (!recv_entry) - break; + recv_entry = cm_node->recv_entry; + + if (recv_entry) { if (time_after(recv_entry->timetosend, jiffies)) { if (nexttimeout > recv_entry->timetosend || - !settimer) { + !settimer) { nexttimeout = recv_entry->timetosend; settimer = 1; } - continue; - } - list_del(&recv_entry->list); - cm_id = cm_node->cm_id; - spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); - nesqp = (struct nes_qp *)recv_entry->skb; - spin_lock_irqsave(&nesqp->lock, qplockflags); - if (nesqp->cm_id) { - nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " - "refcount = %d: HIT A " - "NES_TIMER_TYPE_CLOSE with something " - "to do!!!\n", nesqp->hwqp.qp_id, cm_id, - atomic_read(&nesqp->refcount)); - nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; - nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; - nesqp->ibqp_state = IB_QPS_ERR; - spin_unlock_irqrestore(&nesqp->lock, - qplockflags); - nes_cm_disconn(nesqp); - } else { - spin_unlock_irqrestore(&nesqp->lock, - qplockflags); - nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, " - "refcount = %d: HIT A " - "NES_TIMER_TYPE_CLOSE with nothing " - "to do!!!\n", nesqp->hwqp.qp_id, cm_id, - atomic_read(&nesqp->refcount)); - } - if (cm_id) - cm_id->rem_ref(cm_id); - - kfree(recv_entry); - spin_lock_irqsave(&cm_node->recv_list_lock, flags); + } else + handle_recv_entry(cm_node, 1); } - spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); spin_lock_irqsave(&cm_node->retrans_list_lock, flags); do { @@ -533,12 +584,11 @@ static void nes_cm_timer_tick(unsigned long pass) nexttimeout = send_entry->timetosend; settimer = 1; - break; } } else { free_retrans_entry(cm_node); - break; } + break; } if ((cm_node->state == NES_CM_STATE_TSA) || @@ -550,16 +600,12 @@ static void nes_cm_timer_tick(unsigned long pass) if (!send_entry->retranscount || !send_entry->retrycount) { cm_packets_dropped++; - last_state = cm_node->state; - cm_node->state = NES_CM_STATE_CLOSED; free_retrans_entry(cm_node); + spin_unlock_irqrestore( &cm_node->retrans_list_lock, flags); - if (last_state == NES_CM_STATE_SYN_RCVD) - rem_ref_cm_node(cm_core, cm_node); - else - create_event(cm_node, - NES_CM_EVENT_ABORTED); + nes_retrans_expired(cm_node); + cm_node->state = NES_CM_STATE_CLOSED; spin_lock_irqsave(&cm_node->retrans_list_lock, flags); break; @@ -714,7 +760,7 @@ static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb) skb = dev_alloc_skb(MAX_CM_BUFFER); if 
(!skb) { nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); - return -1; + return -ENOMEM; } form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags); @@ -778,14 +824,10 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, unsigned long flags; struct list_head *hte; struct nes_cm_node *cm_node; - __be32 tmp_addr = cpu_to_be32(loc_addr); /* get a handle on the hte */ hte = &cm_core->connected_nodes; - nes_debug(NES_DBG_CM, "Searching for an owner node: %pI4:%x from core %p->%p\n", - &tmp_addr, loc_port, cm_core, hte); - /* walk list and find cm_node associated with this session ID */ spin_lock_irqsave(&cm_core->ht_lock, flags); list_for_each_entry(cm_node, hte, list) { @@ -875,7 +917,8 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, struct nes_cm_listener *listener, int free_hanging_nodes) { - int ret = 1; + int ret = -EINVAL; + int err = 0; unsigned long flags; struct list_head *list_pos = NULL; struct list_head *list_temp = NULL; @@ -904,10 +947,60 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, list_for_each_safe(list_pos, list_temp, &reset_list) { cm_node = container_of(list_pos, struct nes_cm_node, - reset_entry); - cleanup_retrans_entry(cm_node); - send_reset(cm_node, NULL); - rem_ref_cm_node(cm_node->cm_core, cm_node); + reset_entry); + { + struct nes_cm_node *loopback = cm_node->loopbackpartner; + if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) { + rem_ref_cm_node(cm_node->cm_core, cm_node); + } else { + if (!loopback) { + cleanup_retrans_entry(cm_node); + err = send_reset(cm_node, NULL); + if (err) { + cm_node->state = + NES_CM_STATE_CLOSED; + WARN_ON(1); + } else { + cm_node->state = + NES_CM_STATE_CLOSED; + rem_ref_cm_node( + cm_node->cm_core, + cm_node); + } + } else { + struct nes_cm_event event; + + event.cm_node = loopback; + event.cm_info.rem_addr = + loopback->rem_addr; + event.cm_info.loc_addr = + loopback->loc_addr; + event.cm_info.rem_port = + loopback->rem_port; + event.cm_info.loc_port = + loopback->loc_port; + event.cm_info.cm_id = loopback->cm_id; + cm_event_connect_error(&event); + loopback->state = NES_CM_STATE_CLOSED; + + event.cm_node = cm_node; + event.cm_info.rem_addr = + cm_node->rem_addr; + event.cm_info.loc_addr = + cm_node->loc_addr; + event.cm_info.rem_port = + cm_node->rem_port; + event.cm_info.loc_port = + cm_node->loc_port; + event.cm_info.cm_id = cm_node->cm_id; + cm_event_reset(&event); + + rem_ref_cm_node(cm_node->cm_core, + cm_node); + + } + } + } } spin_lock_irqsave(&cm_core->listen_list_lock, flags); @@ -968,6 +1061,7 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core, if (cm_node->accept_pend) { BUG_ON(!cm_node->listener); atomic_dec(&cm_node->listener->pend_accepts_cnt); + cm_node->accept_pend = 0; BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0); } @@ -994,7 +1088,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip) memset(&fl, 0, sizeof fl); fl.nl_u.ip4_u.daddr = htonl(dst_ip); if (ip_route_output_key(&init_net, &rt, &fl)) { - printk("%s: ip_route_output_key failed for 0x%08X\n", + printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", __func__, dst_ip); return rc; } @@ -1057,8 +1151,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, cm_node->cm_id); spin_lock_init(&cm_node->retrans_list_lock); - INIT_LIST_HEAD(&cm_node->recv_list); - spin_lock_init(&cm_node->recv_list_lock); cm_node->loopbackpartner = NULL; 
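	/* Assumed from the add_ref/rem_ref pairing in this file: the new
	 * node starts with one reference owned by the caller, dropped
	 * later through rem_ref_cm_node(). */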
atomic_set(&cm_node->ref_count, 1); @@ -1126,10 +1218,7 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node) static int rem_ref_cm_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) { - unsigned long flags, qplockflags; - struct nes_timer_entry *recv_entry; - struct iw_cm_id *cm_id; - struct list_head *list_core, *list_node_temp; + unsigned long flags; struct nes_qp *nesqp; if (!cm_node) @@ -1150,38 +1239,9 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, atomic_dec(&cm_node->listener->pend_accepts_cnt); BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0); } - BUG_ON(cm_node->send_entry); - spin_lock_irqsave(&cm_node->recv_list_lock, flags); - list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) { - recv_entry = container_of(list_core, struct nes_timer_entry, - list); - list_del(&recv_entry->list); - cm_id = cm_node->cm_id; - spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); - nesqp = (struct nes_qp *)recv_entry->skb; - spin_lock_irqsave(&nesqp->lock, qplockflags); - if (nesqp->cm_id) { - nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A " - "NES_TIMER_TYPE_CLOSE with something to do!\n", - nesqp->hwqp.qp_id, cm_id); - nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; - nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; - nesqp->ibqp_state = IB_QPS_ERR; - spin_unlock_irqrestore(&nesqp->lock, qplockflags); - nes_cm_disconn(nesqp); - } else { - spin_unlock_irqrestore(&nesqp->lock, qplockflags); - nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A " - "NES_TIMER_TYPE_CLOSE with nothing to do!\n", - nesqp->hwqp.qp_id, cm_id); - } - cm_id->rem_ref(cm_id); - - kfree(recv_entry); - spin_lock_irqsave(&cm_node->recv_list_lock, flags); - } - spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); - + WARN_ON(cm_node->send_entry); + if (cm_node->recv_entry) + handle_recv_entry(cm_node, 0); if (cm_node->listener) { mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0); } else { @@ -1266,8 +1326,7 @@ static void drop_packet(struct sk_buff *skb) dev_kfree_skb_any(skb); } -static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, - struct tcphdr *tcph) +static void handle_fin_pkt(struct nes_cm_node *cm_node) { nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " "refcnt=%d\n", cm_node, cm_node->state, @@ -1279,23 +1338,30 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, case NES_CM_STATE_SYN_SENT: case NES_CM_STATE_ESTABLISHED: case NES_CM_STATE_MPAREQ_SENT: + case NES_CM_STATE_MPAREJ_RCVD: cm_node->state = NES_CM_STATE_LAST_ACK; - send_fin(cm_node, skb); + send_fin(cm_node, NULL); break; case NES_CM_STATE_FIN_WAIT1: cm_node->state = NES_CM_STATE_CLOSING; - send_ack(cm_node, skb); + send_ack(cm_node, NULL); + /* Wait for ACK as this is simultanous close.. + * After we receive ACK, do not send anything.. + * Just rm the node.. Done.. 
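 * (Simultaneous close: FIN_WAIT1 plus a received FIN moves the node
 * to CLOSING, and the later ACK handler drops the node reference via
 * rem_ref_cm_node() instead of lingering in TIME_WAIT.)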
*/ break; case NES_CM_STATE_FIN_WAIT2: cm_node->state = NES_CM_STATE_TIME_WAIT; - send_ack(cm_node, skb); + send_ack(cm_node, NULL); + schedule_nes_timer(cm_node, NULL, NES_TIMER_TYPE_CLOSE, 1, 0); + break; + case NES_CM_STATE_TIME_WAIT: cm_node->state = NES_CM_STATE_CLOSED; + rem_ref_cm_node(cm_node->cm_core, cm_node); break; case NES_CM_STATE_TSA: default: nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n", cm_node, cm_node->state); - drop_packet(skb); break; } } @@ -1341,23 +1407,35 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, cleanup_retrans_entry(cm_node); drop_packet(skb); break; + case NES_CM_STATE_TIME_WAIT: + cleanup_retrans_entry(cm_node); + cm_node->state = NES_CM_STATE_CLOSED; + rem_ref_cm_node(cm_node->cm_core, cm_node); + drop_packet(skb); + break; + case NES_CM_STATE_FIN_WAIT1: + cleanup_retrans_entry(cm_node); + nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__); default: drop_packet(skb); break; } } -static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb, - enum nes_cm_event_type type) + +static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) { - int ret; + int ret = 0; int datasize = skb->len; u8 *dataloc = skb->data; - ret = parse_mpa(cm_node, dataloc, datasize); - if (ret < 0) { + + enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN; + u32 res_type; + ret = parse_mpa(cm_node, dataloc, &res_type, datasize); + if (ret) { nes_debug(NES_DBG_CM, "didn't like MPA Request\n"); - if (type == NES_CM_EVENT_CONNECTED) { + if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) { nes_debug(NES_DBG_CM, "%s[%u] create abort for " "cm_node=%p listener=%p state=%d\n", __func__, __LINE__, cm_node, cm_node->listener, @@ -1366,18 +1444,38 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb, } else { passive_open_err(cm_node, skb, 1); } - } else { - cleanup_retrans_entry(cm_node); - dev_kfree_skb_any(skb); - if (type == NES_CM_EVENT_CONNECTED) + return; + } + + switch (cm_node->state) { + case NES_CM_STATE_ESTABLISHED: + if (res_type == NES_MPA_REQUEST_REJECT) { + /*BIG problem as we are receiving the MPA.. So should + * not be REJECT.. This is Passive Open.. 
We can + * only receive it Reject for Active Open...*/ + WARN_ON(1); + } + cm_node->state = NES_CM_STATE_MPAREQ_RCVD; + type = NES_CM_EVENT_MPA_REQ; + atomic_set(&cm_node->passive_state, + NES_PASSIVE_STATE_INDICATED); + break; + case NES_CM_STATE_MPAREQ_SENT: + if (res_type == NES_MPA_REQUEST_REJECT) { + type = NES_CM_EVENT_MPA_REJECT; + cm_node->state = NES_CM_STATE_MPAREJ_RCVD; + } else { + type = NES_CM_EVENT_CONNECTED; cm_node->state = NES_CM_STATE_TSA; - else - atomic_set(&cm_node->passive_state, - NES_PASSIVE_STATE_INDICATED); - create_event(cm_node, type); + } + break; + default: + WARN_ON(1); + break; } - return ; + dev_kfree_skb_any(skb); + create_event(cm_node, type); } static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb) @@ -1465,8 +1563,6 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, break; case NES_CM_STATE_LISTENING: /* Passive OPEN */ - cm_node->accept_pend = 1; - atomic_inc(&cm_node->listener->pend_accepts_cnt); if (atomic_read(&cm_node->listener->pend_accepts_cnt) > cm_node->listener->backlog) { nes_debug(NES_DBG_CM, "drop syn due to backlog " @@ -1484,6 +1580,9 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, } cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; BUG_ON(cm_node->send_entry); + cm_node->accept_pend = 1; + atomic_inc(&cm_node->listener->pend_accepts_cnt); + cm_node->state = NES_CM_STATE_SYN_RCVD; send_syn(cm_node, 1, skb); break; @@ -1518,6 +1617,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, inc_sequence = ntohl(tcph->seq); switch (cm_node->state) { case NES_CM_STATE_SYN_SENT: + cleanup_retrans_entry(cm_node); /* active open */ if (check_syn(cm_node, tcph, skb)) return; @@ -1567,10 +1667,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, u32 rem_seq; int ret; int optionsize; - u32 temp_seq = cm_node->tcp_cntxt.loc_seq_num; - optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); - cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); if (check_seq(cm_node, tcph, skb)) return; @@ -1580,7 +1677,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, rem_seq = ntohl(tcph->seq); rem_seq_ack = ntohl(tcph->ack_seq); datasize = skb->len; - + cleanup_retrans_entry(cm_node); switch (cm_node->state) { case NES_CM_STATE_SYN_RCVD: /* Passive OPEN */ @@ -1588,7 +1685,6 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, if (ret) break; cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); - cm_node->tcp_cntxt.loc_seq_num = temp_seq; if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) { nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n"); @@ -1597,31 +1693,30 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, return; } cm_node->state = NES_CM_STATE_ESTABLISHED; + cleanup_retrans_entry(cm_node); if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; - cm_node->state = NES_CM_STATE_MPAREQ_RCVD; - handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_MPA_REQ); - } else { /* rcvd ACK only */ + handle_rcv_mpa(cm_node, skb); + } else { /* rcvd ACK only */ dev_kfree_skb_any(skb); cleanup_retrans_entry(cm_node); } break; case NES_CM_STATE_ESTABLISHED: /* Passive OPEN */ - /* We expect mpa frame to be received only */ + cleanup_retrans_entry(cm_node); if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; - cm_node->state = NES_CM_STATE_MPAREQ_RCVD; - handle_rcv_mpa(cm_node, skb, - NES_CM_EVENT_MPA_REQ); 
+ handle_rcv_mpa(cm_node, skb); } else drop_packet(skb); break; case NES_CM_STATE_MPAREQ_SENT: + cleanup_retrans_entry(cm_node); cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; - handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_CONNECTED); + handle_rcv_mpa(cm_node, skb); } else { /* Could be just an ack pkt.. */ cleanup_retrans_entry(cm_node); dev_kfree_skb_any(skb); @@ -1632,13 +1727,24 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, cleanup_retrans_entry(cm_node); send_reset(cm_node, skb); break; + case NES_CM_STATE_LAST_ACK: + cleanup_retrans_entry(cm_node); + cm_node->state = NES_CM_STATE_CLOSED; + cm_node->cm_id->rem_ref(cm_node->cm_id); + case NES_CM_STATE_CLOSING: + cleanup_retrans_entry(cm_node); + rem_ref_cm_node(cm_node->cm_core, cm_node); + drop_packet(skb); + break; case NES_CM_STATE_FIN_WAIT1: + cleanup_retrans_entry(cm_node); + drop_packet(skb); + cm_node->state = NES_CM_STATE_FIN_WAIT2; + break; case NES_CM_STATE_SYN_SENT: case NES_CM_STATE_FIN_WAIT2: case NES_CM_STATE_TSA: case NES_CM_STATE_MPAREQ_RCVD: - case NES_CM_STATE_LAST_ACK: - case NES_CM_STATE_CLOSING: case NES_CM_STATE_UNKNOWN: default: drop_packet(skb); @@ -1748,6 +1854,7 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, { enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN; struct tcphdr *tcph = tcp_hdr(skb); + u32 fin_set = 0; skb_pull(skb, ip_hdr(skb)->ihl << 2); nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d " @@ -1760,10 +1867,10 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, pkt_type = NES_PKT_TYPE_SYN; if (tcph->ack) pkt_type = NES_PKT_TYPE_SYNACK; - } else if (tcph->fin) - pkt_type = NES_PKT_TYPE_FIN; - else if (tcph->ack) + } else if (tcph->ack) pkt_type = NES_PKT_TYPE_ACK; + if (tcph->fin) + fin_set = 1; switch (pkt_type) { case NES_PKT_TYPE_SYN: @@ -1774,15 +1881,16 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, break; case NES_PKT_TYPE_ACK: handle_ack_pkt(cm_node, skb, tcph); + if (fin_set) + handle_fin_pkt(cm_node); break; case NES_PKT_TYPE_RST: handle_rst_pkt(cm_node, skb, tcph); break; - case NES_PKT_TYPE_FIN: - handle_fin_pkt(cm_node, skb, tcph); - break; default: drop_packet(skb); + if (fin_set) + handle_fin_pkt(cm_node); break; } } @@ -1925,7 +2033,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, loopbackremotenode->tcp_cntxt.rcv_wscale; loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale; - + loopbackremotenode->state = NES_CM_STATE_MPAREQ_RCVD; create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ); } return cm_node; @@ -1980,7 +2088,11 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) { int ret = 0; + int err = 0; int passive_state; + struct nes_cm_event event; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct nes_cm_node *loopback = cm_node->loopbackpartner; nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); @@ -1989,12 +2101,38 @@ static int mini_cm_reject(struct nes_cm_core *cm_core, return ret; cleanup_retrans_entry(cm_node); - passive_state = atomic_add_return(1, &cm_node->passive_state); - cm_node->state = NES_CM_STATE_CLOSED; - if (passive_state == NES_SEND_RESET_EVENT) + if (!loopback) { + passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == 
NES_SEND_RESET_EVENT) { + cm_node->state = NES_CM_STATE_CLOSED; + rem_ref_cm_node(cm_core, cm_node); + } else { + ret = send_mpa_reject(cm_node); + if (ret) { + cm_node->state = NES_CM_STATE_CLOSED; + err = send_reset(cm_node, NULL); + if (err) + WARN_ON(1); + } else + cm_id->add_ref(cm_id); + } + } else { + cm_node->cm_id = NULL; + event.cm_node = loopback; + event.cm_info.rem_addr = loopback->rem_addr; + event.cm_info.loc_addr = loopback->loc_addr; + event.cm_info.rem_port = loopback->rem_port; + event.cm_info.loc_port = loopback->loc_port; + event.cm_info.cm_id = loopback->cm_id; + cm_event_mpa_reject(&event); rem_ref_cm_node(cm_core, cm_node); - else - ret = send_reset(cm_node, NULL); + loopback->state = NES_CM_STATE_CLOSING; + + cm_id = loopback->cm_id; + rem_ref_cm_node(cm_core, loopback); + cm_id->rem_ref(cm_id); + } + return ret; } @@ -2031,6 +2169,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod case NES_CM_STATE_CLOSING: ret = -1; break; + case NES_CM_STATE_MPAREJ_RCVD: case NES_CM_STATE_LISTENING: case NES_CM_STATE_UNKNOWN: case NES_CM_STATE_INITED: @@ -2227,15 +2366,15 @@ static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value) int ret = 0; switch (type) { - case NES_CM_SET_PKT_SIZE: - cm_core->mtu = value; - break; - case NES_CM_SET_FREE_PKT_Q_SIZE: - cm_core->free_tx_pkt_max = value; - break; - default: - /* unknown set option */ - ret = -EINVAL; + case NES_CM_SET_PKT_SIZE: + cm_core->mtu = value; + break; + case NES_CM_SET_FREE_PKT_Q_SIZE: + cm_core->free_tx_pkt_max = value; + break; + default: + /* unknown set option */ + ret = -EINVAL; } return ret; @@ -2490,12 +2629,14 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt) int ret = 0; struct nes_vnic *nesvnic; struct nes_device *nesdev; + struct nes_ib_device *nesibdev; nesvnic = to_nesvnic(nesqp->ibqp.device); if (!nesvnic) return -EINVAL; nesdev = nesvnic->nesdev; + nesibdev = nesvnic->nesibdev; nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", atomic_read(&nesvnic->netdev->refcnt)); @@ -2507,6 +2648,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt) } else { /* Need to free the Last Streaming Mode Message */ if (nesqp->ietf_frame) { + if (nesqp->lsmm_mr) + nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr); pci_free_consistent(nesdev->pcidev, nesqp->private_data_len+sizeof(struct ietf_mpa_frame), nesqp->ietf_frame, nesqp->ietf_frame_pbase); @@ -2543,6 +2686,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) u32 crc_value; int ret; int passive_state; + struct nes_ib_device *nesibdev; + struct ib_mr *ibmr = NULL; + struct ib_phys_buf ibphysbuf; + struct nes_pd *nespd; + + ibqp = nes_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) @@ -2601,6 +2750,26 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) { u64temp = (unsigned long)nesqp; + nesibdev = nesvnic->nesibdev; + nespd = nesqp->nespd; + ibphysbuf.addr = nesqp->ietf_frame_pbase; + ibphysbuf.size = conn_param->private_data_len + + sizeof(struct ietf_mpa_frame); + ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, + &ibphysbuf, 1, + IB_ACCESS_LOCAL_WRITE, + (u64 *)&nesqp->ietf_frame); + if (!ibmr) { + nes_debug(NES_DBG_CM, "Unable to register memory region " + "for LSMM for cm_node = %p\n", + cm_node); + return -ENOMEM; + } + + ibmr->pd = &nespd->ibpd; + ibmr->device = nespd->ibpd.device; + nesqp->lsmm_mr = ibmr; + u64temp |= NES_SW_CONTEXT_ALIGN>>1; 
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, @@ -2611,23 +2780,20 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); - wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = - cpu_to_le32((u32)nesqp->ietf_frame_pbase); - wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = - cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32)); + set_wqe_64bit_value(wqe->wqe_words, + NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, + (u64)nesqp->ietf_frame); wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); - wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; + wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU); } else { nesqp->nesqp_context->ird_ord_sizes |= - cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | - NES_QPCONTEXT_ORDIRD_WRPDU | - NES_QPCONTEXT_ORDIRD_ALSMM)); + cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU); } nesqp->skip_lsmm = 1; @@ -2749,23 +2915,35 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { struct nes_cm_node *cm_node; + struct nes_cm_node *loopback; + struct nes_cm_core *cm_core; atomic_inc(&cm_rejects); cm_node = (struct nes_cm_node *) cm_id->provider_data; + loopback = cm_node->loopbackpartner; cm_core = cm_node->cm_core; + cm_node->cm_id = cm_id; cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len; + if (cm_node->mpa_frame_size > MAX_CM_BUFFER) + return -EINVAL; + strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP); - memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len); + if (loopback) { + memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len); + loopback->mpa_frame.priv_data_len = pdata_len; + loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) + + pdata_len; + } else { + memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len); + cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len); + } - cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len); cm_node->mpa_frame.rev = mpa_version; cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT; - cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node); - - return 0; + return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node); } @@ -3274,13 +3452,56 @@ static void cm_event_mpa_req(struct nes_cm_event *event) cm_event.remote_addr.sin_family = AF_INET; cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); + cm_event.private_data = cm_node->mpa_frame_buf; + cm_event.private_data_len = (u8) cm_node->mpa_frame_size; + + ret = cm_id->event_handler(cm_id, &cm_event); + if (ret) + printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", + __func__, __LINE__, ret); + return; +} + + +static void cm_event_mpa_reject(struct nes_cm_event *event) +{ + struct iw_cm_id *cm_id; + struct iw_cm_event cm_event; + struct nes_cm_node *cm_node; + int ret; + + cm_node = event->cm_node; + if (!cm_node) + return; + cm_id = cm_node->cm_id; + + atomic_inc(&cm_connect_reqs); + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", + cm_node, cm_id, jiffies); + + cm_event.event = IW_CM_EVENT_CONNECT_REPLY; + cm_event.status = -ECONNREFUSED; + cm_event.provider_data = 
cm_id->provider_data; + + cm_event.local_addr.sin_family = AF_INET; + cm_event.local_addr.sin_port = htons(event->cm_info.loc_port); + cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr); + + cm_event.remote_addr.sin_family = AF_INET; + cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); + cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); - cm_event.private_data = cm_node->mpa_frame_buf; - cm_event.private_data_len = (u8) cm_node->mpa_frame_size; + cm_event.private_data = cm_node->mpa_frame_buf; + cm_event.private_data_len = (u8) cm_node->mpa_frame_size; + + nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, " + "remote_addr=%08x\n", + cm_event.local_addr.sin_addr.s_addr, + cm_event.remote_addr.sin_addr.s_addr); ret = cm_id->event_handler(cm_id, &cm_event); if (ret) - printk("%s[%u] OFA CM event_handler returned, ret=%d\n", + printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n", __func__, __LINE__, ret); return; @@ -3345,6 +3566,14 @@ static void nes_cm_event_handler(struct work_struct *work) cm_event_connected(event); nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); break; + case NES_CM_EVENT_MPA_REJECT: + if ((!event->cm_node->cm_id) || + (event->cm_node->state == NES_CM_STATE_TSA)) + break; + cm_event_mpa_reject(event); + nes_debug(NES_DBG_CM, "CM Event: REJECT\n"); + break; + case NES_CM_EVENT_ABORTED: if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index fafa35042eb..d5f778202eb 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -39,6 +39,9 @@ #define NES_MANAGE_APBVT_DEL 0 #define NES_MANAGE_APBVT_ADD 1 +#define NES_MPA_REQUEST_ACCEPT 1 +#define NES_MPA_REQUEST_REJECT 2 + /* IETF MPA -- defines, enums, structs */ #define IEFT_MPA_KEY_REQ "MPA ID Req Frame" #define IEFT_MPA_KEY_REP "MPA ID Rep Frame" @@ -186,6 +189,7 @@ enum nes_cm_node_state { NES_CM_STATE_ACCEPTING, NES_CM_STATE_MPAREQ_SENT, NES_CM_STATE_MPAREQ_RCVD, + NES_CM_STATE_MPAREJ_RCVD, NES_CM_STATE_TSA, NES_CM_STATE_FIN_WAIT1, NES_CM_STATE_FIN_WAIT2, @@ -278,13 +282,12 @@ struct nes_cm_node { struct nes_timer_entry *send_entry; spinlock_t retrans_list_lock; - struct list_head recv_list; - spinlock_t recv_list_lock; + struct nes_timer_entry *recv_entry; int send_write0; union { struct ietf_mpa_frame mpa_frame; - u8 mpa_frame_buf[NES_CM_DEFAULT_MTU]; + u8 mpa_frame_buf[MAX_CM_BUFFER]; }; u16 mpa_frame_size; struct iw_cm_id *cm_id; @@ -326,6 +329,7 @@ enum nes_cm_event_type { NES_CM_EVENT_MPA_REQ, NES_CM_EVENT_MPA_CONNECT, NES_CM_EVENT_MPA_ACCEPT, + NES_CM_EVENT_MPA_REJECT, NES_CM_EVENT_MPA_ESTABLISHED, NES_CM_EVENT_CONNECTED, NES_CM_EVENT_CLOSED, diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h index da9daba8e66..0fb8d81d9a6 100644 --- a/drivers/infiniband/hw/nes/nes_context.h +++ b/drivers/infiniband/hw/nes/nes_context.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 5d139db1b77..52e734042b8 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -254,6 +254,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { u32 adapter_size; u32 arp_table_size; u16 vendor_id; + u16 device_id; u8 OneG_Mode; u8 func_index; @@ -356,6 +357,13 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { return NULL; } + nesadapter->vendor_id = (((u32) nesadapter->mac_addr_high) << 8) | + (nesadapter->mac_addr_low >> 24); + + pci_bus_read_config_word(nesdev->pcidev->bus, nesdev->pcidev->devfn, + PCI_DEVICE_ID, &device_id); + nesadapter->vendor_part_id = device_id; + if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter, OneG_Mode)) { kfree(nesadapter); @@ -1636,7 +1644,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) nesvnic->post_cqp_request = nes_post_cqp_request; nesvnic->mcrq_mcast_filter = NULL; - spin_lock_init(&nesvnic->nic.sq_lock); spin_lock_init(&nesvnic->nic.rq_lock); /* setup the RQ */ @@ -2261,6 +2268,8 @@ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq) if (++head >= aeq_size) head = 0; + + nes_write32(nesdev->regs + NES_AEQ_ALLOC, 1 << 16); } while (1); aeq->aeq_head = head; @@ -2541,7 +2550,7 @@ static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic { struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); - netif_rx_schedule(&nesvnic->napi); + napi_schedule(&nesvnic->napi); } @@ -2622,9 +2631,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) } else break; } - if (skb) - dev_kfree_skb_any(skb); } + if (skb) + dev_kfree_skb_any(skb); nesnic->sq_tail++; nesnic->sq_tail &= nesnic->sq_size-1; if (sq_cqes > 128) { diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index bc0b4de0445..f41a8710d2a 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -1,5 +1,5 @@ /* -* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. +* Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -61,6 +61,7 @@ enum pci_regs { NES_CQ_ACK = 0x0034, NES_WQE_ALLOC = 0x0040, NES_CQE_ALLOC = 0x0044, + NES_AEQ_ALLOC = 0x0048 }; enum indexed_regs { @@ -875,7 +876,6 @@ struct nes_hw_nic { u8 replenishing_rq; u8 reserved; - spinlock_t sq_lock; spinlock_t rq_lock; }; @@ -1147,7 +1147,6 @@ struct nes_ib_device; struct nes_vnic { struct nes_ib_device *nesibdev; u64 sq_full; - u64 sq_locked; u64 tso_requests; u64 segmented_tso_requests; u64 linearized_skbs; diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 57a47cf7e51..8d3e4c6f237 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -111,7 +111,7 @@ static int nes_netdev_poll(struct napi_struct *napi, int budget) nes_nic_ce_handler(nesdev, nescq); if (nescq->cqes_pending == 0) { - netif_rx_complete(napi); + napi_complete(napi); /* clear out completed cqes and arm */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | nescq->cq_number | (nescq->cqe_allocs_pending << 16)); @@ -400,8 +400,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) if (skb_headlen(skb) == skb->len) { if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) { nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0; - nesnic->tx_skb[nesnic->sq_head] = NULL; - dev_kfree_skb(skb); + nesnic->tx_skb[nesnic->sq_head] = skb; } } else { /* Deal with Fragments */ @@ -453,7 +452,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) u32 wqe_count=1; u32 send_rc; struct iphdr *iph; - unsigned long flags; __le16 *wqe_fragment_length; u32 nr_frags; u32 original_first_length; @@ -480,13 +478,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) if (netif_queue_stopped(netdev)) return NETDEV_TX_BUSY; - local_irq_save(flags); - if (!spin_trylock(&nesnic->sq_lock)) { - local_irq_restore(flags); - nesvnic->sq_locked++; - return NETDEV_TX_LOCKED; - } - /* Check if SQ is full */ if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) { if (!netif_queue_stopped(netdev)) { @@ -498,7 +489,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) } } nesvnic->sq_full++; - spin_unlock_irqrestore(&nesnic->sq_lock, flags); return NETDEV_TX_BUSY; } @@ -531,7 +521,6 @@ sq_no_longer_full: } } nesvnic->sq_full++; - spin_unlock_irqrestore(&nesnic->sq_lock, flags); nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n", netdev->name); return NETDEV_TX_BUSY; @@ -656,17 +645,13 @@ tso_sq_no_longer_full: skb_set_transport_header(skb, hoffset); skb_set_network_header(skb, nhoffset); send_rc = nes_nic_send(skb, netdev); - if (send_rc != NETDEV_TX_OK) { - spin_unlock_irqrestore(&nesnic->sq_lock, flags); + if (send_rc != NETDEV_TX_OK) return NETDEV_TX_OK; - } } } else { send_rc = nes_nic_send(skb, netdev); - if (send_rc != NETDEV_TX_OK) { - spin_unlock_irqrestore(&nesnic->sq_lock, flags); + if (send_rc != NETDEV_TX_OK) return NETDEV_TX_OK; - } } barrier(); @@ -676,7 +661,6 @@ tso_sq_no_longer_full: (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id); netdev->trans_start = jiffies; - spin_unlock_irqrestore(&nesnic->sq_lock, flags); return NETDEV_TX_OK; } @@ -1012,7 +996,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { "Pause Frames Received", "Internal Routing Errors", "SQ SW Dropped SKBs", - "SQ Locked", "SQ Full", "Segmented TSO Requests", "Rx Symbol Errors", @@ -1129,16 +1112,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, struct nes_device *nesdev = nesvnic->nesdev; u32 nic_count; u32 u32temp; + u32 index = 0; target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT; - target_stat_values[0] = nesvnic->nesdev->link_status_interrupts; - target_stat_values[1] = nesvnic->linearized_skbs; - target_stat_values[2] = nesvnic->tso_requests; + target_stat_values[index] = nesvnic->nesdev->link_status_interrupts; + target_stat_values[++index] = nesvnic->linearized_skbs; + target_stat_values[++index] = nesvnic->tso_requests; u32temp = 
nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200)); nesvnic->nesdev->mac_pause_frames_sent += u32temp; - target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent; + target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent; u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200)); @@ -1209,60 +1193,59 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, nesvnic->endnode_ipv4_tcp_retransmits += u32temp; } - target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received; - target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err; - target_stat_values[6] = nesvnic->tx_sw_dropped; - target_stat_values[7] = nesvnic->sq_locked; - target_stat_values[8] = nesvnic->sq_full; - target_stat_values[9] = nesvnic->segmented_tso_requests; - target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames; - target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames; - target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames; - target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames; - target_stat_values[14] = nesvnic->endnode_nstat_rx_discard; - target_stat_values[15] = nesvnic->endnode_nstat_rx_octets; - target_stat_values[16] = nesvnic->endnode_nstat_rx_frames; - target_stat_values[17] = nesvnic->endnode_nstat_tx_octets; - target_stat_values[18] = nesvnic->endnode_nstat_tx_frames; - target_stat_values[19] = mh_detected; - target_stat_values[20] = mh_pauses_sent; - target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits; - target_stat_values[22] = atomic_read(&cm_connects); - target_stat_values[23] = atomic_read(&cm_accepts); - target_stat_values[24] = atomic_read(&cm_disconnects); - target_stat_values[25] = atomic_read(&cm_connecteds); - target_stat_values[26] = atomic_read(&cm_connect_reqs); - target_stat_values[27] = atomic_read(&cm_rejects); - target_stat_values[28] = atomic_read(&mod_qp_timouts); - target_stat_values[29] = atomic_read(&qps_created); - target_stat_values[30] = atomic_read(&sw_qps_destroyed); - target_stat_values[31] = atomic_read(&qps_destroyed); - target_stat_values[32] = atomic_read(&cm_closes); - target_stat_values[33] = cm_packets_sent; - target_stat_values[34] = cm_packets_bounced; - target_stat_values[35] = cm_packets_created; - target_stat_values[36] = cm_packets_received; - target_stat_values[37] = cm_packets_dropped; - target_stat_values[38] = cm_packets_retrans; - target_stat_values[39] = cm_listens_created; - target_stat_values[40] = cm_listens_destroyed; - target_stat_values[41] = cm_backlog_drops; - target_stat_values[42] = atomic_read(&cm_loopbacks); - target_stat_values[43] = atomic_read(&cm_nodes_created); - target_stat_values[44] = atomic_read(&cm_nodes_destroyed); - target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts); - target_stat_values[46] = atomic_read(&cm_resets_recvd); - target_stat_values[47] = int_mod_timer_init; - target_stat_values[48] = int_mod_cq_depth_1; - target_stat_values[49] = int_mod_cq_depth_4; - target_stat_values[50] = int_mod_cq_depth_16; - target_stat_values[51] = int_mod_cq_depth_24; - target_stat_values[52] = int_mod_cq_depth_32; - target_stat_values[53] = int_mod_cq_depth_128; - target_stat_values[54] = int_mod_cq_depth_256; - target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated; - target_stat_values[56] = nesvnic->lro_mgr.stats.flushed; - target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc; + target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received; 
+ target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err; + target_stat_values[++index] = nesvnic->tx_sw_dropped; + target_stat_values[++index] = nesvnic->sq_full; + target_stat_values[++index] = nesvnic->segmented_tso_requests; + target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames; + target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames; + target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames; + target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames; + target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard; + target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets; + target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames; + target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets; + target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames; + target_stat_values[++index] = mh_detected; + target_stat_values[++index] = mh_pauses_sent; + target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; + target_stat_values[++index] = atomic_read(&cm_connects); + target_stat_values[++index] = atomic_read(&cm_accepts); + target_stat_values[++index] = atomic_read(&cm_disconnects); + target_stat_values[++index] = atomic_read(&cm_connecteds); + target_stat_values[++index] = atomic_read(&cm_connect_reqs); + target_stat_values[++index] = atomic_read(&cm_rejects); + target_stat_values[++index] = atomic_read(&mod_qp_timouts); + target_stat_values[++index] = atomic_read(&qps_created); + target_stat_values[++index] = atomic_read(&sw_qps_destroyed); + target_stat_values[++index] = atomic_read(&qps_destroyed); + target_stat_values[++index] = atomic_read(&cm_closes); + target_stat_values[++index] = cm_packets_sent; + target_stat_values[++index] = cm_packets_bounced; + target_stat_values[++index] = cm_packets_created; + target_stat_values[++index] = cm_packets_received; + target_stat_values[++index] = cm_packets_dropped; + target_stat_values[++index] = cm_packets_retrans; + target_stat_values[++index] = cm_listens_created; + target_stat_values[++index] = cm_listens_destroyed; + target_stat_values[++index] = cm_backlog_drops; + target_stat_values[++index] = atomic_read(&cm_loopbacks); + target_stat_values[++index] = atomic_read(&cm_nodes_created); + target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); + target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); + target_stat_values[++index] = atomic_read(&cm_resets_recvd); + target_stat_values[++index] = int_mod_timer_init; + target_stat_values[++index] = int_mod_cq_depth_1; + target_stat_values[++index] = int_mod_cq_depth_4; + target_stat_values[++index] = int_mod_cq_depth_16; + target_stat_values[++index] = int_mod_cq_depth_24; + target_stat_values[++index] = int_mod_cq_depth_32; + target_stat_values[++index] = int_mod_cq_depth_128; + target_stat_values[++index] = int_mod_cq_depth_256; + target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; + target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; + target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; } @@ -1568,6 +1551,19 @@ static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_g spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } +static const struct net_device_ops nes_netdev_ops = { + .ndo_open = nes_netdev_open, + .ndo_stop = nes_netdev_stop, + .ndo_start_xmit = nes_netdev_start_xmit, + .ndo_get_stats = nes_netdev_get_stats, + .ndo_tx_timeout = nes_netdev_tx_timeout, + .ndo_set_mac_address = 
nes_netdev_set_mac_address, + .ndo_set_multicast_list = nes_netdev_set_multicast_list, + .ndo_change_mtu = nes_netdev_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_register = nes_netdev_vlan_rx_register, +}; /** * nes_netdev_init - initialize network device @@ -1576,7 +1572,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, void __iomem *mmio_addr) { u64 u64temp; - struct nes_vnic *nesvnic = NULL; + struct nes_vnic *nesvnic; struct net_device *netdev; struct nic_qp_map *curr_qp_map; u32 u32temp; @@ -1588,22 +1584,12 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, printk(KERN_ERR PFX "nesvnic etherdev alloc failed"); return NULL; } + nesvnic = netdev_priv(netdev); nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name); SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev); - nesvnic = netdev_priv(netdev); - memset(nesvnic, 0, sizeof(*nesvnic)); - - netdev->open = nes_netdev_open; - netdev->stop = nes_netdev_stop; - netdev->hard_start_xmit = nes_netdev_start_xmit; - netdev->get_stats = nes_netdev_get_stats; - netdev->tx_timeout = nes_netdev_tx_timeout; - netdev->set_mac_address = nes_netdev_set_mac_address; - netdev->set_multicast_list = nes_netdev_set_multicast_list; - netdev->change_mtu = nes_netdev_change_mtu; netdev->watchdog_timeo = NES_TX_TIMEOUT; netdev->irq = nesdev->pcidev->irq; netdev->mtu = ETH_DATA_LEN; @@ -1611,6 +1597,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, netdev->addr_len = ETH_ALEN; netdev->type = ARPHRD_ETHER; netdev->features = NETIF_F_HIGHDMA; + netdev->netdev_ops = &nes_netdev_ops; netdev->ethtool_ops = &nes_ethtool_ops; netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128); nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h index e64306bce80..cc90c14b49e 100644 --- a/drivers/infiniband/hw/nes/nes_user.h +++ b/drivers/infiniband/hw/nes/nes_user.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index 6f3bc1b6bf2..a282031d15c 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4fdb72454f9..7e5b5ba13a7 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -551,6 +551,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr) struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; int i = 0; + int rc; /* free the resources */ if (nesfmr->leaf_pbl_cnt == 0) { @@ -572,7 +573,9 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr) nesmr->ibmw.rkey = ibfmr->rkey; nesmr->ibmw.uobject = NULL; - if (nesfmr->nesmr.pbls_used != 0) { + rc = nes_dealloc_mw(&nesmr->ibmw); + + if ((rc == 0) && (nesfmr->nesmr.pbls_used != 0)) { spin_lock_irqsave(&nesadapter->pbl_lock, flags); if (nesfmr->nesmr.pbl_4k) { nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used; @@ -584,7 +587,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr) spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); } - return nes_dealloc_mw(&nesmr->ibmw); + return rc; } @@ -1360,8 +1363,10 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT); nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size << NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT); + if (!udata) { nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN); nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN); + } nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number + ((u32)nesqp->nesrcq->hw_cq.cq_number << 16)); u64temp = (u64)nesqp->hwqp.sq_pbase; @@ -1884,21 +1889,75 @@ static int nes_destroy_cq(struct ib_cq *ib_cq) return ret; } +/** + * root_256 + */ +static u32 root_256(struct nes_device *nesdev, + struct nes_root_vpbl *root_vpbl, + struct nes_root_vpbl *new_root, + u16 pbl_count_4k, + u16 pbl_count_256) +{ + u64 leaf_pbl; + int i, j, k; + + if (pbl_count_4k == 1) { + new_root->pbl_vbase = pci_alloc_consistent(nesdev->pcidev, + 512, &new_root->pbl_pbase); + + if (new_root->pbl_vbase == NULL) + return 0; + + leaf_pbl = (u64)root_vpbl->pbl_pbase; + for (i = 0; i < 16; i++) { + new_root->pbl_vbase[i].pa_low = + cpu_to_le32((u32)leaf_pbl); + new_root->pbl_vbase[i].pa_high = + cpu_to_le32((u32)((((u64)leaf_pbl) >> 32))); + leaf_pbl += 256; + } + } else { + for (i = 3; i >= 0; i--) { + j = i * 16; + root_vpbl->pbl_vbase[j] = root_vpbl->pbl_vbase[i]; + leaf_pbl = le32_to_cpu(root_vpbl->pbl_vbase[j].pa_low) + + (((u64)le32_to_cpu(root_vpbl->pbl_vbase[j].pa_high)) + << 32); + for (k = 1; k < 16; k++) { + leaf_pbl += 256; + root_vpbl->pbl_vbase[j + k].pa_low = + cpu_to_le32((u32)leaf_pbl); + root_vpbl->pbl_vbase[j + k].pa_high = + cpu_to_le32((u32)((((u64)leaf_pbl) >> 32))); + } + } + } + + return 1; +} + /** * nes_reg_mr */ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl, - dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count, - int acc, u64 *iova_start) + dma_addr_t single_buffer, u16 pbl_count_4k, + u16 residual_page_count_4k, int acc, u64 *iova_start, + u16 *actual_pbl_cnt, u8 *used_4k_pbls) { struct nes_hw_cqp_wqe *cqp_wqe; struct nes_cqp_request *cqp_request; unsigned long flags; int ret; struct nes_adapter *nesadapter = nesdev->nesadapter; - /* int count; */ + uint pg_cnt = 0; + u16 pbl_count_256; + u16 pbl_count = 0; + u8 use_256_pbls = 0; + u8 use_4k_pbls = 0; + u16 use_two_level = (pbl_count_4k > 1) ? 
1 : 0; + struct nes_root_vpbl new_root = {0, 0, 0}; u32 opcode = 0; u16 major_code; @@ -1911,41 +1970,70 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, cqp_request->waiting = 1; cqp_wqe = &cqp_request->cqp_wqe; - spin_lock_irqsave(&nesadapter->pbl_lock, flags); - /* track PBL resources */ - if (pbl_count != 0) { - if (pbl_count > 1) { - /* Two level PBL */ - if ((pbl_count+1) > nesadapter->free_4kpbl) { - nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n"); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - nes_free_cqp_request(nesdev, cqp_request); - return -ENOMEM; - } else { - nesadapter->free_4kpbl -= pbl_count+1; - } - } else if (residual_page_count > 32) { - if (pbl_count > nesadapter->free_4kpbl) { - nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n"); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - nes_free_cqp_request(nesdev, cqp_request); - return -ENOMEM; - } else { - nesadapter->free_4kpbl -= pbl_count; + if (pbl_count_4k) { + spin_lock_irqsave(&nesadapter->pbl_lock, flags); + + pg_cnt = ((pbl_count_4k - 1) * 512) + residual_page_count_4k; + pbl_count_256 = (pg_cnt + 31) / 32; + if (pg_cnt <= 32) { + if (pbl_count_256 <= nesadapter->free_256pbl) + use_256_pbls = 1; + else if (pbl_count_4k <= nesadapter->free_4kpbl) + use_4k_pbls = 1; + } else if (pg_cnt <= 2048) { + if (((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) && + (nesadapter->free_4kpbl > (nesadapter->max_4kpbl >> 1))) { + use_4k_pbls = 1; + } else if ((pbl_count_256 + 1) <= nesadapter->free_256pbl) { + use_256_pbls = 1; + use_two_level = 1; + } else if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) { + use_4k_pbls = 1; } } else { - if (pbl_count > nesadapter->free_256pbl) { - nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n"); - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); - nes_free_cqp_request(nesdev, cqp_request); - return -ENOMEM; - } else { - nesadapter->free_256pbl -= pbl_count; - } + if ((pbl_count_4k + 1) <= nesadapter->free_4kpbl) + use_4k_pbls = 1; } + + if (use_256_pbls) { + pbl_count = pbl_count_256; + nesadapter->free_256pbl -= pbl_count + use_two_level; + } else if (use_4k_pbls) { + pbl_count = pbl_count_4k; + nesadapter->free_4kpbl -= pbl_count + use_two_level; + } else { + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + nes_debug(NES_DBG_MR, "Out of Pbls\n"); + nes_free_cqp_request(nesdev, cqp_request); + return -ENOMEM; + } + + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); } - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + if (use_256_pbls && use_two_level) { + if (root_256(nesdev, root_vpbl, &new_root, pbl_count_4k, pbl_count_256) == 1) { + if (new_root.pbl_pbase != 0) + root_vpbl = &new_root; + } else { + spin_lock_irqsave(&nesadapter->pbl_lock, flags); + nesadapter->free_256pbl += pbl_count_256 + use_two_level; + use_256_pbls = 0; + + if (pbl_count_4k == 1) + use_two_level = 0; + pbl_count = pbl_count_4k; + + if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) { + nesadapter->free_4kpbl -= pbl_count + use_two_level; + use_4k_pbls = 1; + } + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + + if (use_4k_pbls == 0) + return -ENOMEM; + } + } opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR; @@ -1974,10 +2062,9 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, } else { set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase); set_wqe_32bit_value(cqp_wqe->wqe_words, 
NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count); - set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, - (((pbl_count - 1) * 4096) + (residual_page_count*8))); + set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (pg_cnt * 8)); - if ((pbl_count > 1) || (residual_page_count > 32)) + if (use_4k_pbls) cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE); } barrier(); @@ -1994,13 +2081,25 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, major_code = cqp_request->major_code; nes_put_cqp_request(nesdev, cqp_request); + if ((!ret || major_code) && pbl_count != 0) { + spin_lock_irqsave(&nesadapter->pbl_lock, flags); + if (use_256_pbls) + nesadapter->free_256pbl += pbl_count + use_two_level; + else if (use_4k_pbls) + nesadapter->free_4kpbl += pbl_count + use_two_level; + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + } + if (new_root.pbl_pbase) + pci_free_consistent(nesdev->pcidev, 512, new_root.pbl_vbase, + new_root.pbl_pbase); + if (!ret) return -ETIME; else if (major_code) return -EIO; - else - return 0; + *actual_pbl_cnt = pbl_count + use_two_level; + *used_4k_pbls = use_4k_pbls; return 0; } @@ -2165,18 +2264,14 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd, pbl_count = root_pbl_index; } ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl, - buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start); + buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start, + &nesmr->pbls_used, &nesmr->pbl_4k); if (ret == 0) { nesmr->ibmr.rkey = stag; nesmr->ibmr.lkey = stag; nesmr->mode = IWNES_MEMREG_TYPE_MEM; ibmr = &nesmr->ibmr; - nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0; - nesmr->pbls_used = pbl_count; - if (pbl_count > 1) { - nesmr->pbls_used++; - } } else { kfree(nesmr); ibmr = ERR_PTR(-ENOMEM); @@ -2454,8 +2549,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, stag, (unsigned int)iova_start, (unsigned int)region_length, stag_index, (unsigned long long)region->length, pbl_count); - ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl, - first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start); + ret = nes_reg_mr(nesdev, nespd, stag, region->length, &root_vpbl, + first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, + &iova_start, &nesmr->pbls_used, &nesmr->pbl_4k); nes_debug(NES_DBG_MR, "ret=%d\n", ret); @@ -2464,11 +2560,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, nesmr->ibmr.lkey = stag; nesmr->mode = IWNES_MEMREG_TYPE_MEM; ibmr = &nesmr->ibmr; - nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 
1 : 0; - nesmr->pbls_used = pbl_count; - if (pbl_count > 1) { - nesmr->pbls_used++; - } } else { ib_umem_release(region); kfree(nesmr); @@ -2607,24 +2698,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr) cqp_request->waiting = 1; cqp_wqe = &cqp_request->cqp_wqe; - spin_lock_irqsave(&nesadapter->pbl_lock, flags); - if (nesmr->pbls_used != 0) { - if (nesmr->pbl_4k) { - nesadapter->free_4kpbl += nesmr->pbls_used; - if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) { - printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n", - nesadapter->free_4kpbl, nesadapter->max_4kpbl); - } - } else { - nesadapter->free_256pbl += nesmr->pbls_used; - if (nesadapter->free_256pbl > nesadapter->max_256pbl) { - printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n", - nesadapter->free_256pbl, nesadapter->max_256pbl); - } - } - } - - spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO | @@ -2642,11 +2715,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr) " CQP Major:Minor codes = 0x%04X:0x%04X\n", ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code); - nes_free_resource(nesadapter, nesadapter->allocated_mrs, - (ib_mr->rkey & 0x0fffff00) >> 8); - - kfree(nesmr); - major_code = cqp_request->major_code; minor_code = cqp_request->minor_code; @@ -2662,8 +2730,33 @@ static int nes_dereg_mr(struct ib_mr *ib_mr) " to destroy STag, ib_mr=%p, rkey = 0x%08X\n", major_code, minor_code, ib_mr, ib_mr->rkey); return -EIO; - } else - return 0; + } + + if (nesmr->pbls_used != 0) { + spin_lock_irqsave(&nesadapter->pbl_lock, flags); + if (nesmr->pbl_4k) { + nesadapter->free_4kpbl += nesmr->pbls_used; + if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) + printk(KERN_ERR PFX "free 4KB PBLs(%u) has " + "exceeded the max(%u)\n", + nesadapter->free_4kpbl, + nesadapter->max_4kpbl); + } else { + nesadapter->free_256pbl += nesmr->pbls_used; + if (nesadapter->free_256pbl > nesadapter->max_256pbl) + printk(KERN_ERR PFX "free 256B PBLs(%u) has " + "exceeded the max(%u)\n", + nesadapter->free_256pbl, + nesadapter->max_256pbl); + } + spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); + } + nes_free_resource(nesadapter, nesadapter->allocated_mrs, + (ib_mr->rkey & 0x0fffff00) >> 8); + + kfree(nesmr); + + return 0; } diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h index 6c6b4da5184..5e48f67fbe8 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.h +++ b/drivers/infiniband/hw/nes/nes_verbs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. + * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -134,6 +134,7 @@ struct nes_qp { struct ietf_mpa_frame *ietf_frame; dma_addr_t ietf_frame_pbase; wait_queue_head_t state_waitq; + struct ib_mr *lsmm_mr; unsigned long socket; struct nes_hw_qp hwqp; struct work_struct work; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a1925810be3..da608273983 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -446,11 +446,11 @@ poll_more: if (dev->features & NETIF_F_LRO) lro_flush_all(&priv->lro.lro_mgr); - netif_rx_complete(napi); + napi_complete(napi); if (unlikely(ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)) && - netif_rx_reschedule(napi)) + napi_reschedule(napi)) goto poll_more; } @@ -462,7 +462,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) struct net_device *dev = dev_ptr; struct ipoib_dev_priv *priv = netdev_priv(dev); - netif_rx_schedule(&priv->napi); + napi_schedule(&priv->napi); } static void drain_tx_cq(struct net_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index dce0443f9d6..421a6640c9b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -106,23 +106,17 @@ int ipoib_open(struct net_device *dev) ipoib_dbg(priv, "bringing up interface\n"); - set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); + if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + napi_enable(&priv->napi); if (ipoib_pkey_dev_delay_open(dev)) return 0; - napi_enable(&priv->napi); + if (ipoib_ib_dev_open(dev)) + goto err_disable; - if (ipoib_ib_dev_open(dev)) { - napi_disable(&priv->napi); - return -EINVAL; - } - - if (ipoib_ib_dev_up(dev)) { - ipoib_ib_dev_stop(dev, 1); - napi_disable(&priv->napi); - return -EINVAL; - } + if (ipoib_ib_dev_up(dev)) + goto err_stop; if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { struct ipoib_dev_priv *cpriv; @@ -144,6 +138,15 @@ int ipoib_open(struct net_device *dev) netif_start_queue(dev); return 0; + +err_stop: + ipoib_ib_dev_stop(dev, 1); + +err_disable: + napi_disable(&priv->napi); + clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); + + return -EINVAL; } static int ipoib_stop(struct net_device *dev) @@ -657,8 +660,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, path = __path_find(dev, phdr->hwaddr + 4); if (!path || !path->valid) { - if (!path) + int new_path = 0; + + if (!path) { path = path_rec_create(dev, phdr->hwaddr + 4); + new_path = 1; + } if (path) { /* put pseudoheader back on for next time */ skb_push(skb, sizeof *phdr); @@ -666,7 +673,8 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, if (!path->query && path_rec_start(dev, path)) { spin_unlock_irqrestore(&priv->lock, flags); - path_free(dev, path); + if (new_path) + path_free(dev, path); return; } else __path_add(dev, path); @@ -1013,18 +1021,22 @@ static void ipoib_lro_setup(struct ipoib_dev_priv *priv) priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; } +static const struct net_device_ops ipoib_netdev_ops = { + .ndo_open = ipoib_open, + .ndo_stop = ipoib_stop, + .ndo_change_mtu = ipoib_change_mtu, + .ndo_start_xmit = ipoib_start_xmit, + .ndo_tx_timeout = ipoib_timeout, + .ndo_set_multicast_list = ipoib_set_mcast_list, + .ndo_neigh_setup = ipoib_neigh_setup_dev, +}; + static void ipoib_setup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); - 
dev->open = ipoib_open; - dev->stop = ipoib_stop; - dev->change_mtu = ipoib_change_mtu; - dev->hard_start_xmit = ipoib_start_xmit; - dev->tx_timeout = ipoib_timeout; + dev->netdev_ops = &ipoib_netdev_ops; dev->header_ops = &ipoib_header_ops; - dev->set_multicast_list = ipoib_set_mcast_list; - dev->neigh_setup = ipoib_neigh_setup_dev; ipoib_set_ethtool_ops(dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 59d02e0b8df..425e31112ed 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -409,7 +409,7 @@ static int ipoib_mcast_join_complete(int status, } if (mcast->logcount++ < 20) { - if (status == -ETIMEDOUT) { + if (status == -ETIMEDOUT || status == -EAGAIN) { ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); } else { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 2cf1a408871..5a76a551035 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -61,6 +61,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) ppriv = netdev_priv(pdev); + rtnl_lock(); mutex_lock(&ppriv->vlan_mutex); /* @@ -111,7 +112,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) goto device_init_failed; } - result = register_netdev(priv->dev); + result = register_netdevice(priv->dev); if (result) { ipoib_warn(priv, "failed to initialize; error %i", result); goto register_failed; @@ -134,12 +135,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) list_add_tail(&priv->list, &ppriv->child_intfs); mutex_unlock(&ppriv->vlan_mutex); + rtnl_unlock(); return 0; sysfs_failed: ipoib_delete_debug_files(priv->dev); - unregister_netdev(priv->dev); + unregister_netdevice(priv->dev); register_failed: ipoib_dev_cleanup(priv->dev); @@ -149,6 +151,7 @@ device_init_failed: err: mutex_unlock(&ppriv->vlan_mutex); + rtnl_unlock(); return result; } @@ -162,10 +165,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) ppriv = netdev_priv(pdev); + rtnl_lock(); mutex_lock(&ppriv->vlan_mutex); list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { if (priv->pkey == pkey) { - unregister_netdev(priv->dev); + unregister_netdevice(priv->dev); ipoib_dev_cleanup(priv->dev); list_del(&priv->list); free_netdev(priv->dev); @@ -175,6 +179,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) } } mutex_unlock(&ppriv->vlan_mutex); + rtnl_unlock(); return ret; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 319b188145b..ea9e1556e0d 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -401,13 +401,6 @@ static void iser_route_handler(struct rdma_cm_id *cma_id) if (ret) goto failure; - iser_dbg("path.mtu is %d setting it to %d\n", - cma_id->route.path_rec->mtu, IB_MTU_1024); - - /* we must set the MTU to 1024 as this is what the target is assuming */ - if (cma_id->route.path_rec->mtu > IB_MTU_1024) - cma_id->route.path_rec->mtu = IB_MTU_1024; - memset(&conn_param, 0, sizeof conn_param); conn_param.responder_resources = 4; conn_param.initiator_depth = 1;
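
Editor's note: the recurring change in the nes_nic.c and ipoib_main.c hunks above is the conversion from per-instance callback assignments (dev->open = ipoib_open, netdev->hard_start_xmit = nes_netdev_start_xmit, and so on) to a single shared, read-only table of function pointers bound through netdev->netdev_ops. A minimal userspace sketch of that idiom follows; it is not kernel code, and struct sample_dev, struct sample_dev_ops, sample_ops, and the function names in it are invented for illustration.

/* Sketch of the ops-table idiom behind the netdev_ops conversions above:
 * all instances share one const table bound through a single pointer,
 * instead of having N function-pointer fields written into each instance.
 * Every name here is hypothetical; only the pattern mirrors the kernel. */
#include <stdio.h>

struct sample_dev;

struct sample_dev_ops {				/* analogous to net_device_ops */
	int  (*open)(struct sample_dev *dev);
	void (*stop)(struct sample_dev *dev);
};

struct sample_dev {
	const char *name;
	const struct sample_dev_ops *ops;	/* one pointer replaces many fields */
};

static int sample_open(struct sample_dev *dev)
{
	printf("%s: open\n", dev->name);
	return 0;
}

static void sample_stop(struct sample_dev *dev)
{
	printf("%s: stop\n", dev->name);
}

/* Designated initializers, as in nes_netdev_ops and ipoib_netdev_ops. */
static const struct sample_dev_ops sample_ops = {
	.open = sample_open,
	.stop = sample_stop,
};

int main(void)
{
	struct sample_dev dev = { .name = "sample0", .ops = &sample_ops };

	if (dev.ops->open(&dev) == 0)
		dev.ops->stop(&dev);
	return 0;
}

Because the table is const and shared, a stray write faults instead of silently corrupting a callback, and each device instance costs one pointer rather than a dozen. One caveat of designated initializers: if the same field is named twice, the later entry silently wins, and GCC reports the override only when -Woverride-init (enabled by -Wextra) is in effect.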