Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/core/cma.c | 5
-rw-r--r-- | drivers/infiniband/core/umem.c | 11
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_classes.h | 2
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_eq.c | 6
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_main.c | 36
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_mrmw.c | 6
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_dma.c | 12
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_mr.c | 2
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_memfree.c | 24
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib.h | 4
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_main.c | 29
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 3
-rw-r--r-- | drivers/infiniband/ulp/iser/iser_memory.c | 75
-rw-r--r-- | drivers/infiniband/ulp/srp/Kconfig | 1
-rw-r--r-- | drivers/infiniband/ulp/srp/ib_srp.c | 28
15 files changed, 151 insertions, 93 deletions
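Most of the churn in this diffstat is the scatterlist API conversion: drivers stop touching sg->page directly and go through the sg_page()/sg_set_page() accessors, and page tables are prepared with sg_init_table() so the end marker required by chained scatterlists is in place. A minimal sketch of the pattern, modelled on the umem.c hunks below (the helper names are made up; sg_set_page() in this tree takes only the entry and the page, with length and offset still assigned by hand, while later kernels fold them into the call):

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Hypothetical helpers illustrating the accessor-based pattern. */
static void chunk_fill(struct scatterlist *sg_list, struct page **pages, int nents)
{
	int i;

	sg_init_table(sg_list, nents);			/* clears the entries, sets the end marker */
	for (i = 0; i < nents; ++i) {
		sg_set_page(&sg_list[i], pages[i]);	/* was: sg_list[i].page = pages[i]; */
		sg_list[i].offset = 0;
		sg_list[i].length = PAGE_SIZE;
	}
}

static void chunk_release(struct scatterlist *sg_list, int nents, int dirty)
{
	int i;

	for (i = 0; i < nents; ++i) {
		struct page *page = sg_page(&sg_list[i]);	/* was: sg_list[i].page */

		if (dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

The umem.c and mthca_memfree.c hunks below follow exactly this shape; the ehca, ipath and iser hunks are the read-only half of the same conversion (sg_page() instead of .page).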
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index ee946cc2576..0751697ef98 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2803,11 +2803,12 @@ static void cma_remove_one(struct ib_device *device) static int cma_init(void) { - int ret, low, high; + int ret, low, high, remaining; get_random_bytes(&next_port, sizeof next_port); inet_get_local_port_range(&low, &high); - next_port = ((unsigned int) next_port % (high - low)) + low; + remaining = (high - low) + 1; + next_port = ((unsigned int) next_port % remaining) + low; cma_wq = create_singlethread_workqueue("rdma_cm"); if (!cma_wq) diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 2f54e29dc7a..14159ff2940 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d ib_dma_unmap_sg(dev, chunk->page_list, chunk->nents, DMA_BIDIRECTIONAL); for (i = 0; i < chunk->nents; ++i) { + struct page *page = sg_page(&chunk->page_list[i]); + if (umem->writable && dirty) - set_page_dirty_lock(chunk->page_list[i].page); - put_page(chunk->page_list[i].page); + set_page_dirty_lock(page); + put_page(page); } kfree(chunk); @@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, } chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK); + sg_init_table(chunk->page_list, chunk->nents); for (i = 0; i < chunk->nents; ++i) { if (vma_list && !is_vm_hugetlb_page(vma_list[i + off])) umem->hugetlb = 0; - chunk->page_list[i].page = page_list[i + off]; + sg_set_page(&chunk->page_list[i], page_list[i + off]); chunk->page_list[i].offset = 0; chunk->page_list[i].length = PAGE_SIZE; } @@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, DMA_BIDIRECTIONAL); if (chunk->nmap <= 0) { for (i = 0; i < chunk->nents; ++i) - put_page(chunk->page_list[i].page); + put_page(sg_page(&chunk->page_list[i])); kfree(chunk); ret = -ENOMEM; diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 365bc5dfcc8..2d660ae189e 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h @@ -107,7 +107,7 @@ struct ehca_sport { struct ehca_shca { struct ib_device ib_device; - struct ibmebus_dev *ibmebus_dev; + struct of_device *ofdev; u8 num_ports; int hw_level; struct list_head shca_list; diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c index 1d41faa7a33..b4ac617a70e 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/infiniband/hw/ehca/ehca_eq.c @@ -123,7 +123,7 @@ int ehca_create_eq(struct ehca_shca *shca, /* register interrupt handlers and initialize work queues */ if (type == EHCA_EQ) { - ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq, + ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, IRQF_DISABLED, "ehca_eq", (void *)shca); if (ret < 0) @@ -131,7 +131,7 @@ int ehca_create_eq(struct ehca_shca *shca, tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); } else if (type == EHCA_NEQ) { - ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq, + ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, IRQF_DISABLED, "ehca_neq", (void *)shca); if (ret < 0) @@ -171,7 +171,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq) u64 h_ret; spin_lock_irqsave(&eq->spinlock, flags); - ibmebus_free_irq(NULL, eq->ist, (void 
*)shca); + ibmebus_free_irq(eq->ist, (void *)shca); h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq); diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 2f51c13d2f4..c6cd38c5321 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -418,7 +418,7 @@ int ehca_init_device(struct ehca_shca *shca) shca->ib_device.node_type = RDMA_NODE_IB_CA; shca->ib_device.phys_port_cnt = shca->num_ports; shca->ib_device.num_comp_vectors = 1; - shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev; + shca->ib_device.dma_device = &shca->ofdev->dev; shca->ib_device.query_device = ehca_query_device; shca->ib_device.query_port = ehca_query_port; shca->ib_device.query_gid = ehca_query_gid; @@ -593,12 +593,12 @@ static ssize_t ehca_show_##name(struct device *dev, \ \ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \ if (!rblock) { \ - dev_err(dev, "Can't allocate rblock memory."); \ + dev_err(dev, "Can't allocate rblock memory.\n"); \ return 0; \ } \ \ if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \ - dev_err(dev, "Can't query device properties"); \ + dev_err(dev, "Can't query device properties\n"); \ ehca_free_fw_ctrlblock(rblock); \ return 0; \ } \ @@ -672,7 +672,7 @@ static struct attribute_group ehca_dev_attr_grp = { .attrs = ehca_dev_attrs }; -static int __devinit ehca_probe(struct ibmebus_dev *dev, +static int __devinit ehca_probe(struct of_device *dev, const struct of_device_id *id) { struct ehca_shca *shca; @@ -680,16 +680,16 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev, struct ib_pd *ibpd; int ret; - handle = of_get_property(dev->ofdev.node, "ibm,hca-handle", NULL); + handle = of_get_property(dev->node, "ibm,hca-handle", NULL); if (!handle) { ehca_gen_err("Cannot get eHCA handle for adapter: %s.", - dev->ofdev.node->full_name); + dev->node->full_name); return -ENODEV; } if (!(*handle)) { ehca_gen_err("Wrong eHCA handle for adapter: %s.", - dev->ofdev.node->full_name); + dev->node->full_name); return -ENODEV; } @@ -700,9 +700,9 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev, } mutex_init(&shca->modify_mutex); - shca->ibmebus_dev = dev; + shca->ofdev = dev; shca->ipz_hca_handle.handle = *handle; - dev->ofdev.dev.driver_data = shca; + dev->dev.driver_data = shca; ret = ehca_sense_attributes(shca); if (ret < 0) { @@ -778,7 +778,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev, } } - ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp); + ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp); if (ret) /* only complain; we can live without attributes */ ehca_err(&shca->ib_device, "Cannot create device attributes ret=%d", ret); @@ -828,12 +828,12 @@ probe1: return -EINVAL; } -static int __devexit ehca_remove(struct ibmebus_dev *dev) +static int __devexit ehca_remove(struct of_device *dev) { - struct ehca_shca *shca = dev->ofdev.dev.driver_data; + struct ehca_shca *shca = dev->dev.driver_data; int ret; - sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp); + sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp); if (ehca_open_aqp1 == 1) { int i; @@ -884,11 +884,11 @@ static struct of_device_id ehca_device_table[] = {}, }; -static struct ibmebus_driver ehca_driver = { - .name = "ehca", - .id_table = ehca_device_table, - .probe = ehca_probe, - .remove = ehca_remove, +static struct of_platform_driver ehca_driver = { + .name = "ehca", + .match_table = ehca_device_table, + .probe = ehca_probe, + .remove = ehca_remove, }; void 
ehca_poll_eqs(unsigned long data) diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index bb9791555f4..e239bbf54da 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -1769,7 +1769,7 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo, list_for_each_entry_continue( chunk, (&(pginfo->u.usr.region->chunk_list)), list) { for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { - pgaddr = page_to_pfn(chunk->page_list[i].page) + pgaddr = page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ; *kpage = phys_to_abs(pgaddr + (pginfo->next_hwpage * @@ -1825,7 +1825,7 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list, { int t; for (t = start_idx; t <= end_idx; t++) { - u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT; + u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); if (pgaddr - PAGE_SIZE != *prev_pgaddr) { @@ -1860,7 +1860,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, chunk, (&(pginfo->u.usr.region->chunk_list)), list) { for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { if (nr_kpages == kpages_per_hwpage) { - pgaddr = ( page_to_pfn(chunk->page_list[i].page) + pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ); *kpage = phys_to_abs(pgaddr); if ( !(*kpage) ) { diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c index f87f003e3ef..e90a0ea538a 100644 --- a/drivers/infiniband/hw/ipath/ipath_dma.c +++ b/drivers/infiniband/hw/ipath/ipath_dma.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include <linux/scatterlist.h> #include <rdma/ib_verbs.h> #include "ipath_verbs.h" @@ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev, BUG_ON(!valid_dma_direction(direction)); } -static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) +static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction direction) { + struct scatterlist *sg; u64 addr; int i; int ret = nents; BUG_ON(!valid_dma_direction(direction)); - for (i = 0; i < nents; i++) { - addr = (u64) page_address(sg[i].page); + for_each_sg(sgl, sg, nents, i) { + addr = (u64) page_address(sg_page(sg)); /* TODO: handle highmem pages */ if (!addr) { ret = 0; @@ -125,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev, static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) { - u64 addr = (u64) page_address(sg->page); + u64 addr = (u64) page_address(sg_page(sg)); if (addr) addr += sg->offset; diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index e442470a237..db4ba92f79f 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c @@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, for (i = 0; i < chunk->nents; i++) { void *vaddr; - vaddr = page_address(chunk->page_list[i].page); + vaddr = page_address(sg_page(&chunk->page_list[i])); if (!vaddr) { ret = ERR_PTR(-EINVAL); goto bail; diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index e61f3e62698..007b38157fc 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -71,7 +71,7 @@ static void 
mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk * PCI_DMA_BIDIRECTIONAL); for (i = 0; i < chunk->npages; ++i) - __free_pages(chunk->mem[i].page, + __free_pages(sg_page(&chunk->mem[i]), get_order(chunk->mem[i].length)); } @@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun for (i = 0; i < chunk->npages; ++i) { dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, - lowmem_page_address(chunk->mem[i].page), + lowmem_page_address(sg_page(&chunk->mem[i])), sg_dma_address(&chunk->mem[i])); } } @@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent) static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) { - mem->page = alloc_pages(gfp_mask, order); - if (!mem->page) + struct page *page; + + page = alloc_pages(gfp_mask, order); + if (!page) return -ENOMEM; + sg_set_page(mem, page); mem->length = PAGE_SIZE << order; mem->offset = 0; return 0; @@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, if (!chunk) goto fail; + sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN); chunk->npages = 0; chunk->nsg = 0; list_add_tail(&chunk->list, &icm->chunk_list); @@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h * so if we found the page, dma_handle has already * been assigned to. */ if (chunk->mem[i].length > offset) { - page = chunk->mem[i].page; + page = sg_page(&chunk->mem[i]); goto out; } offset -= chunk->mem[i].length; @@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index, u64 uaddr) { + struct page *pages[1]; int ret = 0; u8 status; int i; @@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, } ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0, - &db_tab->page[i].mem.page, NULL); + pages, NULL); if (ret < 0) goto out; + sg_set_page(&db_tab->page[i].mem, pages[0]); db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE; db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK; ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); if (ret < 0) { - put_page(db_tab->page[i].mem.page); + put_page(pages[0]); goto out; } @@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, ret = -EINVAL; if (ret) { pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); - put_page(db_tab->page[i].mem.page); + put_page(sg_page(&db_tab->page[i].mem)); goto out; } @@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, if (db_tab->page[i].uvirt) { mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); - put_page(db_tab->page[i].mem.page); + put_page(sg_page(&db_tab->page[i].mem)); } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 0a00ea0e887..eb7edab0e83 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -348,6 +348,7 @@ struct ipoib_neigh { struct sk_buff_head queue; struct neighbour *neighbour; + struct net_device *dev; struct list_head list; }; @@ -364,7 +365,8 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh) INFINIBAND_ALEN, sizeof(void *)); } -struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour 
*neigh); +struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh, + struct net_device *dev); void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh); extern struct workqueue_struct *ipoib_workqueue; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index ace2345960e..a03a65ebcf0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -515,7 +515,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) struct ipoib_path *path; struct ipoib_neigh *neigh; - neigh = ipoib_neigh_alloc(skb->dst->neighbour); + neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev); if (!neigh) { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); @@ -690,9 +690,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } } else if (neigh->ah) { - if (unlikely(memcmp(&neigh->dgid.raw, + if (unlikely((memcmp(&neigh->dgid.raw, skb->dst->neighbour->ha + 4, - sizeof(union ib_gid)))) { + sizeof(union ib_gid))) || + (neigh->dev != dev))) { spin_lock(&priv->lock); /* * It's safe to call ipoib_put_ah() inside @@ -815,6 +816,13 @@ static void ipoib_neigh_cleanup(struct neighbour *n) unsigned long flags; struct ipoib_ah *ah = NULL; + neigh = *to_ipoib_neigh(n); + if (neigh) { + priv = netdev_priv(neigh->dev); + ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n", + n->dev->name); + } else + return; ipoib_dbg(priv, "neigh_cleanup for %06x " IPOIB_GID_FMT "\n", IPOIB_QPN(n->ha), @@ -822,13 +830,10 @@ static void ipoib_neigh_cleanup(struct neighbour *n) spin_lock_irqsave(&priv->lock, flags); - neigh = *to_ipoib_neigh(n); - if (neigh) { - if (neigh->ah) - ah = neigh->ah; - list_del(&neigh->list); - ipoib_neigh_free(n->dev, neigh); - } + if (neigh->ah) + ah = neigh->ah; + list_del(&neigh->list); + ipoib_neigh_free(n->dev, neigh); spin_unlock_irqrestore(&priv->lock, flags); @@ -836,7 +841,8 @@ static void ipoib_neigh_cleanup(struct neighbour *n) ipoib_put_ah(ah); } -struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) +struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour, + struct net_device *dev) { struct ipoib_neigh *neigh; @@ -845,6 +851,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) return NULL; neigh->neighbour = neighbour; + neigh->dev = dev; *to_ipoib_neigh(neighbour) = neigh; skb_queue_head_init(&neigh->queue); ipoib_cm_set(neigh, NULL); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 827820ec66d..9bcfc7ad6aa 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -705,7 +705,8 @@ out: if (skb->dst && skb->dst->neighbour && !*to_ipoib_neigh(skb->dst->neighbour)) { - struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour); + struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour, + skb->dev); if (neigh) { kref_get(&mcast->ah->ref); diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index e05690e3592..d6879806179 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, if (cmd_dir == ISER_DIR_OUT) { /* copy the unaligned sg the buffer which is used for RDMA */ - struct scatterlist *sg = (struct scatterlist *)data->buf; + struct scatterlist *sgl = (struct 
scatterlist *)data->buf; + struct scatterlist *sg; int i; char *p, *from; - for (p = mem, i = 0; i < data->size; i++) { - from = kmap_atomic(sg[i].page, KM_USER0); + p = mem; + for_each_sg(sgl, sg, data->size, i) { + from = kmap_atomic(sg_page(sg), KM_USER0); memcpy(p, - from + sg[i].offset, - sg[i].length); + from + sg->offset, + sg->length); kunmap_atomic(from, KM_USER0); - p += sg[i].length; + p += sg->length; } } @@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, if (cmd_dir == ISER_DIR_IN) { char *mem; - struct scatterlist *sg; + struct scatterlist *sgl, *sg; unsigned char *p, *to; unsigned int sg_size; int i; @@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, /* copy back read RDMA to unaligned sg */ mem = mem_copy->copy_buf; - sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; + sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; sg_size = iser_ctask->data[ISER_DIR_IN].size; - for (p = mem, i = 0; i < sg_size; i++){ - to = kmap_atomic(sg[i].page, KM_SOFTIRQ0); - memcpy(to + sg[i].offset, + p = mem; + for_each_sg(sgl, sg, sg_size, i) { + to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); + memcpy(to + sg->offset, p, - sg[i].length); + sg->length); kunmap_atomic(to, KM_SOFTIRQ0); - p += sg[i].length; + p += sg->length; } } @@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, struct iser_page_vec *page_vec, struct ib_device *ibdev) { - struct scatterlist *sg = (struct scatterlist *)data->buf; + struct scatterlist *sgl = (struct scatterlist *)data->buf; + struct scatterlist *sg; u64 first_addr, last_addr, page; int end_aligned; unsigned int cur_page = 0; @@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, int i; /* compute the offset of first element */ - page_vec->offset = (u64) sg[0].offset & ~MASK_4K; + page_vec->offset = (u64) sgl[0].offset & ~MASK_4K; - for (i = 0; i < data->dma_nents; i++) { - unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]); + for_each_sg(sgl, sg, data->dma_nents, i) { + unsigned int dma_len = ib_sg_dma_len(ibdev, sg); total_sz += dma_len; - first_addr = ib_sg_dma_address(ibdev, &sg[i]); + first_addr = ib_sg_dma_address(ibdev, sg); last_addr = first_addr + dma_len; end_aligned = !(last_addr & ~MASK_4K); /* continue to collect page fragments till aligned or SG ends */ while (!end_aligned && (i + 1 < data->dma_nents)) { + sg = sg_next(sg); i++; - dma_len = ib_sg_dma_len(ibdev, &sg[i]); + dma_len = ib_sg_dma_len(ibdev, sg); total_sz += dma_len; - last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len; + last_addr = ib_sg_dma_address(ibdev, sg) + dma_len; end_aligned = !(last_addr & ~MASK_4K); } @@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, struct ib_device *ibdev) { - struct scatterlist *sg; + struct scatterlist *sgl, *sg; u64 end_addr, next_addr; int i, cnt; unsigned int ret_len = 0; - sg = (struct scatterlist *)data->buf; + sgl = (struct scatterlist *)data->buf; - for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) { + cnt = 0; + for_each_sg(sgl, sg, data->dma_nents, i) { /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX " "offset: %ld sz: %ld\n", i, - (unsigned long)page_to_phys(sg[i].page), - (unsigned long)sg[i].offset, - (unsigned long)sg[i].length); */ - end_addr = ib_sg_dma_address(ibdev, &sg[i]) + - ib_sg_dma_len(ibdev, &sg[i]); + (unsigned long)sg_phys(sg), + (unsigned 
long)sg->offset, + (unsigned long)sg->length); */ + end_addr = ib_sg_dma_address(ibdev, sg) + + ib_sg_dma_len(ibdev, sg); /* iser_dbg("Checking sg iobuf end address " "0x%08lX\n", end_addr); */ if (i + 1 < data->dma_nents) { - next_addr = ib_sg_dma_address(ibdev, &sg[i+1]); + next_addr = ib_sg_dma_address(ibdev, sg_next(sg)); /* are i, i+1 fragments of the same page? */ if (end_addr == next_addr) continue; @@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, static void iser_data_buf_dump(struct iser_data_buf *data, struct ib_device *ibdev) { - struct scatterlist *sg = (struct scatterlist *)data->buf; + struct scatterlist *sgl = (struct scatterlist *)data->buf; + struct scatterlist *sg; int i; - for (i = 0; i < data->dma_nents; i++) + for_each_sg(sgl, sg, data->dma_nents, i) iser_err("sg[%d] dma_addr:0x%lX page:0x%p " "off:0x%x sz:0x%x dma_len:0x%x\n", - i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]), - sg[i].page, sg[i].offset, - sg[i].length, ib_sg_dma_len(ibdev, &sg[i])); + i, (unsigned long)ib_sg_dma_address(ibdev, sg), + sg_page(sg), sg->offset, + sg->length, ib_sg_dma_len(ibdev, sg)); } static void iser_dump_page_vec(struct iser_page_vec *page_vec) diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig index 3432dce2952..c74ee963304 100644 --- a/drivers/infiniband/ulp/srp/Kconfig +++ b/drivers/infiniband/ulp/srp/Kconfig @@ -1,6 +1,7 @@ config INFINIBAND_SRP tristate "InfiniBand SCSI RDMA Protocol" depends on SCSI + select SCSI_SRP_ATTRS ---help--- Support for the SCSI RDMA Protocol over InfiniBand. This allows you to access storage devices that speak SRP over diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 9ccc63886d9..950228fb009 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -47,6 +47,7 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_dbg.h> #include <scsi/srp.h> +#include <scsi/scsi_transport_srp.h> #include <rdma/ib_cache.h> @@ -86,6 +87,8 @@ static void srp_remove_one(struct ib_device *device); static void srp_completion(struct ib_cq *cq, void *target_ptr); static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); +static struct scsi_transport_template *ib_srp_transport_template; + static struct ib_client srp_client = { .name = "srp", .add = srp_add_one, @@ -420,6 +423,7 @@ static void srp_remove_work(struct work_struct *work) list_del(&target->list); spin_unlock(&target->srp_host->target_lock); + srp_remove_host(target->scsi_host); scsi_remove_host(target->scsi_host); ib_destroy_cm_id(target->cm_id); srp_free_target_ib(target); @@ -1544,12 +1548,24 @@ static struct scsi_host_template srp_template = { static int srp_add_target(struct srp_host *host, struct srp_target_port *target) { + struct srp_rport_identifiers ids; + struct srp_rport *rport; + sprintf(target->target_name, "SRP.T10:%016llX", (unsigned long long) be64_to_cpu(target->id_ext)); if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device)) return -ENODEV; + memcpy(ids.port_id, &target->id_ext, 8); + memcpy(ids.port_id + 8, &target->ioc_guid, 8); + ids.roles = SRP_RPORT_ROLE_TARGET; + rport = srp_rport_add(target->scsi_host, &ids); + if (IS_ERR(rport)) { + scsi_remove_host(target->scsi_host); + return PTR_ERR(rport); + } + spin_lock(&host->target_lock); list_add_tail(&target->list, &host->target_list); spin_unlock(&host->target_lock); @@ -1775,6 +1791,7 @@ static ssize_t srp_create_target(struct class_device *class_dev, 
if (!target_host) return -ENOMEM; + target_host->transportt = ib_srp_transport_template; target_host->max_lun = SRP_MAX_LUN; target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; @@ -2054,10 +2071,18 @@ static void srp_remove_one(struct ib_device *device) kfree(srp_dev); } +static struct srp_function_template ib_srp_transport_functions = { +}; + static int __init srp_init_module(void) { int ret; + ib_srp_transport_template = + srp_attach_transport(&ib_srp_transport_functions); + if (!ib_srp_transport_template) + return -ENOMEM; + srp_template.sg_tablesize = srp_sg_tablesize; srp_max_iu_len = (sizeof (struct srp_cmd) + sizeof (struct srp_indirect_buf) + @@ -2066,6 +2091,7 @@ static int __init srp_init_module(void) ret = class_register(&srp_class); if (ret) { printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); + srp_release_transport(ib_srp_transport_template); return ret; } @@ -2074,6 +2100,7 @@ static int __init srp_init_module(void) ret = ib_register_client(&srp_client); if (ret) { printk(KERN_ERR PFX "couldn't register IB client\n"); + srp_release_transport(ib_srp_transport_template); ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); return ret; @@ -2087,6 +2114,7 @@ static void __exit srp_cleanup_module(void) ib_unregister_client(&srp_client); ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); + srp_release_transport(ib_srp_transport_template); } module_init(srp_init_module); |
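A few notes on the individual changes above.

The cma_init() hunk fixes an off-by-one in picking the starting port: next_port % (high - low) + low can never produce high itself, so with the default local port range of the time (32768..61000) only 28232 of the 28233 candidate ports were reachable. The inclusive range size is now computed separately:

	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;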
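The ehca changes track the conversion of ibmebus devices to the generic of_platform bus: struct ehca_shca stores a struct of_device * instead of an ibmebus_dev *, probe and remove take the of_device directly, ibmebus_request_irq()/ibmebus_free_irq() lose their dummy first argument, device-tree properties are read from dev->node rather than dev->ofdev.node, and the driver itself becomes an of_platform_driver whose ID table moves to .match_table. An include-free sketch of the resulting shape (names and the match entry are placeholders; registering the driver with the bus is outside the hunks shown):

static struct of_device_id foo_device_table[] = {
	{ .compatible = "example,foo" },	/* placeholder match entry */
	{},
};

static int __devinit foo_probe(struct of_device *dev,
			       const struct of_device_id *id)
{
	const u64 *handle;

	/* properties now come straight from the device node */
	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
	if (!handle || !*handle)
		return -ENODEV;

	/* allocate per-adapter state and stash it in dev->dev.driver_data */
	return 0;
}

static int __devexit foo_remove(struct of_device *dev)
{
	/* tear down whatever foo_probe() set up */
	return 0;
}

static struct of_platform_driver foo_driver = {
	.name        = "foo",
	.match_table = foo_device_table,
	.probe       = foo_probe,
	.remove      = foo_remove,
};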
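ipath_dma.c and iser_memory.c switch from indexing sg[i] to the for_each_sg() iterator, which keeps working once a scatterlist is chained across several arrays, and read pages through sg_page(). A small sketch over an already mapped list (the debug helper name is made up):

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static void dump_mapped_sg(struct ib_device *ibdev,
			   struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	/* was: for (i = 0; i < nents; i++) { ... &sg[i] ... } */
	for_each_sg(sgl, sg, nents, i)
		pr_debug("sg[%d] dma 0x%llx page %p off %u len %u dma_len %u\n",
			 i, (unsigned long long) ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset, sg->length,
			 ib_sg_dma_len(ibdev, sg));
}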
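mthca_map_user_db() shows the same conversion for a single pinned user page: get_user_pages() now fills a local struct page * array, sg_set_page() attaches the page to the scatterlist entry, and both the error and teardown paths recover it with sg_page(). A condensed sketch of the pin/unpin pairing, using the two-argument sg_set_page() of this tree and hypothetical helper names; the caller is assumed to hold mmap_sem for read, as get_user_pages() requires:

#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>

static int pin_user_db_page(struct pci_dev *pdev, struct scatterlist *sg,
			    unsigned long uaddr)
{
	struct page *pages[1];
	int ret;

	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     1, 1, 0, pages, NULL);
	if (ret < 0)
		return ret;

	sg_init_table(sg, 1);
	sg_set_page(sg, pages[0]);		/* was: sg->page = pages[0]; */
	sg->length = PAGE_SIZE;
	sg->offset = uaddr & ~PAGE_MASK;

	ret = pci_map_sg(pdev, sg, 1, PCI_DMA_TODEVICE);
	if (ret < 0) {
		put_page(pages[0]);
		return ret;
	}
	return 0;
}

static void unpin_user_db_page(struct pci_dev *pdev, struct scatterlist *sg)
{
	pci_unmap_sg(pdev, sg, 1, PCI_DMA_TODEVICE);
	put_page(sg_page(sg));			/* was: put_page(sg->page); */
}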
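The IPoIB hunks record the owning net_device in each struct ipoib_neigh, allocated through the extended ipoib_neigh_alloc(neighbour, dev). That lets ipoib_neigh_cleanup() reach its private data from the neighbour entry itself (n->dev may be a bonding master rather than the IPoIB port), and lets ipoib_start_xmit() throw away a cached neighbour whose recorded device no longer matches the device the packet is leaving on after a bonding failover. A condensed restatement of the patched cleanup path, with the address-handle release trimmed:

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh = *to_ipoib_neigh(n);
	struct ipoib_dev_priv *priv;
	unsigned long flags;

	if (!neigh)
		return;

	/* use the device recorded at ipoib_neigh_alloc() time, not n->dev */
	priv = netdev_priv(neigh->dev);

	spin_lock_irqsave(&priv->lock, flags);
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* neigh->ah, if any, is dropped with ipoib_put_ah() after the lock */
}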
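Finally, ib_srp is attached to the SCSI SRP transport class, which is why the Kconfig entry now selects SCSI_SRP_ATTRS: an (as yet empty) srp_function_template is attached at module load, each new target gets a remote port via srp_rport_add() right after scsi_add_host(), and teardown mirrors this with srp_remove_host() before scsi_remove_host(), plus srp_release_transport() on module exit and on the init-error paths. A stripped-down sketch of the module-lifetime pairing (registration of the class and IB clients is reduced to comments):

#include <linux/init.h>
#include <linux/module.h>
#include <scsi/scsi_transport_srp.h>

static struct srp_function_template ib_srp_transport_functions = {
	/* no rport callbacks yet; attaching still creates the transport objects */
};

static struct scsi_transport_template *ib_srp_transport_template;

static int __init srp_init_module(void)
{
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	/* register the class, SA client and IB client here; every failure
	 * branch also calls srp_release_transport() before returning */
	return 0;
}

static void __exit srp_cleanup_module(void)
{
	/* unregister the IB client, SA client and class first */
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);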