Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r-- | lib/swiotlb.c | 51
1 file changed, 21 insertions(+), 30 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8c..f8eebd48914 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	dma_addr_t mask = 0xffffffff;
-	/* If the device has a mask, use it, otherwise default to 32 bits */
-	if (hwdev && hwdev->dma_mask)
-		mask = *hwdev->dma_mask;
-	return (addr & ~mask) != 0;
+	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
 /*
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 
-	/*
-	 * XXX fix me: the DMA API should pass us an explicit DMA mask
-	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-	 * bit range instead of a 16MB one).
-	 */
-	flags |= GFP_DMA;
-
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		dma_addr_t handle;
-		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (swiotlb_dma_mapping_error(handle))
+		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+		if (!ret)
 			return NULL;
-
-		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr)) {
+	if (address_needs_mapping(hwdev, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		      dma_addr_t dma_handle)
 {
 	WARN_ON(irqs_disabled());
-	if (!(vaddr >= (void *)io_tlb_start
-	      && vaddr < (void *)io_tlb_end))
+	if (!is_swiotlb_buffer(vaddr))
 		free_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+	if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(hwdev, dev_addr))
+	if (address_needs_mapping(hwdev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	for_each_sg(sgl, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_bus(addr);
-		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+		if (swiotlb_force ||
+		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
@@ -824,7 +815,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 
 int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
 	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }
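For readers following the reworked address_needs_mapping(), the two helpers it now calls are defined outside lib/swiotlb.c. The sketch below is a plausible rendering of those generic DMA helpers as they existed around this change; the function names come from the diff, but the bodies shown here are assumptions for illustration, not part of this patch.

/*
 * Sketch of the generic helpers assumed by the reworked
 * address_needs_mapping(); roughly what include/linux/dma-mapping.h
 * provided in this era. Bodies are illustrative assumptions, not
 * copied from this commit.
 */
static inline u64 dma_get_mask(struct device *dev)
{
	/* Use the device's DMA mask if it has one, otherwise assume 32 bits. */
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
	/* The entire buffer, not just its first byte, must fit under the mask. */
	return addr + size <= mask;
}

Passing the buffer size through to these checks is what lets swiotlb catch a buffer that starts below the device's DMA mask but extends past it. The new struct device * argument to swiotlb_dma_mapping_error() adds no logic here (the body still only compares against the overflow buffer); it simply matches the dma_mapping_error(dev, dma_addr) calling convention adopted across the DMA API.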