| author | Jan Beulich <jbeulich@novell.com> | 2007-02-05 18:49:45 -0800 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2007-02-05 18:49:45 -0800 |
| commit | 93fbff63e62b87fe450814db41f859d60b048fb8 | |
| tree | 3a806003e88a1270820f6a1aa1e60e28b994277e | |
| parent | cde14bbfb3aa79b479db35bd29e6c083513d8614 | |
[IA64] make swiotlb use bus_to_virt/virt_to_bus
Convert all phys_to_virt/virt_to_phys uses to bus_to_virt/virt_to_bus, since a
bus address is what is actually meant here, and what is needed in (at least)
some virtualized environments such as Xen, where the bus (machine) addresses
seen by devices differ from the guest's physical addresses.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
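As background (an editorial sketch, not part of the commit): phys_to_virt/virt_to_phys are only correct when physical and bus addresses coincide. Under Xen, a paravirtualized guest's pseudo-physical frames map to arbitrary machine frames, and devices DMA to machine (bus) addresses, so swiotlb must use the bus-address helpers. The minimal C sketch below illustrates the distinction; p2m_table, phys_to_bus_native(), and phys_to_bus_xen() are hypothetical names used for illustration, not kernel APIs.

```c
#include <stdint.h>

#define PAGE_SHIFT       12
#define PAGE_OFFSET_MASK ((1UL << PAGE_SHIFT) - 1)

/* Hypothetical stand-in for a Xen guest's phys-to-machine table. */
extern const uint64_t p2m_table[];

/* Bare metal: bus and physical addresses coincide, so a
 * virt_to_bus() implementation can simply reuse virt_to_phys(). */
static inline uint64_t phys_to_bus_native(uint64_t paddr)
{
	return paddr;
}

/* Xen-style PV guest: translate the pseudo-physical frame number
 * through the P2M table to get the machine frame the bus sees. */
static inline uint64_t phys_to_bus_xen(uint64_t paddr)
{
	uint64_t mfn = p2m_table[paddr >> PAGE_SHIFT];

	return (mfn << PAGE_SHIFT) | (paddr & PAGE_OFFSET_MASK);
}
```

On bare metal the two functions agree, which is why the old code appeared to work; only under a hypervisor does the difference become visible.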
-rw-r--r-- | lib/swiotlb.c | 33 |
1 file changed, 17 insertions(+), 16 deletions(-)
```diff
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34278338aad..bc684d1fd42 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -36,7 +36,7 @@
 	                           ( (val) & ( (align) - 1)))
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -163,7 +163,7 @@ swiotlb_init_with_default_size (size_t default_size)
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }
 
 void
@@ -244,7 +244,7 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
 	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 
 	return 0;
@@ -445,7 +445,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		flags |= GFP_DMA;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -465,11 +465,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		if (swiotlb_dma_mapping_error(handle))
 			return NULL;
 
-		ret = phys_to_virt(handle);
+		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
-	dev_addr = virt_to_phys(ret);
+	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (address_needs_mapping(hwdev, dev_addr)) {
@@ -525,7 +525,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_phys(ptr);
+	unsigned long dev_addr = virt_to_bus(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -546,7 +546,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = virt_to_phys(map);
+	dev_addr = virt_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -569,7 +569,7 @@ void
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 		     int dir)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -592,7 +592,7 @@ static inline void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -623,7 +623,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr) + offset;
+	char *dma_addr = bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -676,7 +676,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_phys(addr);
+		dev_addr = virt_to_bus(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
@@ -709,7 +709,8 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+			unmap_single(hwdev, bus_to_virt(sg->dma_address),
+				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
@@ -731,7 +732,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address,
+			sync_single(hwdev, bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
@@ -754,7 +755,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
-	return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }
 
 /*
@@ -766,7 +767,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-	return virt_to_phys(io_tlb_end - 1) <= mask;
+	return virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
```
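Note that the converted unmap and sync paths depend on bus_to_virt() exactly inverting virt_to_bus() for addresses inside the swiotlb aperture: the dma_addr_t handed out earlier is turned back into a kernel virtual address and range-checked against io_tlb_start/io_tlb_end. A small illustrative check of that invariant, with virt_to_bus_stub() and bus_to_virt_stub() as hypothetical stand-ins for the real kernel helpers:

```c
#include <assert.h>

/* Hypothetical stand-ins for the kernel's virt_to_bus()/bus_to_virt(). */
extern unsigned long virt_to_bus_stub(void *vaddr);
extern void *bus_to_virt_stub(unsigned long bus_addr);

/* Bounds of the swiotlb bounce-buffer aperture. */
extern char *io_tlb_start, *io_tlb_end;

/*
 * swiotlb_unmap_single() recovers the bounce buffer's virtual address
 * from the dma_addr_t it handed out earlier, so for any address inside
 * the aperture the translation must round-trip exactly.
 */
static void check_swiotlb_roundtrip(void *bounce)
{
	unsigned long dev_addr = virt_to_bus_stub(bounce);
	char *dma_addr = bus_to_virt_stub(dev_addr);

	assert(dma_addr == (char *)bounce);
	assert(dma_addr >= io_tlb_start && dma_addr < io_tlb_end);
}
```

This invariant holds for virt_to_bus/bus_to_virt on directly mapped kernel memory, which is exactly the memory the swiotlb aperture is allocated from.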