author     Linus Torvalds <torvalds@linux-foundation.org>    2009-08-07 10:44:11 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-08-07 10:44:11 -0700
commit     ff1649ff780fb7c0bfbf42d05ffc9b56336b9aa3 (patch)
tree       80ca9b82d398948b01c97b01ad034353dea70ada
parent     da758ddede96dd850945d3417ff75209a666ba0d (diff)
parent     c5b1525533c484238015c48c78f86d49a1bfb86b (diff)
Merge git://git.infradead.org/~dwmw2/iommu-2.6.31
* git://git.infradead.org/~dwmw2/iommu-2.6.31:
  intel-iommu: Fix enabling snooping feature by mistake
  intel-iommu: Mask physical address to correct page size in intel_map_single()
  intel-iommu: Correct sglist size calculation.
-rw-r--r--  drivers/pci/intel-iommu.c  25
1 files changed, 13 insertions, 12 deletions
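
Two of the three fixes in this merge (the intel_map_single() and sglist ones) turn on the difference between the CPU's MM page size (PAGE_SHIFT, which can be larger than 4 KiB, e.g. 16 KiB on some IA64 configurations) and the fixed 4 KiB VT-d page size. A minimal sketch of the conversion the code below leans on, assuming the usual definitions in this file (treat the exact bodies as an approximation, not a quote):

    #define VTD_PAGE_SHIFT	12
    #define VTD_PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)

    /* Turn an MM page frame number into the VT-d pfn of its first 4 KiB page. */
    static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
    {
    	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
    }

With equal page sizes the shift is zero and the helper is a no-op; the mapping-size bugs below only bite when PAGE_SIZE is larger than VTD_PAGE_SIZE, while the snooping fix is independent of page size.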
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ebc9b8dca88..2314ad7ee5f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
}
set_bit(num, iommu->domain_ids);
- set_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = domain;
id = num;
}
@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
tmp->devfn);
}
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+ size_t size)
+{
+ host_addr &= ~PAGE_MASK;
+ return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
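
A worked pass through the aligned_nrpages() helper added above (illustrative; the 16 KiB MM page size is only an assumption chosen to make the rounding visible). For host_addr offset 0x100 and size 0x2000 with PAGE_SHIFT = 14:

    host_addr &= ~PAGE_MASK;       /* keeps only the in-page offset: 0x100          */
    PAGE_ALIGN(0x100 + 0x2000)     /* rounds up to an MM page boundary: 0x4000      */
    0x4000 >> VTD_PAGE_SHIFT       /* = 4 VT-d pages                                */

So the count is rounded up to a whole MM page, not just to a 4 KiB boundary; with 4 KiB MM pages it degenerates to the old behaviour (3 pages for these numbers).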
@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
uint64_t tmp;
if (!sg_res) {
- sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+ sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
pteval = page_to_phys(sg_page(sg)) | prot;
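
The one-line change above is the "Correct sglist size calculation" fix. Sticking with the same illustrative numbers (sg->offset = 0x100, sg->length = 0x2000, 16 KiB MM pages, 4 KiB VT-d pages; again only an assumption to expose the difference):

    old: (0x100 + 0x2000 + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT   /* = 3 VT-d pages */
    new: aligned_nrpages(0x100, 0x2000)                           /* = 4 VT-d pages */

In this version of the driver the IOVA range for the list is sized by summing aligned_nrpages() over the entries, so a per-entry count rounded only to 4 KiB could fall out of step with that MM-page-aligned total; using the same helper in both places keeps them consistent.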
@@ -2415,14 +2422,6 @@ error:
return ret;
}
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
-{
- host_addr &= ~PAGE_MASK;
- return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
@@ -2551,6 +2550,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
int prot = 0;
int ret;
struct intel_iommu *iommu;
+ unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
BUG_ON(dir == DMA_NONE);
@@ -2585,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
* is not a big problem
*/
ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
- paddr >> VTD_PAGE_SHIFT, size, prot);
+ mm_to_dma_pfn(paddr_pfn), size, prot);
if (ret)
goto error;
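
This is the "Mask physical address to correct page size in intel_map_single()" fix: domain_pfn_mapping() is now handed the VT-d pfn of the start of the MM page containing paddr, not the VT-d pfn of paddr itself, matching the size computed with aligned_nrpages() from that same MM-page boundary. A sketch of the difference, assuming mm_to_dma_pfn() shifts by (PAGE_SHIFT - VTD_PAGE_SHIFT) and 16 KiB MM pages:

    /* paddr = 0x5000, PAGE_SHIFT = 14, VTD_PAGE_SHIFT = 12 */
    paddr >> VTD_PAGE_SHIFT              /* old: 0x5, a 4 KiB frame mid-way into the MM page */
    mm_to_dma_pfn(paddr >> PAGE_SHIFT)   /* new: (0x5000 >> 14) << 2 = 0x4,                  */
                                         /* the 16 KiB page's first 4 KiB pfn                */

With 4 KiB MM pages both expressions are identical, which is why the bug only shows up on larger-page configurations.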
@@ -2875,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
- ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+ ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) {
/* clear the page */
dma_pte_clear_range(domain, start_vpfn,
@@ -3408,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_count = 0;
domain->iommu_coherency = 0;
+ domain->iommu_snooping = 0;
domain->max_addr = 0;
/* always allocate the top pgd */
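
The final hunk initializes iommu_snooping alongside iommu_count and iommu_coherency when a VM domain is set up in md_domain_init(); together with the set_bit() dropped in the first hunk it makes up the "Fix enabling snooping feature by mistake" change from the merge message. A rough sketch of the invariant this protects (illustrative only; the real aggregation lives in the driver's domain_update_iommu_* helpers):

    /* Snooping may be reported for a domain only if every IOMMU unit the
     * domain actually spans (every bit set in iommu_bmp) supports it. */
    domain->iommu_snooping = 1;
    for (i = 0; i < g_num_of_iommus; i++)
    	if (test_bit(i, &domain->iommu_bmp) &&
    	    !ecap_sc_support(g_iommus[i]->ecap))
    		domain->iommu_snooping = 0;

Starting the flag at 0 and keeping iommu_bmp free of stray bits means a freshly built domain cannot advertise snoop control that was never verified.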