From f05810c9962bba3e809f07619bda1bfdebbfbfb9 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Thu, 16 Oct 2008 16:31:54 -0700
Subject: dmar: use spin_lock_irqsave() in qi_submit_sync()

The next patch in the series will use the queued invalidation interface
qi_submit_sync() for DMA-remapping as well, which can be called from
interrupt context. So use spin_lock_irqsave() instead of spin_lock() in
qi_submit_sync().

Signed-off-by: Suresh Siddha
Signed-off-by: Youquan Song
Signed-off-by: David Woodhouse
---
 drivers/pci/dmar.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

(limited to 'drivers')

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index e842e756308..b64cec19054 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -580,11 +580,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw = qi->desc;
 
-	spin_lock(&qi->q_lock);
+	spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
-		spin_unlock(&qi->q_lock);
+		spin_unlock_irqrestore(&qi->q_lock, flags);
 		cpu_relax();
-		spin_lock(&qi->q_lock);
+		spin_lock_irqsave(&qi->q_lock, flags);
 	}
 
 	index = qi->free_head;
@@ -605,15 +605,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
+		/*
+		 * We will leave the interrupts disabled, to prevent interrupt
+		 * context to queue another cmd while a cmd is already submitted
+		 * and waiting for completion on this cpu. This is to avoid
+		 * a deadlock where the interrupt context can wait indefinitely
+		 * for free slots in the queue.
+		 */
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
@@ -622,7 +629,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
-	spin_unlock(&qi->q_lock);
+	spin_unlock_irqrestore(&qi->q_lock, flags);
 }
 
 /*
-- 
cgit v1.2.3
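The locking change above matters because qi_submit_sync() can now be entered from interrupt context. Before anything is submitted it is fine to re-enable interrupts while waiting for free slots, but once a descriptor is in flight the CPU must keep interrupts off until completion: an interrupt handler on the same CPU could otherwise try to queue its own command and spin forever on a queue the interrupted caller can no longer drain. Below is a minimal sketch of that two-phase locking pattern; the queue structure, helper, and field names are simplified stand-ins for illustration, not the kernel's actual qi code.

/*
 * Illustrative sketch of qi_submit_sync()'s locking, with simplified,
 * made-up types: "struct inval_queue" and submit_one() stand in for the
 * real queued-invalidation state and descriptor write-out.
 */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/processor.h>	/* cpu_relax() */

#define QUEUE_LEN 256

struct inval_queue {
	spinlock_t	lock;
	int		free_cnt;		/* free descriptor slots */
	int		free_head;		/* next slot to use */
	bool		done[QUEUE_LEN];	/* set by the completion path */
};

static int submit_one(struct inval_queue *q)
{
	int slot = q->free_head;

	q->done[slot] = false;
	q->free_head = (q->free_head + 1) % QUEUE_LEN;
	q->free_cnt--;
	/* the real code writes the descriptor and the hardware tail here */
	return slot;
}

void queue_submit_sync(struct inval_queue *q)
{
	unsigned long flags;
	int slot;

	spin_lock_irqsave(&q->lock, flags);
	while (q->free_cnt < 3) {
		/* nothing of ours is in flight yet: interrupts may run */
		spin_unlock_irqrestore(&q->lock, flags);
		cpu_relax();
		spin_lock_irqsave(&q->lock, flags);
	}

	slot = submit_one(q);

	while (!q->done[slot]) {
		/*
		 * Keep interrupts off while waiting: an interrupt handler
		 * on this CPU could otherwise queue its own command and
		 * wait forever for a completion we can no longer process.
		 */
		spin_unlock(&q->lock);
		cpu_relax();
		spin_lock(&q->lock);
	}

	q->free_cnt++;
	spin_unlock_irqrestore(&q->lock, flags);
}

This keeps the queue-full wait (before submission) from adding interrupt latency, while still closing the deadlock window once a descriptor is outstanding.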
From 3481f21097cb560392c411377893b5109fbde557 Mon Sep 17 00:00:00 2001
From: Youquan Song
Date: Thu, 16 Oct 2008 16:31:55 -0700
Subject: dmar: context cache and IOTLB invalidation using queued invalidation

Implement context cache and IOTLB invalidation using the queued
invalidation interface. This interface will be used by DMA remapping
when queued invalidation is supported.

Signed-off-by: Youquan Song
Signed-off-by: Suresh Siddha
Signed-off-by: David Woodhouse
---
 drivers/pci/dmar.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)

(limited to 'drivers')

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index b64cec19054..0f409e23631 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -645,6 +645,62 @@ void qi_global_iec(struct intel_iommu *iommu)
 	qi_submit_sync(&desc, iommu);
 }
 
+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		     u64 type, int non_present_entry_flush)
+{
+
+	struct qi_desc desc;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+			| QI_CC_GRAN(type) | QI_CC_TYPE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+		   unsigned int size_order, u64 type,
+		   int non_present_entry_flush)
+{
+	u8 dw = 0, dr = 0;
+
+	struct qi_desc desc;
+	int ih = 0;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	if (cap_write_drain(iommu->cap))
+		dw = 1;
+
+	if (cap_read_drain(iommu->cap))
+		dr = 1;
+
+	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+		| QI_IOTLB_AM(size_order);
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
 /*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
-- 
cgit v1.2.3


From a77b67d4023770805141014b8fa9eb5467457817 Mon Sep 17 00:00:00 2001
From: Youquan Song
Date: Thu, 16 Oct 2008 16:31:56 -0700
Subject: dmar: Use queued invalidation interface for IOTLB and context invalidation

If the queued invalidation interface is available and enabled, it will be
used instead of the register-based interface. According to the VT-d
specification, when queued invalidation is enabled, invalidation commands
are submitted only through the invalidation queue and not through the
command registers interface.
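The qi_flush_context() and qi_flush_iotlb() helpers added above each build one 128-bit descriptor (two 64-bit words) encoding the invalidation type, granularity, domain and source IDs and, for IOTLB flushes, the address and mask, then hand it to qi_submit_sync(). When the IOMMU does not cache non-present entries (no caching mode), the non_present_entry_flush case returns 1 so the caller knows only a write-buffer flush is needed. The fragment below is a minimal sketch of that descriptor packing; the SK_* names and field positions are assumptions made for illustration, not the kernel's QI_* definitions.

/*
 * Illustrative only: assembling a two-word (128-bit) invalidation
 * descriptor.  The shift values below are placeholders, not the real
 * VT-d encoding -- the kernel's QI_CC_* and QI_IOTLB_* macros define
 * the actual bit positions.
 */
#include <stdint.h>

struct qi_desc_sketch {
	uint64_t low;
	uint64_t high;
};

/* placeholder field positions (assumed, for illustration) */
#define SK_TYPE_CONTEXT	0x1ULL			/* descriptor type */
#define SK_GRAN(g)	((uint64_t)(g) << 4)	/* granularity     */
#define SK_DID(d)	((uint64_t)(d) << 16)	/* domain id       */
#define SK_SID(s)	((uint64_t)(s) << 32)	/* source id       */
#define SK_FM(f)	((uint64_t)(f) << 48)	/* function mask   */

struct qi_desc_sketch build_context_flush(uint16_t did, uint16_t sid,
					  uint8_t fm, uint64_t gran)
{
	struct qi_desc_sketch desc;

	desc.low = SK_TYPE_CONTEXT | SK_GRAN(gran) | SK_DID(did) |
		   SK_SID(sid) | SK_FM(fm);
	desc.high = 0;	/* context-cache descriptors leave the high word 0 */

	return desc;	/* the real helpers pass this to qi_submit_sync() */
}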
Signed-off-by: Youquan Song Signed-off-by: Suresh Siddha Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 95 ++++++++++++++++++++++------------------------- 1 file changed, 45 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index fc5f2dbf532..50947041913 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -567,27 +567,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu, return 0; } -static int inline iommu_flush_context_global(struct intel_iommu *iommu, - int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, - non_present_entry_flush); -} - -static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did, - int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, - non_present_entry_flush); -} - -static int inline iommu_flush_context_device(struct intel_iommu *iommu, - u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, did, source_id, function_mask, - DMA_CCMD_DEVICE_INVL, non_present_entry_flush); -} - /* return value determine if we need a write buffer flush */ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, @@ -660,20 +639,6 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, return 0; } -static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu, - int non_present_entry_flush) -{ - return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, - non_present_entry_flush); -} - -static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did, - int non_present_entry_flush) -{ - return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, - non_present_entry_flush); -} - static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int pages, int non_present_entry_flush) { @@ -684,8 +649,9 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, /* Fallback to domain selective flush if no PSI support */ if (!cap_pgsel_inv(iommu->cap)) - return iommu_flush_iotlb_dsi(iommu, did, - non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH, + non_present_entry_flush); /* * PSI requires page size to be 2 ^ x, and the base address is naturally @@ -694,11 +660,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, mask = ilog2(__roundup_pow_of_two(pages)); /* Fallback to domain selective flush if size is too big */ if (mask > cap_max_amask_val(iommu->cap)) - return iommu_flush_iotlb_dsi(iommu, did, - non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH, non_present_entry_flush); - return __iommu_flush_iotlb(iommu, did, addr, mask, - DMA_TLB_PSI_FLUSH, non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, addr, mask, + DMA_TLB_PSI_FLUSH, + non_present_entry_flush); } static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) @@ -1204,11 +1171,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, __iommu_flush_cache(iommu, context, sizeof(*context)); /* it's a non-present to present mapping */ - if (iommu_flush_context_device(iommu, domain->id, - (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) + if (iommu->flush.flush_context(iommu, domain->id, + (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, + 
DMA_CCMD_DEVICE_INVL, 1)) iommu_flush_write_buffer(iommu); else - iommu_flush_iotlb_dsi(iommu, 0, 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); + spin_unlock_irqrestore(&iommu->lock, flags); return 0; } @@ -1310,8 +1279,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) { clear_context_table(domain->iommu, bus, devfn); - iommu_flush_context_global(domain->iommu, 0); - iommu_flush_iotlb_global(domain->iommu, 0); + domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, + DMA_CCMD_GLOBAL_INVL, 0); + domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH, 0); } static void domain_remove_dev_info(struct dmar_domain *domain) @@ -1662,6 +1633,28 @@ int __init init_dmars(void) } } + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + if (dmar_enable_qi(iommu)) { + /* + * Queued Invalidate not enabled, use Register Based + * Invalidate + */ + iommu->flush.flush_context = __iommu_flush_context; + iommu->flush.flush_iotlb = __iommu_flush_iotlb; + printk(KERN_INFO "IOMMU 0x%Lx: using Register based " + "invalidation\n", drhd->reg_base_addr); + } else { + iommu->flush.flush_context = qi_flush_context; + iommu->flush.flush_iotlb = qi_flush_iotlb; + printk(KERN_INFO "IOMMU 0x%Lx: using Queued " + "invalidation\n", drhd->reg_base_addr); + } + } + /* * For each rmrr * for each dev attached to rmrr @@ -1714,9 +1707,10 @@ int __init init_dmars(void) iommu_set_root_entry(iommu); - iommu_flush_context_global(iommu, 0); - iommu_flush_iotlb_global(iommu, 0); - + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, + 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, + 0); iommu_disable_protect_mem_regions(iommu); ret = iommu_enable_translation(iommu); @@ -1891,7 +1885,8 @@ static void flush_unmaps(void) struct intel_iommu *iommu = deferred_flush[i].domain[0]->iommu; - iommu_flush_iotlb_global(iommu, 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH, 0); for (j = 0; j < deferred_flush[i].next; j++) { __free_iova(&deferred_flush[i].domain[j]->iovad, deferred_flush[i].iova[j]); -- cgit v1.2.3 From cacd4213d8ffed83676f38d5d8e93c673e0f1af7 Mon Sep 17 00:00:00 2001 From: Youquan Song Date: Thu, 16 Oct 2008 16:31:57 -0700 Subject: dmar: remove the quirk which disables dma-remapping when intr-remapping enabled Now that we have DMA-remapping support for queued invalidation, we can enable both DMA-remapping and interrupt-remapping at the same time. Signed-off-by: Youquan Song Signed-off-by: Suresh Siddha Signed-off-by: David Woodhouse --- drivers/pci/dmar.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0f409e23631..44d6c7081b8 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -455,8 +455,8 @@ void __init detect_intel_iommu(void) ret = early_dmar_detect(); -#ifdef CONFIG_DMAR { +#ifdef CONFIG_INTR_REMAP struct acpi_table_dmar *dmar; /* * for now we will disable dma-remapping when interrupt @@ -465,28 +465,18 @@ void __init detect_intel_iommu(void) * is added, we will not need this any more. 
*/ dmar = (struct acpi_table_dmar *) dmar_tbl; - if (ret && cpu_has_x2apic && dmar->flags & 0x1) { + if (ret && cpu_has_x2apic && dmar->flags & 0x1) printk(KERN_INFO "Queued invalidation will be enabled to support " "x2apic and Intr-remapping.\n"); - printk(KERN_INFO - "Disabling IOMMU detection, because of missing " - "queued invalidation support for IOTLB " - "invalidation\n"); - printk(KERN_INFO - "Use \"nox2apic\", if you want to use Intel " - " IOMMU for DMA-remapping and don't care about " - " x2apic support\n"); - - dmar_disabled = 1; - return; - } +#endif +#ifdef CONFIG_DMAR if (ret && !no_iommu && !iommu_detected && !swiotlb && !dmar_disabled) iommu_detected = 1; - } #endif + } } -- cgit v1.2.3 From 5b6985ce8ec7127b4d60ad450b64ca8b82748a3b Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Thu, 16 Oct 2008 18:02:32 -0700 Subject: intel-iommu: IA64 support The current Intel IOMMU code assumes that both host page size and Intel IOMMU page size are 4KiB. The first patch supports variable page size. This provides support for IA64 which has multiple page sizes. This patch also adds some other code hooks for IA64 platform including DMAR_OPERATION_TIMEOUT definition. [dwmw2: some cleanup] Signed-off-by: Fenghua Yu Signed-off-by: Tony Luck Signed-off-by: David Woodhouse --- drivers/pci/dmar.c | 19 ++++--- drivers/pci/intel-iommu.c | 128 ++++++++++++++++++++++++---------------------- drivers/pci/quirks.c | 14 +++++ 3 files changed, 91 insertions(+), 70 deletions(-) (limited to 'drivers') diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 44d6c7081b8..b65173828bc 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -277,14 +277,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) drhd = (struct acpi_dmar_hardware_unit *)header; printk (KERN_INFO PREFIX "DRHD (flags: 0x%08x)base: 0x%016Lx\n", - drhd->flags, drhd->address); + drhd->flags, (unsigned long long)drhd->address); break; case ACPI_DMAR_TYPE_RESERVED_MEMORY: rmrr = (struct acpi_dmar_reserved_memory *)header; printk (KERN_INFO PREFIX "RMRR base: 0x%016Lx end: 0x%016Lx\n", - rmrr->base_address, rmrr->end_address); + (unsigned long long)rmrr->base_address, + (unsigned long long)rmrr->end_address); break; } } @@ -304,7 +305,7 @@ parse_dmar_table(void) if (!dmar) return -ENODEV; - if (dmar->width < PAGE_SHIFT_4K - 1) { + if (dmar->width < PAGE_SHIFT - 1) { printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); return -EINVAL; } @@ -493,7 +494,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) iommu->seq_id = iommu_allocated++; - iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); + iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); if (!iommu->reg) { printk(KERN_ERR "IOMMU: can't map the region\n"); goto error; @@ -504,8 +505,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) /* the registers might be more than one page */ map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), cap_max_fault_reg_offset(iommu->cap)); - map_size = PAGE_ALIGN_4K(map_size); - if (map_size > PAGE_SIZE_4K) { + map_size = VTD_PAGE_ALIGN(map_size); + if (map_size > VTD_PAGE_SIZE) { iounmap(iommu->reg); iommu->reg = ioremap(drhd->reg_base_addr, map_size); if (!iommu->reg) { @@ -516,8 +517,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) ver = readl(iommu->reg + DMAR_VER_REG); pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", - drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), - iommu->cap, iommu->ecap); + (unsigned long long)drhd->reg_base_addr, + DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), + (unsigned long 
long)iommu->cap, + (unsigned long long)iommu->ecap); spin_lock_init(&iommu->register_lock); diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 50947041913..2bf96babbc4 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -18,6 +18,7 @@ * Author: Ashok Raj * Author: Shaohua Li * Author: Anil S Keshavamurthy + * Author: Fenghua Yu */ #include @@ -35,11 +36,13 @@ #include #include #include -#include /* force_iommu in this header in x86-64*/ #include #include #include "pci.h" +#define ROOT_SIZE VTD_PAGE_SIZE +#define CONTEXT_SIZE VTD_PAGE_SIZE + #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) @@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, spin_unlock_irqrestore(&iommu->lock, flags); return NULL; } - __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); phy_addr = virt_to_phys((void *)context); set_root_value(root, phy_addr); set_root_present(root); @@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) return NULL; } __iommu_flush_cache(domain->iommu, tmp_page, - PAGE_SIZE_4K); + PAGE_SIZE); dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); /* * high level table always sets r/w, last level page @@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) start &= (((u64)1) << addr_width) - 1; end &= (((u64)1) << addr_width) - 1; /* in case it's partial page */ - start = PAGE_ALIGN_4K(start); - end &= PAGE_MASK_4K; + start = PAGE_ALIGN(start); + end &= PAGE_MASK; /* we don't need lock here, nobody else touches the iova range */ while (start < end) { dma_pte_clear_one(domain, start); - start += PAGE_SIZE_4K; + start += VTD_PAGE_SIZE; } } @@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) if (!root) return -ENOMEM; - __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, root, ROOT_SIZE); spin_lock_irqsave(&iommu->lock, flags); iommu->root_entry = root; @@ -634,7 +637,8 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", - DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); + (unsigned long long)DMA_TLB_IIRG(type), + (unsigned long long)DMA_TLB_IAIG(val)); /* flush context entry will implictly flush write buffer */ return 0; } @@ -644,7 +648,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, { unsigned int mask; - BUG_ON(addr & (~PAGE_MASK_4K)); + BUG_ON(addr & (~VTD_PAGE_MASK)); BUG_ON(pages == 0); /* Fallback to domain selective flush if no PSI support */ @@ -798,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg) } static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, - u8 fault_reason, u16 source_id, u64 addr) + u8 fault_reason, u16 source_id, unsigned long long addr) { const char *reason; @@ -1051,9 +1055,9 @@ static void dmar_init_reserved_ranges(void) if (!r->flags || !(r->flags & IORESOURCE_MEM)) continue; addr = r->start; - addr &= PAGE_MASK_4K; + addr &= PAGE_MASK; size = r->end - addr; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), IOVA_PFN(size + addr) - 1); if (!iova) @@ -1115,7 +1119,7 @@ static int domain_init(struct 
dmar_domain *domain, int guest_width) domain->pgd = (struct dma_pte *)alloc_pgtable_page(); if (!domain->pgd) return -ENOMEM; - __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); return 0; } @@ -1131,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain) /* destroy iovas */ put_iova_domain(&domain->iovad); end = DOMAIN_MAX_ADDR(domain->gaw); - end = end & (~PAGE_MASK_4K); + end = end & (~PAGE_MASK); /* clear ptes */ dma_pte_clear_range(domain, 0, end); @@ -1252,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, u64 start_pfn, end_pfn; struct dma_pte *pte; int index; + int addr_width = agaw_to_width(domain->agaw); + + hpa &= (((u64)1) << addr_width) - 1; if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; - iova &= PAGE_MASK_4K; - start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; - end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; + iova &= PAGE_MASK; + start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; + end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; index = 0; while (start_pfn < end_pfn) { - pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); + pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); if (!pte) return -ENOMEM; /* We don't need lock here, nobody else * touches the iova range */ BUG_ON(dma_pte_addr(*pte)); - dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); + dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); dma_set_pte_prot(*pte, prot); __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); start_pfn++; @@ -1445,11 +1452,13 @@ error: return find_domain(pdev); } -static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) +static int iommu_prepare_identity_map(struct pci_dev *pdev, + unsigned long long start, + unsigned long long end) { struct dmar_domain *domain; unsigned long size; - u64 base; + unsigned long long base; int ret; printk(KERN_INFO @@ -1461,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) return -ENOMEM; /* The address might not be aligned */ - base = start & PAGE_MASK_4K; + base = start & PAGE_MASK; size = end - base; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); if (!reserve_iova(&domain->iovad, IOVA_PFN(base), IOVA_PFN(base + size) - 1)) { printk(KERN_ERR "IOMMU: reserve iova failed\n"); @@ -1732,8 +1741,8 @@ error: static inline u64 aligned_size(u64 host_addr, size_t size) { u64 addr; - addr = (host_addr & (~PAGE_MASK_4K)) + size; - return PAGE_ALIGN_4K(addr); + addr = (host_addr & (~PAGE_MASK)) + size; + return PAGE_ALIGN(addr); } struct iova * @@ -1747,7 +1756,7 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) return NULL; piova = alloc_iova(&domain->iovad, - size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); + size >> PAGE_SHIFT, IOVA_PFN(end), 1); return piova; } @@ -1807,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev) return domain; } -static dma_addr_t +dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) { struct pci_dev *pdev = to_pci_dev(hwdev); struct dmar_domain *domain; - unsigned long start_paddr; + phys_addr_t start_paddr; struct iova *iova; int prot = 0; int ret; @@ -1831,7 +1840,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) if (!iova) goto error; - start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; + start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; /* * Check if DMAR supports zero-length reads on write only @@ -1849,27 +1858,23 
@@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) * is not a big problem */ ret = domain_page_mapping(domain, start_paddr, - ((u64)paddr) & PAGE_MASK_4K, size, prot); + ((u64)paddr) & PAGE_MASK, size, prot); if (ret) goto error; - pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n", - pci_name(pdev), size, (u64)paddr, - size, (u64)start_paddr, dir); - /* it's a non-present to present mapping */ ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, - start_paddr, size >> PAGE_SHIFT_4K, 1); + start_paddr, size >> VTD_PAGE_SHIFT, 1); if (ret) iommu_flush_write_buffer(domain->iommu); - return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); + return start_paddr + ((u64)paddr & (~PAGE_MASK)); error: if (iova) __free_iova(&domain->iovad, iova); printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", - pci_name(pdev), size, (u64)paddr, dir); + pci_name(pdev), size, (unsigned long long)paddr, dir); return 0; } @@ -1931,8 +1936,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) spin_unlock_irqrestore(&async_umap_flush_lock, flags); } -static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, - size_t size, int dir) +void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, + int dir) { struct pci_dev *pdev = to_pci_dev(dev); struct dmar_domain *domain; @@ -1948,11 +1953,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, if (!iova) return; - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; size = aligned_size((u64)dev_addr, size); pr_debug("Device %s unmapping: %lx@%llx\n", - pci_name(pdev), size, (u64)start_addr); + pci_name(pdev), size, (unsigned long long)start_addr); /* clear the whole page */ dma_pte_clear_range(domain, start_addr, start_addr + size); @@ -1960,7 +1965,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, dma_pte_free_pagetable(domain, start_addr, start_addr + size); if (intel_iommu_strict) { if (iommu_flush_iotlb_psi(domain->iommu, - domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) + domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) iommu_flush_write_buffer(domain->iommu); /* free iova */ __free_iova(&domain->iovad, iova); @@ -1973,13 +1978,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, } } -static void * intel_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) +void *intel_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) { void *vaddr; int order; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); order = get_order(size); flags &= ~(GFP_DMA | GFP_DMA32); @@ -1995,12 +2000,12 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size, return NULL; } -static void intel_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) { int order; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); order = get_order(size); intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); @@ -2008,8 +2013,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size, } #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, int dir) + +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, + int nelems, int dir) { int i; struct 
pci_dev *pdev = to_pci_dev(hwdev); @@ -2033,7 +2039,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, size += aligned_size((u64)addr, sg->length); } - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; /* clear the whole page */ dma_pte_clear_range(domain, start_addr, start_addr + size); @@ -2041,7 +2047,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, dma_pte_free_pagetable(domain, start_addr, start_addr + size); if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, - size >> PAGE_SHIFT_4K, 0)) + size >> VTD_PAGE_SHIFT, 0)) iommu_flush_write_buffer(domain->iommu); /* free iova */ @@ -2062,8 +2068,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, return nelems; } -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, int dir) +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, + int dir) { void *addr; int i; @@ -2107,14 +2113,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; offset = 0; for_each_sg(sglist, sg, nelems, i) { addr = SG_ENT_VIRT_ADDRESS(sg); addr = (void *)virt_to_phys(addr); size = aligned_size((u64)addr, sg->length); ret = domain_page_mapping(domain, start_addr + offset, - ((u64)addr) & PAGE_MASK_4K, + ((u64)addr) & PAGE_MASK, size, prot); if (ret) { /* clear the page */ @@ -2128,14 +2134,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, return 0; } sg->dma_address = start_addr + offset + - ((u64)addr & (~PAGE_MASK_4K)); + ((u64)addr & (~PAGE_MASK)); sg->dma_length = sg->length; offset += size; } /* it's a non-present to present mapping */ if (iommu_flush_iotlb_psi(domain->iommu, domain->id, - start_addr, offset >> PAGE_SHIFT_4K, 1)) + start_addr, offset >> VTD_PAGE_SHIFT, 1)) iommu_flush_write_buffer(domain->iommu); return nelems; } @@ -2175,7 +2181,6 @@ static inline int iommu_devinfo_cache_init(void) sizeof(struct device_domain_info), 0, SLAB_HWCACHE_ALIGN, - NULL); if (!iommu_devinfo_cache) { printk(KERN_ERR "Couldn't create devinfo cache\n"); @@ -2193,7 +2198,6 @@ static inline int iommu_iova_cache_init(void) sizeof(struct iova), 0, SLAB_HWCACHE_ALIGN, - NULL); if (!iommu_iova_cache) { printk(KERN_ERR "Couldn't create iova cache\n"); @@ -2322,7 +2326,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) return; end = DOMAIN_MAX_ADDR(domain->gaw); - end = end & (~PAGE_MASK_4K); + end = end & (~VTD_PAGE_MASK); /* clear ptes */ dma_pte_clear_range(domain, 0, end); @@ -2418,6 +2422,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) if (pte) pfn = dma_pte_addr(*pte); - return pfn >> PAGE_SHIFT_4K; + return pfn >> VTD_PAGE_SHIFT; } EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e872ac925b4..832175d9ca2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -35,6 +35,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); +/* Many VIA bridges seem to corrupt data for DAC. 
Disable it here */ +int forbid_dac __read_mostly; +EXPORT_SYMBOL(forbid_dac); + +static __devinit void via_no_dac(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { + dev_info(&dev->dev, + "VIA PCI bridge detected. Disabling DAC.\n"); + forbid_dac = 1; + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); + /* Deal with broken BIOS'es that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) -- cgit v1.2.3 From bb9e6d65078da2f38cfe1067cfd31a896ca867c0 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 15 Oct 2008 16:08:28 +0900 Subject: intel-iommu: use coherent_dma_mask in alloc_coherent This patch fixes intel-iommu to use dev->coherent_dma_mask in alloc_coherent. Currently, intel-iommu uses dev->dma_mask in alloc_coherent but alloc_coherent is supposed to use coherent_dma_mask. It could break drivers that uses smaller coherent_dma_mask than dma_mask (though the current code works for the majority that use the same mask for coherent_dma_mask and dma_mask). [dwmw2: dma_mask can be bigger than 'unsigned long'] Signed-off-by: FUJITA Tomonori Reviewed-by: Grant Grundler Signed-off-by: David Woodhouse --- drivers/pci/intel-iommu.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 2bf96babbc4..d315e413fae 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1762,14 +1762,14 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) static struct iova * __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, - size_t size) + size_t size, u64 dma_mask) { struct pci_dev *pdev = to_pci_dev(dev); struct iova *iova = NULL; - if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { - iova = iommu_alloc_iova(domain, size, pdev->dma_mask); - } else { + if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac) + iova = iommu_alloc_iova(domain, size, dma_mask); + else { /* * First try to allocate an io virtual address in * DMA_32BIT_MASK and if that fails then try allocating @@ -1777,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, */ iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); if (!iova) - iova = iommu_alloc_iova(domain, size, pdev->dma_mask); + iova = iommu_alloc_iova(domain, size, dma_mask); } if (!iova) { @@ -1816,8 +1816,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev) return domain; } -dma_addr_t -intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) +static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir, u64 dma_mask) { struct pci_dev *pdev = to_pci_dev(hwdev); struct dmar_domain *domain; @@ -1836,7 +1836,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) size = aligned_size((u64)paddr, size); - iova = __intel_alloc_iova(hwdev, domain, size); + iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); if (!iova) goto error; @@ -1878,6 +1878,13 @@ error: return 0; } +dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir) +{ + return __intel_map_single(hwdev, paddr, size, dir, + to_pci_dev(hwdev)->dma_mask); +} + static void flush_unmaps(void) { int i, j; @@ -1993,7 +2000,9 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size, return NULL; memset(vaddr, 
0, size); - *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); + *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, + DMA_BIDIRECTIONAL, + hwdev->coherent_dma_mask); if (*dma_handle) return vaddr; free_pages((unsigned long)vaddr, order); @@ -2097,7 +2106,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, size += aligned_size((u64)addr, sg->length); } - iova = __intel_alloc_iova(hwdev, domain, size); + iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); if (!iova) { sglist->dma_length = 0; return 0; -- cgit v1.2.3 From f82851a8a480a26611175f064f54e17f5f7b01ae Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 18 Oct 2008 15:43:14 +0100 Subject: dmar: fix uninitialised 'ret' variable in dmar_parse_dev() This was introduced by commit 1886e8a90a580f3ad343f2065c84c1b9e1dac9ef ("x64, x2apic/intr-remap: code re-structuring, to be used by both DMA and Interrupt remapping"). It was causing bogus results to be returned from dmar_parse_dev() when the first unit with the INCLUDE_ALL flag was processed. Signed-off-by: David Woodhouse --- drivers/pci/dmar.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index b65173828bc..7b3751136e6 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -188,12 +188,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) return 0; } -static int __init -dmar_parse_dev(struct dmar_drhd_unit *dmaru) +static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) { struct acpi_dmar_hardware_unit *drhd; static int include_all; - int ret; + int ret = 0; drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; -- cgit v1.2.3
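The 'ret' fix above is the classic partially-initialized-local pattern: per the commit message, on the path taken for the first unit carrying the INCLUDE_ALL flag nothing ever assigned ret before it was returned, so dmar_parse_dev() handed back whatever happened to be on the stack. Below is a small stand-alone sketch of the bug class and the one-line fix; parse_unit() and its flags are illustrative stand-ins, not the kernel function.

/*
 * Stand-alone sketch of the bug class fixed above: a local that is only
 * assigned on some paths.
 */
#include <stdio.h>

static int parse_unit(int include_all, int already_seen_include_all)
{
	int ret = 0;	/* the fix; with plain "int ret;" the branch-free
			 * path below returns an indeterminate value */

	if (!include_all)
		ret = -1;		/* pretend the device-scope parse failed */
	else if (already_seen_include_all)
		ret = -22;		/* only one INCLUDE_ALL unit is allowed */
	/* first INCLUDE_ALL unit: no branch assigns ret at all */

	return ret;
}

int main(void)
{
	/* with the initializer this reliably prints 0 (success) */
	printf("first INCLUDE_ALL unit -> %d\n", parse_unit(1, 0));
	return 0;
}

Compilers can warn about this pattern (e.g. gcc's -Wuninitialized), but only when the flow analysis can prove the read; initializing at declaration is the conservative fix used here.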