author     Fenghua Yu <fenghua.yu@intel.com>           2008-10-16 18:02:32 -0700
committer  David Woodhouse <David.Woodhouse@intel.com>  2008-10-18 14:29:15 +0100
commit     5b6985ce8ec7127b4d60ad450b64ca8b82748a3b (patch)
tree       f1d5a27601df04a3481690a1a2f90fc688034aff /include
parent     cacd4213d8ffed83676f38d5d8e93c673e0f1af7 (diff)
intel-iommu: IA64 support
The current Intel IOMMU code assumes that both the host page size and the
Intel IOMMU page size are 4KiB. This patch supports a variable host page
size, which provides support for IA64 with its multiple page sizes.

It also adds some other code hooks for the IA64 platform, including the
DMAR_OPERATION_TIMEOUT definition.

[dwmw2: some cleanup]

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
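To make the page-size decoupling concrete, here is a minimal standalone C
sketch (not from the patch) contrasting the fixed 4KiB VT-d geometry with a
larger host page size; the 16KiB HOST_PAGE_SHIFT is an assumed IA64-style
value, and the VTD_PAGE_* constants mirror the definitions introduced below.

/* Sketch: VT-d page geometry vs. host page geometry.
 * HOST_PAGE_SHIFT of 14 (16KiB) is an assumed IA64-style value. */
#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT  12
#define VTD_PAGE_SIZE   (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK   (((uint64_t)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

#define HOST_PAGE_SHIFT 14 /* assumed: 16KiB host pages */

int main(void)
{
	uint64_t addr = 0x12345;

	/* One 16KiB host page spans four 4KiB IOMMU pages. */
	printf("IOMMU pages per host page: %lu\n",
	       1UL << (HOST_PAGE_SHIFT - VTD_PAGE_SHIFT));
	printf("VTD_PAGE_ALIGN(0x%llx) = 0x%llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)VTD_PAGE_ALIGN(addr));
	return 0;
}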
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/iommu.h        4
-rw-r--r--  include/linux/dma_remapping.h  27
-rw-r--r--  include/linux/intel-iommu.h    39
3 files changed, 40 insertions, 30 deletions
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 961e746da97..2daaffcda52 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -7,9 +7,13 @@ extern struct dma_mapping_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int dmar_disabled;
+extern int forbid_dac;
extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+
#ifdef CONFIG_GART_IOMMU
extern int gart_iommu_aperture;
extern int gart_iommu_aperture_allowed;
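The new timeout converts ten seconds of wall-clock time into TSC cycles:
tsc_khz is the TSC rate in kHz, so tsc_khz * 1000 cycles elapse per second.
A small standalone check of that arithmetic, with an assumed 2 GHz TSC:

/* Sketch: DMAR_OPERATION_TIMEOUT in cycles. tsc_khz = 2000000 (2 GHz)
 * is an assumed example value; in the kernel it is probed at boot. */
#include <stdio.h>

int main(void)
{
	unsigned long tsc_khz = 2000000UL; /* assumed 2 GHz TSC */
	unsigned long long timeout = (unsigned long long)tsc_khz * 10 * 1000;

	printf("timeout = %llu cycles (%.0f s at %lu kHz)\n",
	       timeout, (double)timeout / (tsc_khz * 1000.0), tsc_khz);
	return 0;
}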
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65f81d..952df39c989 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -2,15 +2,14 @@
#define _DMA_REMAPPING_H
/*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
*/
-#define PAGE_SHIFT_4K (12)
-#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+#define VTD_PAGE_SHIFT (12)
+#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
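Note that IOVA_PFN now shifts by the host PAGE_SHIFT while the page-table
macros keep VTD_PAGE_SHIFT, so IOVA frame numbers are host-page-granular and
page-table entries stay 4KiB-granular. A standalone sketch of the two
conversions (a PAGE_SHIFT of 14 is an assumed host value, not from the patch):

/* Sketch: IOVA PFNs use host page units; PTE addresses use VT-d 4KiB units.
 * PAGE_SHIFT = 14 is an assumed host value. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      14
#define VTD_PAGE_SHIFT  12
#define IOVA_PFN(addr)  ((addr) >> PAGE_SHIFT)

int main(void)
{
	uint64_t iova = 0x80004000ULL;

	printf("host PFN: 0x%llx\n", (unsigned long long)IOVA_PFN(iova));
	printf("vt-d PFN: 0x%llx\n",
	       (unsigned long long)(iova >> VTD_PAGE_SHIFT));
	return 0;
}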
@@ -25,7 +24,7 @@ struct root_entry {
u64 val;
u64 rsvd1;
};
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
return (root->val & 1);
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root)
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
- root->val |= value & PAGE_MASK_4K;
+ root->val |= value & VTD_PAGE_MASK;
}
struct context_entry;
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root)
{
return (struct context_entry *)
(root_present(root)?phys_to_virt(
- root->val & PAGE_MASK_4K):
+ root->val & VTD_PAGE_MASK) :
NULL);
}
@@ -67,7 +66,7 @@ struct context_entry {
#define context_present(c) ((c).lo & 1)
#define context_fault_disable(c) (((c).lo >> 1) & 1)
#define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
#define context_address_width(c) ((c).hi & 7)
#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
@@ -81,7 +80,7 @@ struct context_entry {
} while (0)
#define CONTEXT_TT_MULTI_LEVEL 0
#define context_set_address_root(c, val) \
- do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+ do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
#define context_set_domain_id(c, val) \
do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@ struct dma_pte {
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
#define dma_set_pte_prot(p, prot) \
do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
#define dma_set_pte_addr(p, addr) do {\
- (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+ (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
#define dma_pte_present(p) (((p).val & 3) != 0)
struct intel_iommu;
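For reference, the dma_pte accessors above compose as follows; this
standalone sketch re-declares the macros locally so it compiles on its own,
using an arbitrary example address:

/* Sketch: building a VT-d PTE with the accessors from the hunk above,
 * re-declared here so the example is self-contained. */
#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT  12
#define VTD_PAGE_MASK   (((uint64_t)-1) << VTD_PAGE_SHIFT)
#define DMA_PTE_READ    1
#define DMA_PTE_WRITE   2

struct dma_pte { uint64_t val; };

#define dma_set_pte_prot(p, prot) \
	do { (p).val = ((p).val & ~3ULL) | ((prot) & 3); } while (0)
#define dma_set_pte_addr(p, addr) \
	do { (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
#define dma_pte_addr(p)     ((p).val & VTD_PAGE_MASK)
#define dma_pte_present(p)  (((p).val & 3) != 0)

int main(void)
{
	struct dma_pte pte = { 0 };

	dma_set_pte_addr(pte, 0xdeadb000ULL); /* already 4KiB aligned */
	dma_set_pte_prot(pte, DMA_PTE_READ | DMA_PTE_WRITE);
	printf("addr=0x%llx present=%d\n",
	       (unsigned long long)dma_pte_addr(pte), dma_pte_present(pte));
	return 0;
}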
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index afb0d2a5b7c..3d017cfd245 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/dma_remapping.h>
#include <asm/cacheflush.h>
+#include <asm/iommu.h>
/*
* Intel IOMMU register specification per version 1.0 public spec.
@@ -202,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
-
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
- cycles_t start_time = get_cycles();\
- while (1) {\
- sts = op (iommu->reg + offset);\
- if (cond)\
- break;\
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do { \
+ cycles_t start_time = get_cycles(); \
+ while (1) { \
+ sts = op(iommu->reg + offset); \
+ if (cond) \
+ break; \
if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
- panic("DMAR hardware is malfunctioning\n");\
- cpu_relax();\
- }\
-}
+ panic("DMAR hardware is malfunctioning\n"); \
+ cpu_relax(); \
+ } \
+} while (0)
#define QI_LENGTH 256 /* queue length */
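The switch from a bare { ... } block to do { ... } while (0) in
IOMMU_WAIT_OP is the usual idiom for multi-statement macros: without it, the
semicolon after a macro invocation ends an enclosing if statement and breaks
if/else nesting. A minimal standalone illustration (BROKEN_OP and SAFE_OP
are hypothetical stand-ins, not names from the patch):

/* Sketch: why multi-statement macros use do { } while (0). */
#include <stdio.h>

#define BROKEN_OP(x) { printf("step1 %d\n", x); printf("step2\n"); }
#define SAFE_OP(x)   do { printf("step1 %d\n", x); printf("step2\n"); } while (0)

int main(void)
{
	int cond = 0;

	/* With BROKEN_OP this would not compile: the ';' after the
	 * braces ends the if statement, leaving a dangling 'else'.
	 *
	 * if (cond)
	 *         BROKEN_OP(1);
	 * else
	 *         printf("fallback\n");
	 */
	if (cond)
		SAFE_OP(1);
	else
		printf("fallback\n");
	return 0;
}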
@@ -244,7 +244,7 @@ enum {
#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr) (((u64)addr) & PAGE_MASK_4K)
+#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
#define QI_IOTLB_AM(am) (((u8)am))
@@ -353,4 +353,11 @@ static inline int intel_iommu_found(void)
}
#endif /* CONFIG_DMAR */
+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
#endif
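These newly exported entry points form a DMA-mapping API that an
architecture can route its dma ops through. The sketch below mocks that
wiring in standalone C; the struct layout and the stub body are assumptions
for illustration only, not the kernel's actual dma_mapping_ops or the real
intel_map_single() implementation.

/* Sketch: hanging intel_* entry points off a dma-ops table (mock). */
#include <stdio.h>
#include <stddef.h>

typedef unsigned long long dma_addr_t;
struct device;

/* Stub standing in for the real intel_map_single(). */
static dma_addr_t intel_map_single(struct device *dev, unsigned long phys,
				   size_t size, int dir)
{
	(void)dev; (void)dir;
	printf("map %zu bytes at 0x%lx\n", size, phys);
	return (dma_addr_t)phys; /* identity mapping for the mock */
}

struct dma_ops_mock {
	dma_addr_t (*map_single)(struct device *, unsigned long, size_t, int);
};

static const struct dma_ops_mock intel_dma_ops_mock = {
	.map_single = intel_map_single,
};

int main(void)
{
	dma_addr_t handle = intel_dma_ops_mock.map_single(NULL, 0x1000, 256, 0);

	printf("dma handle 0x%llx\n", handle);
	return 0;
}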