author		David Woodhouse <David.Woodhouse@intel.com>	2008-10-20 20:16:53 +0100
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-10-20 20:19:36 +0100
commit		b364776ad1208a71f0c53578c84619a395412a8d (patch)
tree		d6050e5db6298095324ccb8af7d477684485d52e /include/linux
parent		6da0b38f4433fb0f24615449d7966471b6e5eae0 (diff)
parent		6c8909b42fee1be67647bcd2518161a0fa8ca533 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	drivers/pci/intel-iommu.c
Diffstat (limited to 'include/linux')
 -rw-r--r--  include/linux/dma_remapping.h | 27
 -rw-r--r--  include/linux/intel-iommu.h   | 66
 2 files changed, 64 insertions(+), 29 deletions(-)
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65f81d..952df39c989 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -2,15 +2,14 @@
 #define _DMA_REMAPPING_H
 
 /*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
  */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+#define VTD_PAGE_SHIFT		(12)
+#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
 
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
 
@@ -25,7 +24,7 @@ struct root_entry {
 	u64	val;
 	u64	rsvd1;
 };
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 static inline bool root_present(struct root_entry *root)
 {
 	return (root->val & 1);
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
-	root->val |= value & PAGE_MASK_4K;
+	root->val |= value & VTD_PAGE_MASK;
 }
 
 struct context_entry;
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root)
 {
 	return (struct context_entry *)
 		(root_present(root)?phys_to_virt(
-		root->val & PAGE_MASK_4K):
+		root->val & VTD_PAGE_MASK) :
 		NULL);
 }
 
@@ -67,7 +66,7 @@ struct context_entry {
 #define context_present(c) ((c).lo & 1)
 #define context_fault_disable(c) (((c).lo >> 1) & 1)
 #define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
 #define context_address_width(c) ((c).hi & 7)
 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
 
@@ -81,7 +80,7 @@ struct context_entry {
 	} while (0)
 #define CONTEXT_TT_MULTI_LEVEL	0
 #define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+	do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
 #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
 #define context_set_domain_id(c, val) \
 	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@ struct dma_pte {
 #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
 #define dma_set_pte_prot(p, prot) \
 		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
 #define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)
 
 struct intel_iommu;
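Aside (not part of the patch): the rename above is mechanical, but one behavioral change is easy to miss. IOVA_PFN now shifts by the host's PAGE_SHIFT, while the page-table macros keep the fixed 4KiB granularity that VT-d hardware requires. Below is a minimal standalone sketch of the VTD_PAGE_* arithmetic, with uint64_t standing in for the kernel's u64 and an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the renamed macros; uint64_t stands in for u64. */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((uint64_t)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

int main(void)
{
	uint64_t addr = 0x12345;	/* arbitrary unaligned DMA address */

	/* Round up to the next 4KiB boundary: prints 13000 */
	printf("%llx\n", (unsigned long long)VTD_PAGE_ALIGN(addr));
	/* Truncate to the containing 4KiB page: prints 12000 */
	printf("%llx\n", (unsigned long long)(addr & VTD_PAGE_MASK));
	return 0;
}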
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2e117f30a76..3d017cfd245 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
 #include <linux/io.h>
 #include <linux/dma_remapping.h>
 #include <asm/cacheflush.h>
+#include <asm/iommu.h>
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -127,6 +128,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 
 /* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET  60
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
@@ -140,6 +142,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_TLB_MAX_SIZE (0x3f)
 
 /* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET  61
 #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
 #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
 #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
@@ -200,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
-
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-	cycles_t start_time = get_cycles();\
-	while (1) {\
-		sts = op (iommu->reg + offset);\
-		if (cond)\
-			break;\
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
+do {									\
+	cycles_t start_time = get_cycles();				\
+	while (1) {							\
+		sts = op(iommu->reg + offset);				\
+		if (cond)						\
+			break;						\
 		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
-			panic("DMAR hardware is malfunctioning\n");\
-		cpu_relax();\
-	}\
-}
+			panic("DMAR hardware is malfunctioning\n");	\
+		cpu_relax();						\
+	}								\
+} while (0)
 
 #define QI_LENGTH	256	/* queue length */
 
@@ -238,6 +240,19 @@ enum {
 #define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
 
+#define QI_IOTLB_DID(did)	(((u64)did) << 16)
+#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
+#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
+#define QI_IOTLB_AM(am)		(((u8)am))
+
+#define QI_CC_FM(fm)		(((u64)fm) << 48)
+#define QI_CC_SID(sid)		(((u64)sid) << 32)
+#define QI_CC_DID(did)		(((u64)did) << 16)
+#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
 struct qi_desc {
 	u64 low, high;
 };
@@ -263,6 +278,13 @@ struct ir_table {
 };
 #endif
 
+struct iommu_flush {
+	int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		u64 type, int non_present_entry_flush);
+	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+		unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
 struct intel_iommu {
 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
 	u64		cap;
@@ -282,6 +304,7 @@ struct intel_iommu {
 	unsigned char	name[7];    /* Device Name */
 	struct msi_msg saved_msg;
 	struct sys_device sysdev;
+	struct iommu_flush flush;
 #endif
 	struct q_inval  *qi;            /* Queued invalidation info */
 #ifdef CONFIG_INTR_REMAP
@@ -303,6 +326,12 @@ extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
+extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+			    u8 fm, u64 type, int non_present_entry_flush);
+extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+			  unsigned int size_order, u64 type,
+			  int non_present_entry_flush);
+
 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 void intel_iommu_domain_exit(struct dmar_domain *domain);
@@ -324,4 +353,11 @@ static inline int intel_iommu_found(void)
 }
 #endif /* CONFIG_DMAR */
 
+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
 #endif
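Aside (not part of the patch): the IOMMU_WAIT_OP rewrite from a bare { ... } block to do { ... } while (0) is the standard multi-statement-macro fix; it lets the macro be used like a single statement, including in if/else chains. A minimal illustration with hypothetical helpers f, g and h:

/* Hypothetical helpers, purely for the illustration. */
static void f(int x) { (void)x; }
static void g(int x) { (void)x; }

#define BAD(x)	{ f(x); g(x); }			/* bare block */
#define GOOD(x)	do { f(x); g(x); } while (0)	/* absorbs the caller's ';' */

static void h(int c)
{
	if (c)
		GOOD(c);	/* expands cleanly; the ';' ends the do-while */
	else
		g(c);
	/*
	 * With BAD(c) instead, the expansion "{ ... };" leaves a stray ';'
	 * between the if-block and the else, which is a syntax error.
	 */
}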
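Likewise not part of the patch: the QI_IOTLB_*/QI_CC_* helpers exist so callers can assemble queued-invalidation descriptors field by field; the actual call sites land in drivers/pci/dmar.c and drivers/pci/intel-iommu.c. A hedged sketch of how an IOTLB-invalidate descriptor might be composed. QI_IOTLB_TYPE (the descriptor-type code in the low bits) is an assumed placeholder here, not something this diff defines:

/* Assumed placeholder for the descriptor-type code; not in this diff. */
#define QI_IOTLB_TYPE	2

static void example_build_iotlb_desc(struct qi_desc *desc, u16 did,
				     u64 addr, u8 size_order, u64 granu)
{
	/*
	 * granu is one of the DMA_TLB_*_FLUSH codes (encoded at bits 63:60
	 * for the register interface); QI_IOTLB_GRAN() shifts it down into
	 * the descriptor's granularity field (bits 5:4).
	 */
	desc->low = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(granu)
		| QI_IOTLB_DR(1) | QI_IOTLB_DW(1)	/* drain reads and writes */
		| QI_IOTLB_TYPE;
	desc->high = QI_IOTLB_ADDR(addr) | QI_IOTLB_AM(size_order);

	/* A caller would then queue it, e.g. qi_submit_sync(desc, iommu); */
}

The struct iommu_flush hooks added to struct intel_iommu serve the same end from the other side: the driver installs either register-based or queued-invalidation callbacks (qi_flush_context/qi_flush_iotlb declared above) at initialization, so the mapping paths flush through one interface regardless of which backend is active.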