author		Joerg Roedel <joerg.roedel@amd.com>	2009-09-02 16:00:23 +0200
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-09-03 16:03:45 +0200
commit		04bfdd8406099fca2e6b8844748c4d6c5eba8c8d
tree		bab300413ef4c176ddc4050a50365d2e95160845
parent		407d733e30a97daf5ea6f9eb5f9ebbd42a0a9ef2
x86/amd-iommu: Flush domains if address space size was increased
This patch introduces the update_domain function, which propagates the larger address space of a protection domain to the device table and flushes all relevant DTEs and the domain TLB.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
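For readers who want the control flow outside of diff context, below is a minimal userspace sketch of the pattern this patch adds: page-table growth marks the domain dirty, and update_domain() rewrites the device table entry of every device attached to the domain before flushing. The names mirror the kernel's, but the bodies of set_dte_entry, flush_devices_by_domain, and iommu_flush_domain, as well as the pd_table array and the LAST_BDF bound, are simplified stand-ins, not the real implementation; in the kernel, the updated flag is set by the code that actually grows the page table, which is not part of the hunks shown here.

	#include <stdbool.h>
	#include <stdio.h>

	#define LAST_BDF 4	/* toy bound; the kernel uses amd_iommu_last_bdf */

	struct protection_domain {
		int id;			/* domain id written to the device table */
		int mode;		/* paging mode (number of page-table levels) */
		unsigned long pt_root;	/* stand-in for the page-table root pointer */
		bool updated;		/* complete domain flush required */
	};

	/* stand-in for amd_iommu_pd_table[]: which domain each device belongs to */
	static struct protection_domain *pd_table[LAST_BDF + 1];

	static void set_dte_entry(int devid, struct protection_domain *d)
	{
		printf("DTE %d <- root=%#lx mode=%d\n", devid, d->pt_root, d->mode);
	}

	static void flush_devices_by_domain(struct protection_domain *d)
	{
		printf("flush DTEs of domain %d\n", d->id);
	}

	static void iommu_flush_domain(int id)
	{
		printf("flush TLB of domain %d\n", id);
	}

	/* rewrite the DTE of every device attached to this domain */
	static void update_device_table(struct protection_domain *d)
	{
		for (int i = 0; i <= LAST_BDF; ++i)
			if (pd_table[i] == d)
				set_dte_entry(i, d);
	}

	/* no-op unless the address space actually grew */
	static void update_domain(struct protection_domain *d)
	{
		if (!d->updated)
			return;

		update_device_table(d);
		flush_devices_by_domain(d);
		iommu_flush_domain(d->id);

		d->updated = false;
	}

	int main(void)
	{
		struct protection_domain dom = { .id = 1, .mode = 2, .pt_root = 0x1000 };

		pd_table[0] = pd_table[3] = &dom;

		update_domain(&dom);	/* clean domain: returns immediately */

		/* growing the address space would set ->updated (done elsewhere) */
		dom.mode    = 3;
		dom.pt_root = 0x2000;
		dom.updated = true;
		update_domain(&dom);	/* propagates + flushes, then clears the flag */

		return 0;
	}

The flag is what makes update_domain() cheap enough to call from every mapping path below: it stays a no-op on the common path and only pays for the device-table rewrite and TLB flush when the paging mode actually changed.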
 arch/x86/include/asm/amd_iommu_types.h |  1 +
 arch/x86/kernel/amd_iommu.c            | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 7fce4ef77bd..97f3d09d3be 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -235,6 +235,7 @@ struct protection_domain {
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
 	unsigned long flags;	/* flags to find out type of domain */
+	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	void *priv;		/* private data */
 };
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0fab1f1d135..5eab6a84b9c 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -63,6 +63,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned int pages);
 static u64 *fetch_pte(struct protection_domain *domain,
 		      unsigned long address);
+static void update_domain(struct protection_domain *domain);
 
 #ifndef BUS_NOTIFY_UNBOUND_DRIVER
 #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
@@ -546,6 +547,8 @@ static int iommu_map_page(struct protection_domain *dom,
 	*pte = __pte;
 
+	update_domain(dom);
+
 	return 0;
 }
@@ -762,9 +765,13 @@ static int alloc_new_range(struct amd_iommu *iommu,
 		dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
 	}
 
+	update_domain(&dma_dom->domain);
+
 	return 0;
 
 out_free:
+	update_domain(&dma_dom->domain);
+
 	free_page((unsigned long)dma_dom->aperture[index]->bitmap);
 
 	kfree(dma_dom->aperture[index]);
@@ -1294,6 +1301,29 @@ static int get_device_resources(struct device *dev,
 	return 1;
 }
 
+static void update_device_table(struct protection_domain *domain)
+{
+	int i;
+
+	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (amd_iommu_pd_table[i] != domain)
+			continue;
+		set_dte_entry(i, domain);
+	}
+}
+
+static void update_domain(struct protection_domain *domain)
+{
+	if (!domain->updated)
+		return;
+
+	update_device_table(domain);
+	flush_devices_by_domain(domain);
+	iommu_flush_domain(domain->id);
+
+	domain->updated = false;
+}
+
 /*
  * If the pte_page is not yet allocated this function is called
  */
@@ -1351,6 +1381,8 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
 	} else
 		pte += IOMMU_PTE_L0_INDEX(address);
 
+	update_domain(&dom->domain);
+
 	return pte;
 }