author | Donald Dutile <ddutile@redhat.com> | 2009-08-20 16:51:34 -0400
committer | David Woodhouse <David.Woodhouse@intel.com> | 2009-08-24 08:56:26 +0100
commit | 94a91b5051a77d8a71d4f11a3240f0d9c51b6cf2 (patch)
tree | 04d81ea79520cbd662024ee23cd3f834dd950ed0 /drivers
parent | 071e13746f9ebb259987c71ea77f11e7656769a2 (diff)
intel-iommu: iommu init error path bug fixes
The kcalloc() failure path in iommu_init_domains() calls
free_dmar_iommu(), which assumes that ->domains, ->domain_ids,
and ->lock have been properly initialized.
Add checks in free_dmar_iommu() so that ->domains and ->domain_ids are
not used if they were never allocated. Move the lock initialization to
before the kcalloc() calls, so the lock is valid in free_context_table()
when free_dmar_iommu() invokes it at the end.
Patch based on iommu-2.6,
commit 132032274a594ee9ffb6b9c9e2e9698149a09ea9
Signed-off-by: Donald Dutile <ddutile@redhat.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/pci/intel-iommu.c | 35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e67335ba24a..beb5ccfd89b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1158,6 +1158,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
         pr_debug("Number of Domains supportd <%ld>\n", ndomains);
         nlongs = BITS_TO_LONGS(ndomains);
 
+        spin_lock_init(&iommu->lock);
+
         /* TBD: there might be 64K domains,
          * consider other allocation for future chip
          */
@@ -1170,12 +1172,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                         GFP_KERNEL);
         if (!iommu->domains) {
                 printk(KERN_ERR "Allocating domain array failed\n");
-                kfree(iommu->domain_ids);
                 return -ENOMEM;
         }
 
-        spin_lock_init(&iommu->lock);
-
         /*
          * if Caching mode is set, then invalid translations are tagged
          * with domainid 0. Hence we need to pre-allocate it.
@@ -1195,22 +1194,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
         int i;
         unsigned long flags;
 
-        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
-        for (; i < cap_ndoms(iommu->cap); ) {
-                domain = iommu->domains[i];
-                clear_bit(i, iommu->domain_ids);
+        if ((iommu->domains) && (iommu->domain_ids)) {
+                i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+                for (; i < cap_ndoms(iommu->cap); ) {
+                        domain = iommu->domains[i];
+                        clear_bit(i, iommu->domain_ids);
+
+                        spin_lock_irqsave(&domain->iommu_lock, flags);
+                        if (--domain->iommu_count == 0) {
+                                if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                                        vm_domain_exit(domain);
+                                else
+                                        domain_exit(domain);
+                        }
+                        spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
-                spin_lock_irqsave(&domain->iommu_lock, flags);
-                if (--domain->iommu_count == 0) {
-                        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-                                vm_domain_exit(domain);
-                        else
-                                domain_exit(domain);
+                        i = find_next_bit(iommu->domain_ids,
+                                cap_ndoms(iommu->cap), i+1);
                 }
-                spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
-                i = find_next_bit(iommu->domain_ids,
-                        cap_ndoms(iommu->cap), i+1);
         }
 
         if (iommu->gcmd & DMA_GCMD_TE)
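
For illustration only, here is a minimal userspace C sketch of the pattern the fix applies (hypothetical fake_* names, with a pthread mutex standing in for iommu->lock; this is not kernel code): initialize the lock before any allocation, and make the teardown path tolerate fields that were never allocated.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

/* Hypothetical stand-in for struct intel_iommu's domain bookkeeping. */
struct fake_iommu {
        unsigned long *domain_ids;   /* bitmap of allocated domain IDs */
        void **domains;              /* per-ID domain pointers */
        size_t ndomains;
        pthread_mutex_t lock;        /* stands in for iommu->lock */
};

/* Mirrors the fixed iommu_init_domains(): the lock becomes valid
 * before the allocations, so teardown may always take it. */
static int fake_init_domains(struct fake_iommu *iommu, size_t ndomains)
{
        pthread_mutex_init(&iommu->lock, NULL);

        iommu->ndomains = ndomains;
        iommu->domain_ids = calloc((ndomains + 63) / 64, sizeof(unsigned long));
        if (!iommu->domain_ids)
                return -1;

        iommu->domains = calloc(ndomains, sizeof(void *));
        if (!iommu->domains)
                return -1;   /* partial state is handled by teardown */

        return 0;
}

/* Mirrors the fixed free_dmar_iommu(): only walk the domain table
 * if both arrays were actually allocated. */
static void fake_free_iommu(struct fake_iommu *iommu)
{
        if (iommu->domains && iommu->domain_ids) {
                pthread_mutex_lock(&iommu->lock);
                /* ... per-domain teardown would go here ... */
                pthread_mutex_unlock(&iommu->lock);
        }
        free(iommu->domains);      /* free(NULL) is a no-op */
        free(iommu->domain_ids);
        pthread_mutex_destroy(&iommu->lock);
}

int main(void)
{
        struct fake_iommu iommu = { 0 };

        if (fake_init_domains(&iommu, 256) != 0)
                fprintf(stderr, "init failed, tearing down partial state\n");

        /* Safe even if init failed part-way through. */
        fake_free_iommu(&iommu);
        return 0;
}

The ordering is the same one the patch above establishes: spin_lock_init() moves ahead of the kcalloc() calls, and free_dmar_iommu() checks ->domains and ->domain_ids before walking the domain table.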