iommu/vt-d: Use device_domain_lock accurately

The device_domain_lock is used to protect the device tracking list of
a domain. Remove the unnecessary spin_lock/unlock() calls and move the
necessary ones so that they wrap only the list accesses.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20220706025524.2904370-11-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author:    Lu Baolu <baolu.lu@linux.intel.com>
Date:      2022-07-12 08:09:01 +08:00
Committer: Joerg Roedel <jroedel@suse.de>
Commit:    969aaefbaa
Parent:    db75c9573b

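For readers skimming the diff, the locking pattern being applied is sketched below in simplified form: instead of asserting that the caller already holds device_domain_lock across a whole helper, each helper now takes and releases the lock only around its walk of the domain's device list. The sketch is modeled on the new domain_update_iotlb() in the diff below; struct domain_sketch and domain_sketch_update_iotlb() are made-up stand-ins for the real driver types, not actual kernel code.

/* Illustrative sketch only: simplified stand-ins for the driver structures. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(device_domain_lock);

struct device_domain_info {
	struct list_head link;		/* node in the domain's device list */
	bool ats_enabled;
};

struct domain_sketch {			/* trimmed-down stand-in for struct dmar_domain */
	struct list_head devices;	/* device list protected by device_domain_lock */
	bool has_iotlb_device;
};

/*
 * The lock is taken only around the list walk instead of being asserted
 * as already held by the caller (compare domain_update_iotlb() below).
 */
static void domain_sketch_update_iotlb(struct domain_sketch *domain)
{
	struct device_domain_info *info;
	bool has_iotlb_device = false;

	spin_lock(&device_domain_lock);
	list_for_each_entry(info, &domain->devices, link) {
		if (info->ats_enabled) {
			has_iotlb_device = true;
			break;
		}
	}
	domain->has_iotlb_device = has_iotlb_device;
	spin_unlock(&device_domain_lock);
}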

--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -535,15 +535,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
 	struct device_domain_info *info;
 	int nid = NUMA_NO_NODE;
 
-	assert_spin_locked(&device_domain_lock);
-
-	if (list_empty(&domain->devices))
-		return NUMA_NO_NODE;
-
+	spin_lock(&device_domain_lock);
 	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->dev)
-			continue;
-
 		/*
 		 * There could possibly be multiple device numa nodes as devices
 		 * within the same domain may sit behind different IOMMUs. There
@@ -554,6 +547,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
 		if (nid != NUMA_NO_NODE)
 			break;
 	}
+	spin_unlock(&device_domain_lock);
 
 	return nid;
 }
@@ -1376,23 +1370,23 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 }
 
 static struct device_domain_info *
-iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
-			 u8 bus, u8 devfn)
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+			u8 bus, u8 devfn)
 {
 	struct device_domain_info *info;
 
-	assert_spin_locked(&device_domain_lock);
-
 	if (!iommu->qi)
 		return NULL;
 
-	list_for_each_entry(info, &domain->devices, link)
+	spin_lock(&device_domain_lock);
+	list_for_each_entry(info, &domain->devices, link) {
 		if (info->iommu == iommu && info->bus == bus &&
 		    info->devfn == devfn) {
-			if (info->ats_supported && info->dev)
-				return info;
-			break;
+			spin_unlock(&device_domain_lock);
+			return info->ats_supported ? info : NULL;
 		}
+	}
+	spin_unlock(&device_domain_lock);
 
 	return NULL;
 }
@@ -1402,23 +1396,21 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 	struct device_domain_info *info;
 	bool has_iotlb_device = false;
 
-	assert_spin_locked(&device_domain_lock);
-
-	list_for_each_entry(info, &domain->devices, link)
+	spin_lock(&device_domain_lock);
+	list_for_each_entry(info, &domain->devices, link) {
 		if (info->ats_enabled) {
 			has_iotlb_device = true;
 			break;
 		}
-
+	}
 	domain->has_iotlb_device = has_iotlb_device;
+	spin_unlock(&device_domain_lock);
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
 
-	assert_spin_locked(&device_domain_lock);
-
 	if (!info || !dev_is_pci(info->dev))
 		return;
 
@@ -1464,8 +1456,6 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
 
-	assert_spin_locked(&device_domain_lock);
-
 	if (!dev_is_pci(info->dev))
 		return;
 
@@ -1906,9 +1896,10 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 				      struct pasid_table *table,
 				      u8 bus, u8 devfn)
 {
+	struct device_domain_info *info =
+			iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 	u16 did = domain->iommu_did[iommu->seq_id];
 	int translation = CONTEXT_TT_MULTI_LEVEL;
-	struct device_domain_info *info = NULL;
 	struct context_entry *context;
 	int ret;
 
@@ -1922,9 +1913,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 	BUG_ON(!domain->pgd);
 
-	spin_lock(&device_domain_lock);
 	spin_lock(&iommu->lock);
-
 	ret = -ENOMEM;
 	context = iommu_context_addr(iommu, bus, devfn, 1);
 	if (!context)
@@ -1975,7 +1964,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 		 * Setup the Device-TLB enable bit and Page request
 		 * Enable bit:
 		 */
-		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 		if (info && info->ats_supported)
 			context_set_sm_dte(context);
 		if (info && info->pri_supported)
@@ -1998,7 +1986,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 					goto out_unlock;
 			}
 
-			info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 			if (info && info->ats_supported)
 				translation = CONTEXT_TT_DEV_IOTLB;
 			else
@@ -2044,7 +2031,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 out_unlock:
 	spin_unlock(&iommu->lock);
-	spin_unlock(&device_domain_lock);
 
 	return ret;
 }
@@ -2456,13 +2442,11 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	if (!iommu)
 		return -ENODEV;
 
-	spin_lock(&device_domain_lock);
-	info->domain = domain;
 	ret = domain_attach_iommu(domain, iommu);
-	if (ret) {
-		spin_unlock(&device_domain_lock);
+	if (ret)
 		return ret;
-	}
+	info->domain = domain;
+	spin_lock(&device_domain_lock);
 	list_add(&info->link, &domain->devices);
 	spin_unlock(&device_domain_lock);
 
@@ -4638,9 +4622,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 	if (!domain)
 		return -EINVAL;
 
-	spin_lock(&device_domain_lock);
 	spin_lock(&iommu->lock);
-
 	ret = -EINVAL;
 	if (!info->pasid_supported)
 		goto out;
@@ -4670,7 +4652,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 
  out:
 	spin_unlock(&iommu->lock);
-	spin_unlock(&device_domain_lock);
 
 	return ret;
 }