author		Joerg Roedel <jroedel@suse.de>	2015-07-21 11:41:21 +0300
committer	Joerg Roedel <jroedel@suse.de>	2015-08-12 17:23:33 +0300
commit		8bf478163e69e42973c7070179a11815139e5bf0 (patch)
tree		ea64f7177aa39fc71a0847687e8dd3c34fdd6fa7 /drivers/iommu
parent		9452d5bfe5c3df6befb89835d2c44920e03bd390 (diff)
iommu/vt-d: Split up iommu->domains array
This array is indexed by the domain-id and contains the pointers to the
domains attached to this iommu. Modern systems support 65536 domain ids,
so this array has a size of 512 KiB per iommu.

This is a huge waste of space, as the array is usually sparsely populated.
This patch makes the array two-dimensional and allocates the memory for
the domain pointers on-demand.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
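For illustration only (this sketch is not part of the patch and uses invented names such as toy_iommu and toy_set_domain), the following stand-alone C program models the two-level lookup the patch introduces: the upper 8 bits of the domain-id (did >> 8) select a lazily allocated 256-entry chunk, and the lower 8 bits (did & 0xff) index into it.

/*
 * Illustrative sketch, not part of the patch: a user-space model of the
 * two-level table described above. The outer table has
 * (ndomains >> 8) + 1 slots; each slot points to a 256-entry chunk that
 * is allocated only when a domain-id in that range is first used, so a
 * sparsely used id space costs little memory. All names here are
 * invented for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_domain {
	int id;
};

struct toy_iommu {
	struct toy_domain ***domains;	/* outer table of 256-entry chunks */
	uint32_t ndomains;
};

static int toy_init_domains(struct toy_iommu *iommu, uint32_t ndomains)
{
	size_t size = ((ndomains >> 8) + 1) * sizeof(struct toy_domain **);

	iommu->ndomains = ndomains;
	iommu->domains = calloc(1, size);	/* outer table only */
	return iommu->domains ? 0 : -1;
}

static void toy_set_domain(struct toy_iommu *iommu, uint16_t did,
			   struct toy_domain *domain)
{
	int idx = did >> 8;

	/* Allocate the second-level chunk on first use of this id range. */
	if (!iommu->domains[idx])
		iommu->domains[idx] = calloc(256, sizeof(struct toy_domain *));
	if (iommu->domains[idx])
		iommu->domains[idx][did & 0xff] = domain;
}

static struct toy_domain *toy_get_domain(struct toy_iommu *iommu, uint16_t did)
{
	struct toy_domain **chunk = iommu->domains[did >> 8];

	return chunk ? chunk[did & 0xff] : NULL;
}

int main(void)
{
	struct toy_iommu iommu;
	struct toy_domain dom = { .id = 42 };

	if (toy_init_domains(&iommu, 65536))
		return 1;

	toy_set_domain(&iommu, 0x2a2a, &dom);	/* chunk 0x2a allocated here */
	printf("did 0x2a2a -> id %d\n", toy_get_domain(&iommu, 0x2a2a)->id);
	printf("did 0x0001 -> %p\n", (void *)toy_get_domain(&iommu, 0x0001));
	return 0;
}

With 65536 domain ids, the always-present cost drops from one 512 KiB array to a 257-entry pointer table of roughly 2 KiB, plus 2 KiB per 256-id range actually in use.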
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/intel-iommu.c	54
1 file changed, 43 insertions, 11 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1c2d6126e5fd..90ab4b0d975c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -571,13 +571,32 @@ static struct kmem_cache *iommu_devinfo_cache;
 
 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
 {
-	return iommu->domains[did];
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	domains = iommu->domains[idx];
+	if (!domains)
+		return NULL;
+
+	return domains[did & 0xff];
 }
 
 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
 			     struct dmar_domain *domain)
 {
-	iommu->domains[did] = domain;
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	if (!iommu->domains[idx]) {
+		size_t size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+	}
+
+	domains = iommu->domains[idx];
+	if (WARN_ON(!domains))
+		return;
+	else
+		domains[did & 0xff] = domain;
 }
 
 static inline void *alloc_pgtable_page(int node)
@@ -1530,35 +1549,43 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
-	unsigned long ndomains;
-	unsigned long nlongs;
+	u32 ndomains, nlongs;
+	size_t size;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("%s: Number of Domains supported <%ld>\n",
+	pr_debug("%s: Number of Domains supported <%d>\n",
 		 iommu->name, ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
 
-	/* TBD: there might be 64K domains,
-	 * consider other allocation for future chip
-	 */
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
 	if (!iommu->domain_ids) {
 		pr_err("%s: Allocating domain id array failed\n",
 		       iommu->name);
 		return -ENOMEM;
 	}
-	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
-			GFP_KERNEL);
-	if (!iommu->domains) {
+
+	size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+	iommu->domains = kzalloc(size, GFP_KERNEL);
+
+	if (iommu->domains) {
+		size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+	}
+
+	if (!iommu->domains || !iommu->domains[0]) {
 		pr_err("%s: Allocating domain array failed\n",
 		       iommu->name);
 		kfree(iommu->domain_ids);
+		kfree(iommu->domains);
 		iommu->domain_ids = NULL;
+		iommu->domains = NULL;
 		return -ENOMEM;
 	}
 
+
+
 	/*
 	 * If Caching mode is set, then invalid translations are tagged
 	 * with domain-id 0, hence we need to pre-allocate it. We also
@@ -1600,6 +1627,11 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	if ((iommu->domains) && (iommu->domain_ids)) {
+		int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+		int i;
+
+		for (i = 0; i < elems; i++)
+			kfree(iommu->domains[i]);
 		kfree(iommu->domains);
 		kfree(iommu->domain_ids);
 		iommu->domains = NULL;