author    Russell King <rmk+kernel@arm.linux.org.uk>  2015-07-27 15:30:02 +0300
committer Thierry Reding <treding@nvidia.com>  2015-08-13 17:06:43 +0300
commit    707917cbc6ac0c0ea968b5eb635722ea84808286 (patch)
tree      918113654c8ff1ad3e5796d46ed1f9974e3c96bd /drivers/iommu
parent    05a65f06f69fa6c487c2933f2971d9ec4e33eb0d (diff)
iommu/tegra-smmu: Use __GFP_ZERO to allocate zeroed pages
Rather than explicitly zeroing pages allocated via alloc_page(), add
__GFP_ZERO to the gfp mask to ask the allocator for zeroed pages.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
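
In effect the patch swaps an open-coded zeroing loop for a flag to the page
allocator. A minimal sketch of the pattern (not the driver's exact code;
declarations and the surrounding error handling are elided):

    /* Before: allocate a page, then clear every entry by hand. */
    page = alloc_page(GFP_KERNEL | __GFP_DMA);
    if (!page)
            return NULL;
    pd = page_address(page);
    for (i = 0; i < SMMU_NUM_PDE; i++)
            pd[i] = 0;

    /* After: ask the allocator for a page that is already zeroed. */
    page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
    if (!page)
            return NULL;
    pd = page_address(page);

The same simplification is applied to both the page-directory allocation in
tegra_smmu_domain_alloc() and the page-table allocation in as_get_pte().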
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/tegra-smmu.c  18
1 file changed, 2 insertions, 16 deletions
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index eb9f6068fe2e..27d31f62a822 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -258,8 +258,6 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
- unsigned int i;
- uint32_t *pd;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
@@ -270,7 +268,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+ as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
if (!as->pd) {
kfree(as);
return NULL;
@@ -291,12 +289,6 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
return NULL;
}
- /* clear PDEs */
- pd = page_address(as->pd);
-
- for (i = 0; i < SMMU_NUM_PDE; i++)
- pd[i] = 0;
-
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
@@ -533,21 +525,15 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
u32 *pd = page_address(as->pd), *pt;
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- unsigned int i;
if (!as->pts[pde]) {
struct page *page;
dma_addr_t dma;
- page = alloc_page(GFP_KERNEL | __GFP_DMA);
+ page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
if (!page)
return NULL;
- pt = page_address(page);
-
- for (i = 0; i < SMMU_NUM_PTE; i++)
- pt[i] = 0;
-
dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {