author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-07-10 19:46:20 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-07-10 19:46:20 +0300
commit | 071e5aceebebf1d33b5c29ccfd2688ed39c60007 (patch)
tree | 8f1800a962fb22a857939e1f50d213968c8a2e11 /drivers/iommu
parent | e083bbd6040f4efa5c13633fb4e460b919d69dae (diff)
parent | 2afd1c20e7547887f37f638d6b7953138d8c948e (diff)
download | linux-071e5aceebebf1d33b5c29ccfd2688ed39c60007.tar.xz
Merge tag 'arm-drivers-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM driver updates from Olof Johansson:
- Reset controllers: Add support for the Microchip Sparx5 switch.
- Memory controllers: ARM Primecell PL35x SMC memory controller driver
cleanups and improvements.
- i.MX SoC drivers: Power domain support for i.MX8MM and i.MX8MN.
- Rockchip: RK3568 power domain support, plus DT binding updates and
cleanups.
- Qualcomm SoC drivers: Amend socinfo with more SoC/PMIC details,
including support for MSM8226, MDM9607, SM6125 and SC8180X.
- ARM FFA driver: "Firmware Framework for ARMv8-A", defining management
interfaces and communication (including a bus model) between partitions
in both the Normal and Secure Worlds.
- Tegra memory controller changes, including a major rework to deal with
identity mappings at boot and integration with the ARM SMMU driver.
* tag 'arm-drivers-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (120 commits)
firmware: turris-mox-rwtm: add marvell,armada-3700-rwtm-firmware compatible string
firmware: turris-mox-rwtm: show message about HWRNG registration
firmware: turris-mox-rwtm: fail probing when firmware does not support hwrng
firmware: turris-mox-rwtm: report failures better
firmware: turris-mox-rwtm: fix reply status decoding function
soc: imx: gpcv2: add support for i.MX8MN power domains
dt-bindings: add defines for i.MX8MN power domains
firmware: tegra: bpmp: Fix Tegra234-only builds
iommu/arm-smmu: Use Tegra implementation on Tegra186
iommu/arm-smmu: tegra: Implement SID override programming
iommu/arm-smmu: tegra: Detect number of instances at runtime
dt-bindings: arm-smmu: Add Tegra186 compatible string
firmware: qcom_scm: Add MDM9607 compatible
soc: qcom: rpmpd: Add MDM9607 RPM Power Domains
soc: renesas: Add support to read LSI DEVID register of RZ/G2{L,LC} SoC's
soc: renesas: Add ARCH_R9A07G044 for the new RZ/G2L SoC's
dt-bindings: soc: rockchip: drop unnecessary #phy-cells from grf.yaml
memory: emif: remove unused frequency and voltage notifiers
memory: fsl_ifc: fix leak of private memory on probe failure
memory: fsl_ifc: fix leak of IO mapping on probe failure
...
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 3
-rw-r--r-- | drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 90
-rw-r--r-- | drivers/iommu/tegra-smmu.c | 16
3 files changed, 81 insertions, 28 deletions
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 136872e77195..9f465e146799 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -211,7 +211,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
         if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
                 smmu->impl = &calxeda_impl;
 
-        if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
+        if (of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
+            of_device_is_compatible(np, "nvidia,tegra186-smmu"))
                 return nvidia_smmu_impl_init(smmu);
 
         smmu = qcom_smmu_impl_init(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 29117444e5a0..01e9b50b10a1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -7,6 +7,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+#include <soc/tegra/mc.h>
+
 #include "arm-smmu.h"
 
 /*
@@ -15,18 +17,32 @@
  * interleaved IOVA accesses across them and translates accesses from
  * non-isochronous HW devices.
  * Third one is used for translating accesses from isochronous HW devices.
+ *
+ * In addition, the SMMU driver needs to coordinate with the memory controller
+ * driver to ensure that the right SID override is programmed for any given
+ * memory client. This is necessary to allow for use-case such as seamlessly
+ * handing over the display controller configuration from the firmware to the
+ * kernel.
+ *
  * This implementation supports programming of the two instances that must
- * be programmed identically.
- * The third instance usage is through standard arm-smmu driver itself and
- * is out of scope of this implementation.
+ * be programmed identically and takes care of invoking the memory controller
+ * driver for SID override programming after devices have been attached to an
+ * SMMU instance.
  */
-#define NUM_SMMU_INSTANCES 2
+#define MAX_SMMU_INSTANCES 2
 
 struct nvidia_smmu {
-        struct arm_smmu_device  smmu;
-        void __iomem            *bases[NUM_SMMU_INSTANCES];
+        struct arm_smmu_device smmu;
+        void __iomem *bases[MAX_SMMU_INSTANCES];
+        unsigned int num_instances;
+        struct tegra_mc *mc;
 };
 
+static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
+{
+        return container_of(smmu, struct nvidia_smmu, smmu);
+}
+
 static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
                                              unsigned int inst, int page)
 {
@@ -47,9 +63,10 @@ static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
                                   int page, int offset, u32 val)
 {
+        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
         unsigned int i;
 
-        for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+        for (i = 0; i < nvidia->num_instances; i++) {
                 void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
                 writel_relaxed(val, reg);
@@ -67,9 +84,10 @@ static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
                                     int page, int offset, u64 val)
 {
+        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
         unsigned int i;
 
-        for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+        for (i = 0; i < nvidia->num_instances; i++) {
                 void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
                 writeq_relaxed(val, reg);
@@ -79,6 +97,7 @@ static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
                                  int sync, int status)
 {
+        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
         unsigned int delay;
 
         arm_smmu_writel(smmu, page, sync, 0);
@@ -90,7 +109,7 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
                 u32 val = 0;
                 unsigned int i;
 
-                for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+                for (i = 0; i < nvidia->num_instances; i++) {
                         void __iomem *reg;
 
                         reg = nvidia_smmu_page(smmu, i, page) + status;
@@ -112,9 +131,10 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 
 static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
 {
+        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
         unsigned int i;
 
-        for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+        for (i = 0; i < nvidia->num_instances; i++) {
                 u32 val;
                 void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
                                     ARM_SMMU_GR0_sGFSR;
@@ -157,8 +177,9 @@ static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
         unsigned int inst;
         irqreturn_t ret = IRQ_NONE;
         struct arm_smmu_device *smmu = dev;
+        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 
-        for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+        for (inst = 0; inst < nvidia->num_instances; inst++) {
                 irqreturn_t irq_ret;
 
                 irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
@@ -202,11 +223,13 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
         struct arm_smmu_device *smmu;
         struct iommu_domain *domain = dev;
         struct arm_smmu_domain *smmu_domain;
+        struct nvidia_smmu *nvidia;
 
         smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
         smmu = smmu_domain->smmu;
+        nvidia = to_nvidia_smmu(smmu);
 
-        for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+        for (inst = 0; inst < nvidia->num_instances; inst++) {
                 irqreturn_t irq_ret;
 
                 /*
@@ -224,6 +247,17 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
         return ret;
 }
 
+static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
+{
+        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+        int err;
+
+        err = tegra_mc_probe_device(nvidia->mc, dev);
+        if (err < 0)
+                dev_err(smmu->dev, "memory controller probe failed for %s: %d\n",
+                        dev_name(dev), err);
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
         .read_reg = nvidia_smmu_read_reg,
         .write_reg = nvidia_smmu_write_reg,
@@ -233,6 +267,11 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
         .tlb_sync = nvidia_smmu_tlb_sync,
         .global_fault = nvidia_smmu_global_fault,
         .context_fault = nvidia_smmu_context_fault,
+        .probe_finalize = nvidia_smmu_probe_finalize,
+};
+
+static const struct arm_smmu_impl nvidia_smmu_single_impl = {
+        .probe_finalize = nvidia_smmu_probe_finalize,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
@@ -241,23 +280,36 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
         struct device *dev = smmu->dev;
         struct nvidia_smmu *nvidia_smmu;
         struct platform_device *pdev = to_platform_device(dev);
+        unsigned int i;
 
         nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
         if (!nvidia_smmu)
                 return ERR_PTR(-ENOMEM);
 
+        nvidia_smmu->mc = devm_tegra_memory_controller_get(dev);
+        if (IS_ERR(nvidia_smmu->mc))
+                return ERR_CAST(nvidia_smmu->mc);
+
         /* Instance 0 is ioremapped by arm-smmu.c. */
         nvidia_smmu->bases[0] = smmu->base;
+        nvidia_smmu->num_instances++;
+
+        for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
+                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+                if (!res)
+                        break;
 
-        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-        if (!res)
-                return ERR_PTR(-ENODEV);
+                nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
+                if (IS_ERR(nvidia_smmu->bases[i]))
+                        return ERR_CAST(nvidia_smmu->bases[i]);
 
-        nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
-        if (IS_ERR(nvidia_smmu->bases[1]))
-                return ERR_CAST(nvidia_smmu->bases[1]);
+                nvidia_smmu->num_instances++;
+        }
 
-        nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+        if (nvidia_smmu->num_instances == 1)
+                nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
+        else
+                nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
 
         return &nvidia_smmu->smmu;
 }
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1e98dc63ad13..0a281833f611 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -376,9 +376,9 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                 if (client->swgroup != swgroup)
                         continue;
 
-                value = smmu_readl(smmu, client->smmu.reg);
-                value |= BIT(client->smmu.bit);
-                smmu_writel(smmu, value, client->smmu.reg);
+                value = smmu_readl(smmu, client->regs.smmu.reg);
+                value |= BIT(client->regs.smmu.bit);
+                smmu_writel(smmu, value, client->regs.smmu.reg);
         }
 }
 
@@ -404,9 +404,9 @@ static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                 if (client->swgroup != swgroup)
                         continue;
 
-                value = smmu_readl(smmu, client->smmu.reg);
-                value &= ~BIT(client->smmu.bit);
-                smmu_writel(smmu, value, client->smmu.reg);
+                value = smmu_readl(smmu, client->regs.smmu.reg);
+                value &= ~BIT(client->regs.smmu.bit);
+                smmu_writel(smmu, value, client->regs.smmu.reg);
         }
 }
 
@@ -1042,9 +1042,9 @@ static int tegra_smmu_clients_show(struct seq_file *s, void *data)
                 const struct tegra_mc_client *client = &smmu->soc->clients[i];
                 const char *status;
 
-                value = smmu_readl(smmu, client->smmu.reg);
+                value = smmu_readl(smmu, client->regs.smmu.reg);
 
-                if (value & BIT(client->smmu.bit))
+                if (value & BIT(client->regs.smmu.bit))
                         status = "yes";
                 else
                         status = "no";
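The pattern at the core of the arm-smmu-nvidia.c changes above is worth spelling out: the generic struct arm_smmu_device is embedded as the first member of a wrapper, container_of() recovers the wrapper from the pointer the core driver hands back, and each register write is then mirrored to however many instances were discovered at probe time. The following stand-alone C sketch illustrates only that pattern; the names (smmu_device, nvidia_like_smmu, broadcast_write) and the register layout are invented for the example and are not part of the kernel API.

/* User-space illustration of the container_of + broadcast-write pattern
 * used by arm-smmu-nvidia.c. All names here are illustrative only. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_INSTANCES 2

/* Same trick as the kernel's container_of(): recover the outer struct
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for the generic arm_smmu_device seen by the core driver. */
struct smmu_device {
        uint32_t *base;                 /* instance 0 registers */
};

/* Stand-in for struct nvidia_smmu: the generic device embedded first,
 * plus the per-instance register bases discovered at probe time. */
struct nvidia_like_smmu {
        struct smmu_device smmu;
        uint32_t *bases[MAX_INSTANCES];
        unsigned int num_instances;
};

/* Mirror a register write to every detected instance, in the spirit of
 * nvidia_smmu_write_reg() in the diff above. */
static void broadcast_write(struct smmu_device *smmu, unsigned int offset,
                            uint32_t val)
{
        struct nvidia_like_smmu *nvidia =
                container_of(smmu, struct nvidia_like_smmu, smmu);
        unsigned int i;

        for (i = 0; i < nvidia->num_instances; i++)
                nvidia->bases[i][offset] = val;
}

int main(void)
{
        static uint32_t regs0[16], regs1[16];
        struct nvidia_like_smmu nvidia = {
                .smmu = { .base = regs0 },
                .bases = { regs0, regs1 },
                .num_instances = 2,
        };

        /* The core driver only ever holds a struct smmu_device pointer. */
        broadcast_write(&nvidia.smmu, 3, 0xabcd);

        printf("inst0=0x%x inst1=0x%x\n",
               (unsigned int)regs0[3], (unsigned int)regs1[3]);
        return 0;
}

Reads do not need the same treatment: since the ganged instances must be programmed identically (see the comment block added in the diff), reading back from a single instance is sufficient, which is why only the write, sync, reset and fault paths in the hunks above are converted to loop over num_instances.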