Diffstat (limited to 'drivers/gpu/drm/panthor/panthor_mmu.c')
 drivers/gpu/drm/panthor/panthor_mmu.c | 125
 1 file changed, 69 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6ca9a2642a4e..6dec4354e378 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -29,6 +29,7 @@
#include "panthor_device.h"
#include "panthor_gem.h"
+#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
@@ -510,9 +511,9 @@ static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending.
*/
- ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
- val, !(val & AS_STATUS_AS_ACTIVE),
- 10, 100000);
+ ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
+ !(val & AS_STATUS_AS_ACTIVE),
+ 10, 100000);
if (ret) {
panthor_device_schedule_reset(ptdev);
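The new poll helper is not defined in this file. A minimal sketch of what such a wrapper could look like, assuming it simply forwards to readl_relaxed_poll_timeout_atomic() on the device's register mapping (the name and expansion here are illustrative, not the driver's actual definition):

/* Hypothetical sketch only: a register-level atomic poll built on
 * readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h>. The real macro
 * lives in the panthor register headers and may differ.
 */
#include <linux/iopoll.h>

#define gpu_read_relaxed_poll_timeout_atomic(ptdev, reg, val, cond,	\
					     delay_us, timeout_us)	\
	readl_relaxed_poll_timeout_atomic((ptdev)->iomem + (reg), val,	\
					  cond, delay_us, timeout_us)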
@@ -564,16 +565,31 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
region = region_width | region_start;
/* Lock the region that needs to be updated */
- gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
- gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
+ gpu_write64(ptdev, AS_LOCKADDR(as_nr), region);
write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
}
static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
u64 iova, u64 size, u32 op)
{
+ const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV;
+ u32 lsc_flush_op;
+ int ret;
+
lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+ switch (op) {
+ case AS_COMMAND_FLUSH_MEM:
+ lsc_flush_op = CACHE_CLEAN | CACHE_INV;
+ break;
+ case AS_COMMAND_FLUSH_PT:
+ lsc_flush_op = 0;
+ break;
+ default:
+ drm_WARN(&ptdev->base, 1, "Unexpected AS_COMMAND: %d", op);
+ return -EINVAL;
+ }
+
if (as_nr < 0)
return 0;
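The LO/HI register pairs collapse into single 64-bit accessors in this patch. A hedged sketch of one plausible shape for such a helper, assuming each 64-bit register exposes its high word 4 bytes above the low word; the driver's actual accessor may be implemented differently:

/* Hypothetical sketch: 64-bit write built from two 32-bit gpu_write() calls,
 * assuming a LO word at 'reg' and a HI word at 'reg + 4'. Illustrative only.
 */
static inline void gpu_write64(struct panthor_device *ptdev, u32 reg, u64 data)
{
	gpu_write(ptdev, reg, lower_32_bits(data));
	gpu_write(ptdev, reg + 4, upper_32_bits(data));
}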
@@ -583,13 +599,24 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
* power it up
*/
- if (op != AS_COMMAND_UNLOCK)
- lock_region(ptdev, as_nr, iova, size);
+ lock_region(ptdev, as_nr, iova, size);
- /* Run the MMU operation */
- write_cmd(ptdev, as_nr, op);
+ ret = wait_ready(ptdev, as_nr);
+ if (ret)
+ return ret;
- /* Wait for the flush to complete */
+ ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Explicitly unlock the region as the AS is not unlocked automatically
+ * at the end of the GPU_CONTROL cache flush command, unlike
+ * AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT.
+ */
+ write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK);
+
+ /* Wait for the unlock command to complete */
return wait_ready(ptdev, as_nr);
}
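After this rewrite the helper always performs LOCK, wait, GPU_CONTROL cache flush, explicit UNLOCK, wait, instead of issuing AS_COMMAND_FLUSH_MEM/FLUSH_PT directly. For orientation, a hedged sketch of how a range-flush caller presumably drives it, assuming an mmu_hw_do_operation() wrapper that takes the AS slot lock around the _locked variant (that wrapper is not shown in this diff):

/* Hypothetical caller sketch: flush the page-table caches covering
 * [iova, iova + size) for one VM. FLUSH_PT maps to an L2 clean+invalidate
 * with no LSC flush in the switch statement above.
 */
static int flush_range_example(struct panthor_vm *vm, u64 iova, u64 size)
{
	return mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
}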
@@ -615,14 +642,9 @@ static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -635,14 +657,9 @@ static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -896,17 +913,6 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
return ret;
}
-/**
- * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
- * @vm: VM whose cache to flush
- *
- * Return: 0 on success, a negative error code if flush failed.
- */
-int panthor_vm_flush_all(struct panthor_vm *vm)
-{
- return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
-}
-
static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
@@ -1096,9 +1102,9 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
* GEM vm_bo list.
*/
dma_resv_lock(drm_gpuvm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
unpin = drm_gpuvm_bo_put(vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(drm_gpuvm_resv(vm));
/* If the vm_bo object was destroyed, release the pin reference that
@@ -1216,7 +1222,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
(flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
return -EINVAL;
- /* Make sure the VA and size are aligned and in-bounds. */
+ /* Make sure the VA and size are in-bounds. */
if (size > bo->base.base.size || offset > bo->base.base.size - size)
return -EINVAL;
@@ -1271,9 +1277,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* calling this function.
*/
dma_resv_lock(panthor_vm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(panthor_vm_resv(vm));
/* If the a vm_bo for this <VM,BO> combination exists, it already
@@ -1681,8 +1687,7 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
u32 source_id;
fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
- addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
- addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
+ addr = gpu_read64(ptdev, AS_FAULTADDRESS(as));
/* decode the fault status */
exception_type = fault_status & 0xFF;
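The fault-address read gets the same treatment as the writes above. A mirror-image sketch of a 64-bit read accessor, under the same LO/HI layout assumption and again purely illustrative:

/* Hypothetical sketch, same layout assumption as the gpu_write64() sketch. */
static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
{
	return gpu_read(ptdev, reg) |
	       ((u64)gpu_read(ptdev, reg + 4) << 32);
}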
@@ -2026,10 +2031,10 @@ static void panthor_vma_link(struct panthor_vm *vm,
{
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_link(&vma->base, vm_bo);
drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
}
static void panthor_vma_unlink(struct panthor_vm *vm,
@@ -2038,9 +2043,9 @@ static void panthor_vma_unlink(struct panthor_vm *vm,
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_unlink(&vma->base);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
/* drm_gpuva_unlink() release the vm_bo, but we manually retained it
* when entering this function, so we can implement deferred VMA
@@ -2192,15 +2197,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
mutex_lock(&vm->op_lock);
vm->op_ctx = op;
switch (op_type) {
- case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->map.vm_bo->obj,
+ .map.gem.offset = op->map.bo_offset,
+ };
+
if (vm->unusable) {
ret = -EINVAL;
break;
}
- ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
- op->map.vm_bo->obj, op->map.bo_offset);
+ ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
+ }
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
@@ -2282,7 +2294,7 @@ static enum drm_gpu_sched_stat
panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
{
WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
@@ -2403,8 +2415,9 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
* to be handled the same way user VMAs are.
*/
drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
- DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
- min_va, va_range, 0, 0, &panthor_gpuvm_ops);
+ DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE,
+ &ptdev->base, dummy_gem, min_va, va_range, 0, 0,
+ &panthor_gpuvm_ops);
drm_gem_object_put(dummy_gem);
return vm;
@@ -2434,7 +2447,7 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
int ret;
/* Aligned on page size. */
- if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
+ if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
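The extended check now rejects a misaligned bo_offset as well; OR-ing the three quantities lets a single IS_ALIGNED() test catch misalignment in any of them. A small worked example with illustrative values, assuming a 4 KiB VM page size:

/* Worked example of the OR-based alignment check (values are made up). */
#include <linux/align.h>

static bool bind_args_aligned_example(void)
{
	const u64 vm_pgsz = 4096;
	const u64 va = 0x100000, size = 0x2000, bo_offset = 0x1000;

	/* All three are 4 KiB aligned, so their OR is too: check passes. */
	bool ok = IS_ALIGNED(va | size | bo_offset, vm_pgsz);

	/* A 2 KiB bo_offset would set bit 11 in the OR and fail the check. */
	bool bad = IS_ALIGNED(va | size | 0x800, vm_pgsz);

	return ok && !bad;
}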
@@ -2523,7 +2536,7 @@ panthor_vm_bind_job_create(struct drm_file *file,
kref_init(&job->refcount);
job->vm = panthor_vm_get(vm);
- ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
+ ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id);
if (ret)
goto err_put_job;