summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBoris Brezillon <boris.brezillon@collabora.com>2025-11-28 12:48:32 +0300
committerBoris Brezillon <boris.brezillon@collabora.com>2025-11-28 12:56:44 +0300
commit851f58d02f0d6c9c5fa8aee32fe349aaa9796758 (patch)
treed3e497a437549b78d37aaa9651c54fab22665f89
parentddf2cb3c9e655dd9b7bb5172249f6c01fc251549 (diff)
downloadlinux-851f58d02f0d6c9c5fa8aee32fe349aaa9796758.tar.xz
drm/panthor: Simplify group idleness tracking
csg_slot_sync_queues_state_locked() queries the queues state which can
then be used to determine if a group is idle or not. Let's base our
idleness detection logic solely on the {idle,blocked}_queues masks to
avoid inconsistencies between the group state and the state of its
subqueues.

v2:
- Add R-b

v3:
- Collect R-b

Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: https://patch.msgid.link/20251128094839.3856402-2-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c31
1 file changed, 2 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 1beddc175722..5b2ab963ac99 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -108,15 +108,6 @@ struct panthor_csg_slot {
/** @priority: Group priority. */
u8 priority;
-
- /**
- * @idle: True if the group bound to this slot is idle.
- *
- * A group is idle when it has nothing waiting for execution on
- * all its queues, or when queues are blocked waiting for something
- * to happen (synchronization object).
- */
- bool idle;
};
/**
@@ -1056,13 +1047,8 @@ group_unbind_locked(struct panthor_group *group)
static bool
group_is_idle(struct panthor_group *group)
{
- struct panthor_device *ptdev = group->ptdev;
- u32 inactive_queues;
-
- if (group->csg_id >= 0)
- return ptdev->scheduler->csg_slots[group->csg_id].idle;
+ u32 inactive_queues = group->idle_queues | group->blocked_queues;
- inactive_queues = group->idle_queues | group->blocked_queues;
return hweight32(inactive_queues) == group->queue_count;
}
@@ -1719,17 +1705,6 @@ static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
}
-static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
-{
- struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
- struct panthor_fw_csg_iface *csg_iface;
-
- lockdep_assert_held(&ptdev->scheduler->lock);
-
- csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
- csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
-}
-
static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
{
struct panthor_scheduler *sched = ptdev->scheduler;
@@ -1991,10 +1966,8 @@ static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
if (acked & CSG_STATE_MASK)
csg_slot_sync_state_locked(ptdev, csg_id);
- if (acked & CSG_STATUS_UPDATE) {
+ if (acked & CSG_STATUS_UPDATE)
csg_slot_sync_queues_state_locked(ptdev, csg_id);
- csg_slot_sync_idle_state_locked(ptdev, csg_id);
- }
if (ret && acked != req_mask &&
((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {