Diffstat (limited to 'drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 349
1 file changed, 308 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 6344b5205c08..59c5c6b4d994 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -9,6 +9,7 @@
 #include "abi/guc_actions_sriov_abi.h"
 #include "abi/guc_klvs_abi.h"
 
+#include "regs/xe_gtt_defs.h"
 #include "regs/xe_guc_regs.h"
 
 #include "xe_bo.h"
@@ -697,6 +698,22 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
 	return fair;
 }
 
+static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
+{
+	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+	u64 shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M);
+	u64 alignment = pf_get_ggtt_alignment(gt);
+
+	if (admin_only_pf && num_vfs == 1)
+		return ALIGN_DOWN(shareable, alignment);
+
+	/* need to hardcode due to ~512M of GGTT being reserved */
+	if (num_vfs > 56)
+		return SZ_64M - SZ_8M;
+
+	return rounddown_pow_of_two(div_u64(shareable, num_vfs));
+}
+
 /**
  * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
  * @gt: the &xe_gt (can't be media)
@@ -710,6 +727,7 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
 					unsigned int num_vfs)
 {
+	u64 profile = pf_profile_fair_ggtt(gt, num_vfs);
 	u64 fair;
 
 	xe_gt_assert(gt, vfid);
@@ -723,9 +741,71 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
 	if (!fair)
 		return -ENOSPC;
 
+	fair = min(fair, profile);
+	if (fair < profile)
+		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %llu vs %llu)\n",
+				 "GGTT", fair, profile);
+
 	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
 }
 
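/*
 * Worked example (a sketch, not part of the patch; assumes GUC_GGTT_TOP
 * is 0xFEE00000 per "regs/xe_gtt_defs.h"): the shareable pool used by
 * pf_profile_fair_ggtt() is ALIGN_DOWN(0xFEE00000, SZ_512M), i.e.
 * 0xE0000000 (3.5 GiB), so the per-VF profile quota becomes:
 *
 *   admin_only_pf && num_vfs == 1: nearly the whole 3.5 GiB pool,
 *                                  aligned down to the GGTT alignment
 *   num_vfs == 2:  rounddown_pow_of_two(3.5 GiB / 2) == SZ_1G
 *   num_vfs == 7:  rounddown_pow_of_two(3.5 GiB / 7) == SZ_512M
 *   num_vfs == 63: SZ_64M - SZ_8M (hardcoded, since ~512M is reserved)
 *
 * xe_gt_sriov_pf_config_set_fair_ggtt() then provisions with the
 * smaller of this profile quota and the estimate based on the GGTT
 * space actually available.
 */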
+/**
+ * xe_gt_sriov_pf_config_ggtt_save() - Save a VF provisioned GGTT data into a buffer.
+ * @gt: the &xe_gt
+ * @vfid: VF identifier (can't be 0)
+ * @buf: the GGTT data destination buffer (or NULL to query the buf size)
+ * @size: the size of the buffer (or 0 to query the buf size)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: size of the buffer needed to save GGTT data if querying,
+ *         0 on successful save or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
+					void *buf, size_t size)
+{
+	struct xe_ggtt_node *node;
+
+	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+	xe_gt_assert(gt, vfid);
+	xe_gt_assert(gt, !(!buf ^ !size));
+
+	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+	node = pf_pick_vf_config(gt, vfid)->ggtt_region;
+
+	if (!buf)
+		return xe_ggtt_node_pt_size(node);
+
+	return xe_ggtt_node_save(node, buf, size, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_ggtt_restore() - Restore a VF provisioned GGTT data from a buffer.
+ * @gt: the &xe_gt
+ * @vfid: VF identifier (can't be 0)
+ * @buf: the GGTT data source buffer
+ * @size: the size of the buffer
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
+				       const void *buf, size_t size)
+{
+	struct xe_ggtt_node *node;
+
+	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+	xe_gt_assert(gt, vfid);
+
+	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+	node = pf_pick_vf_config(gt, vfid)->ggtt_region;
+
+	return xe_ggtt_node_load(node, buf, size, vfid);
+}
+
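/*
 * Usage sketch (hypothetical migration helper, not part of the patch):
 * the save API follows the usual query-then-fill pattern, so a first
 * call with a NULL buffer returns the required buffer size.
 */
static int pf_copy_vf_ggtt_example(struct xe_gt *gt, unsigned int vfid)
{
	ssize_t size;
	void *buf;
	int ret;

	/* query how much space the VF GGTT data needs */
	size = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);
	if (size < 0)
		return size;

	buf = kvmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* save and (e.g. on the target device) restore the GGTT data */
	ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, buf, size);
	if (!ret)
		ret = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, buf, size);

	kvfree(buf);
	return ret;
}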
 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
 {
 	/* XXX: preliminary */
@@ -924,7 +1004,8 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
 				       const char *what, const char *(*unit)(u32),
 				       unsigned int last, int err)
 {
-	xe_gt_assert(gt, first);
+	char name[8];
+
 	xe_gt_assert(gt, num_vfs);
 	xe_gt_assert(gt, first <= last);
 
@@ -932,8 +1013,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
 		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
 
 	if (unlikely(err)) {
-		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
-				   first, first + num_vfs - 1, what);
+		xe_gt_sriov_notice(gt, "Failed to bulk provision %s..VF%u with %s\n",
+				   xe_sriov_function_name(first, name, sizeof(name)),
+				   first + num_vfs - 1, what);
 		if (last > first)
 			pf_config_bulk_set_u32_done(gt, first, last - first, value,
 						    get, what, unit, last, 0);
@@ -942,8 +1024,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
 
 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
 	value = get(gt, first);
-	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
-			 first, first + num_vfs - 1, value, unit(value), what);
+	xe_gt_sriov_info(gt, "%s..VF%u provisioned with %u%s %s\n",
+			 xe_sriov_function_name(first, name, sizeof(name)),
+			 first + num_vfs - 1, value, unit(value), what);
 
 	return 0;
 }
@@ -982,6 +1065,16 @@ int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
 					   "GuC context IDs", no_unit, n, err);
 }
 
+static u32 pf_profile_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
+{
+	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+
+	if (admin_only_pf && num_vfs == 1)
+		return ALIGN_DOWN(GUC_ID_MAX, SZ_1K);
+
+	return rounddown_pow_of_two(GUC_ID_MAX / num_vfs);
+}
+
 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
 {
 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
@@ -1014,6 +1107,7 @@ static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
 					unsigned int num_vfs)
 {
+	u32 profile = pf_profile_fair_ctxs(gt, num_vfs);
 	u32 fair;
 
 	xe_gt_assert(gt, vfid);
@@ -1026,6 +1120,11 @@ int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
 	if (!fair)
 		return -ENOSPC;
 
+	fair = min(fair, profile);
+	if (fair < profile)
+		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
+				 "GuC context IDs", fair, profile);
+
 	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
 }
 
@@ -1230,6 +1329,17 @@ int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
 					   "GuC doorbell IDs", no_unit, n, err);
 }
 
+static u32 pf_profile_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
+{
+	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+
+	/* XXX: preliminary */
+	if (admin_only_pf && num_vfs == 1)
+		return GUC_NUM_DOORBELLS - SZ_16;
+
+	return rounddown_pow_of_two(GUC_NUM_DOORBELLS / (num_vfs + 1));
+}
+
 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
 {
 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
@@ -1262,6 +1372,7 @@ static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
 				       unsigned int num_vfs)
 {
+	u32 profile = pf_profile_fair_dbs(gt, num_vfs);
 	u32 fair;
 
 	xe_gt_assert(gt, vfid);
@@ -1274,6 +1385,11 @@ int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
 	if (!fair)
 		return -ENOSPC;
 
+	fair = min(fair, profile);
+	if (fair < profile)
+		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
+				 "GuC doorbell IDs", fair, profile);
+
 	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
 }
 
@@ -1484,7 +1600,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
 				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 				 XE_BO_FLAG_NEEDS_2M |
 				 XE_BO_FLAG_PINNED |
-				 XE_BO_FLAG_PINNED_LATE_RESTORE);
+				 XE_BO_FLAG_PINNED_LATE_RESTORE |
+				 XE_BO_FLAG_FORCE_USER_VRAM);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
@@ -1547,7 +1664,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
 {
 	int err;
 
-	xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
+	if (!xe_device_has_lmtt(gt_to_xe(gt)))
+		return -EPERM;
 
 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
 	if (vfid)
@@ -1597,6 +1715,32 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
 					   "LMEM", n, err);
 }
 
+static struct xe_bo *pf_get_vf_config_lmem_obj(struct xe_gt *gt, unsigned int vfid)
+{
+	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+	return config->lmem_obj;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_lmem_obj() - Take a reference to the struct &xe_bo backing VF LMEM.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function can only be called on PF.
+ * The caller is responsible for calling xe_bo_put() on the returned object.
+ *
+ * Return: pointer to struct &xe_bo backing VF LMEM (if any).
+ */
+struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid)
+{
+	xe_gt_assert(gt, vfid);
+
+	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+	return xe_bo_get(pf_get_vf_config_lmem_obj(gt, vfid));
+}
+
 static u64 pf_query_free_lmem(struct xe_gt *gt)
 {
 	struct xe_tile *tile = gt->tile;
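/*
 * Usage sketch (hypothetical caller, not part of the patch): the
 * returned BO comes back with an extra reference taken under the
 * master mutex, so the caller must balance the lookup with xe_bo_put().
 */
static void pf_inspect_vf_lmem_example(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_bo *bo = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid);

	if (bo) {
		/* ... inspect or map the VF LMEM backing object ... */
		xe_bo_put(bo);
	}
}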
@@ -1722,7 +1866,7 @@ static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
 	return 0;
 }
 
-static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
+static u32 pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
 {
 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
 
@@ -1730,47 +1874,107 @@ static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
 }
 
 /**
- * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
+ * xe_gt_sriov_pf_config_set_exec_quantum_locked() - Configure PF/VF execution quantum.
  * @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
  *
- * This function can only be called on PF.
+ * This function can only be called on PF with the master mutex held.
+ * It will log the provisioned value or an error in case of failure.
 *
 * Return: 0 on success or a negative error code on failure.
 */
-int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
-					   u32 exec_quantum)
+int xe_gt_sriov_pf_config_set_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid,
+						  u32 exec_quantum)
 {
 	int err;
 
-	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
-	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
 
 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
-				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
+				      pf_get_exec_quantum(gt, vfid),
 				      "execution quantum", exec_quantum_unit, err);
 }
 
 /**
- * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
+ * xe_gt_sriov_pf_config_set_exec_quantum() - Configure PF/VF execution quantum.
  * @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
+ *
+ * This function can only be called on PF.
+ * It will log the provisioned value or an error in case of failure.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
+					   u32 exec_quantum)
+{
+	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+	return xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, exec_quantum);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum_locked() - Get PF/VF execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: execution quantum in milliseconds (or 0 if infinity).
+ */
+u32 xe_gt_sriov_pf_config_get_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid)
+{
+	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+	return pf_get_exec_quantum(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum() - Get PF/VF execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
  *
  * This function can only be called on PF.
 *
- * Return: VF's (or PF's) execution quantum in milliseconds.
+ * Return: execution quantum in milliseconds (or 0 if infinity).
 */
 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
 {
-	u32 exec_quantum;
+	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
 
-	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
-	exec_quantum = pf_get_exec_quantum(gt, vfid);
-	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+	return pf_get_exec_quantum(gt, vfid);
+}
+
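/*
 * Sketch (hypothetical caller, not part of the patch): the split into
 * plain and _locked variants lets several scheduling parameters be
 * updated atomically under a single critical section, e.g.:
 */
static int pf_apply_sched_params_example(struct xe_gt *gt, unsigned int vfid,
					 u32 eq_ms, u32 pt_us)
{
	int err;

	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, eq_ms);
	if (err)
		return err;

	return xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, pt_us);
}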
+/**
+ * xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked() - Configure EQ for PF and VFs.
+ * @gt: the &xe_gt to configure
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(struct xe_gt *gt, u32 exec_quantum)
+{
+	unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+	unsigned int n;
+	int err = 0;
 
-	return exec_quantum;
+	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+	for (n = 0; n <= totalvfs; n++) {
+		err = pf_provision_exec_quantum(gt, VFID(n), exec_quantum);
+		if (err)
+			break;
+	}
+
+	return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, exec_quantum,
+					   pf_get_exec_quantum, "execution quantum",
+					   exec_quantum_unit, n, err);
 }
 
 static const char *preempt_timeout_unit(u32 preempt_timeout)
@@ -1793,7 +1997,7 @@ static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
 	return 0;
 }
 
-static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+static u32 pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
 {
 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
 
@@ -1801,47 +2005,106 @@ static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
 }
 
 /**
- * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
+ * xe_gt_sriov_pf_config_set_preempt_timeout_locked() - Configure PF/VF preemption timeout.
  * @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
 *
- * This function can only be called on PF.
+ * This function can only be called on PF with the master mutex held.
+ * It will log the provisioned value or an error in case of failure.
 *
 * Return: 0 on success or a negative error code on failure.
 */
-int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
-					      u32 preempt_timeout)
+int xe_gt_sriov_pf_config_set_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid,
+						     u32 preempt_timeout)
 {
 	int err;
 
-	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
-	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
 
 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
-				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
+				      pf_get_preempt_timeout(gt, vfid),
 				      "preemption timeout", preempt_timeout_unit, err);
 }
 
 /**
- * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
+ * xe_gt_sriov_pf_config_set_preempt_timeout() - Configure PF/VF preemption timeout.
  * @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+					      u32 preempt_timeout)
+{
+	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+	return xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, preempt_timeout);
+}
+
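/*
 * Note on the bulk _locked helpers (a sketch of the call flow, not part
 * of the patch): VFID(0) denotes the PF itself, so each loop programs
 * totalvfs + 1 functions in one pass.  This is also why
 * pf_config_bulk_set_u32_done() now asserts num_vfs rather than first,
 * and prints via xe_sriov_function_name(), which is assumed to render
 * id 0 as "PF".  With 7 VFs and exec_quantum == 20, the summary line is
 * expected to read roughly:
 *
 *   PF..VF7 provisioned with 20ms execution quantum
 */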
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout_locked() - Get PF/VF preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: preemption timeout in microseconds (or 0 if infinity).
+ */
+u32 xe_gt_sriov_pf_config_get_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid)
+{
+	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+	return pf_get_preempt_timeout(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout() - Get PF/VF preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
 *
 * This function can only be called on PF.
 *
- * Return: VF's (or PF's) preemption timeout in microseconds.
+ * Return: preemption timeout in microseconds (or 0 if infinity).
 */
 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
 {
-	u32 preempt_timeout;
+	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
 
-	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
-	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
-	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+	return pf_get_preempt_timeout(gt, vfid);
+}
 
-	return preempt_timeout;
+/**
+ * xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked() - Configure PT for PF and VFs.
+ * @gt: the &xe_gt to configure
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(struct xe_gt *gt, u32 preempt_timeout)
+{
+	unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+	unsigned int n;
+	int err = 0;
+
+	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+	for (n = 0; n <= totalvfs; n++) {
+		err = pf_provision_preempt_timeout(gt, VFID(n), preempt_timeout);
+		if (err)
+			break;
+	}
+
+	return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, preempt_timeout,
+					   pf_get_preempt_timeout, "preemption timeout",
+					   preempt_timeout_unit, n, err);
 }
 
 static const char *sched_priority_unit(u32 priority)
@@ -2669,3 +2932,7 @@ int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_prin
 
 	return 0;
 }
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_gt_sriov_pf_config_kunit.c"
+#endif
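/*
 * The conditional #include above is the usual kernel idiom for unit
 * testing static functions: when the KUnit test is built in, the test
 * source is compiled into this translation unit and can call statics
 * such as pf_profile_fair_ggtt().  A minimal sketch of what such a test
 * could look like (hypothetical; the real cases live in
 * "tests/xe_gt_sriov_pf_config_kunit.c", and GUC_GGTT_TOP == 0xFEE00000
 * is assumed):
 */
#include <kunit/test.h>

static void profile_fair_split_is_pow2_example(struct kunit *test)
{
	u64 shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M);

	/* 3.5 GiB shared by 2 VFs rounds down to a 1 GiB quota each */
	KUNIT_EXPECT_EQ(test, (u64)rounddown_pow_of_two(div_u64(shareable, 2)),
			(u64)SZ_1G);
}

static struct kunit_case pf_config_profile_example_cases[] = {
	KUNIT_CASE(profile_fair_split_is_pow2_example),
	{}
};

static struct kunit_suite pf_config_profile_example_suite = {
	.name = "xe_gt_sriov_pf_config_profile_example",
	.test_cases = pf_config_profile_example_cases,
};

kunit_test_suite(pf_config_profile_example_suite);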
