Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc')
19 files changed, 702 insertions, 324 deletions
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index fe5d7d261797..7afdadc7656f 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -7,9 +7,9 @@
 #define _ABI_GUC_ACTIONS_ABI_H
 
 /**
- * DOC: HOST2GUC_REGISTER_CTB
+ * DOC: HOST2GUC_SELF_CFG
  *
- * This message is used as part of the `CTB based communication`_ setup.
+ * This message is used by Host KMD to set up the `GuC Self Config KLVs`_.
  *
  * This message must be sent as `MMIO HXG Message`_.
  *
@@ -22,20 +22,18 @@
 * |   +-------+--------------------------------------------------------------+
 * |   | 27:16 | DATA0 = MBZ                                                  |
 * |   +-------+--------------------------------------------------------------+
- * |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_REGISTER_CTB` = 0x4505        |
+ * |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_SELF_CFG` = 0x0508            |
 * +---+-------+--------------------------------------------------------------+
- * | 1 | 31:12 | RESERVED = MBZ                                               |
+ * | 1 | 31:16 | **KLV_KEY** - KLV key, see `GuC Self Config KLVs`_           |
 * |   +-------+--------------------------------------------------------------+
- * |   |  11:8 | **TYPE** - type for the `CT Buffer`_                         |
+ * |   |  15:0 | **KLV_LEN** - KLV length                                     |
 * |   |       |                                                              |
- * |   |       |   - _`GUC_CTB_TYPE_HOST2GUC` = 0                             |
- * |   |       |   - _`GUC_CTB_TYPE_GUC2HOST` = 1                             |
- * |   +-------+--------------------------------------------------------------+
- * |   |   7:0 | **SIZE** - size of the `CT Buffer`_ in 4K units minus 1      |
+ * |   |       |   - 32 bit KLV = 1                                           |
+ * |   |       |   - 64 bit KLV = 2                                           |
 * +---+-------+--------------------------------------------------------------+
- * | 2 |  31:0 | **DESC_ADDR** - GGTT address of the `CTB Descriptor`_        |
+ * | 2 |  31:0 | **VALUE32** - Bits 31-0 of the KLV value                     |
 * +---+-------+--------------------------------------------------------------+
- * | 3 |  31:0 | **BUFF_ADDF** - GGTT address of the `CT Buffer`_             |
+ * | 3 |  31:0 | **VALUE64** - Bits 63-32 of the KLV value (**KLV_LEN** = 2)  |
 * +---+-------+--------------------------------------------------------------+
 *
 * +---+-------+--------------------------------------------------------------+
@@ -45,28 +43,25 @@
 * |   +-------+--------------------------------------------------------------+
 * |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_                        |
 * |   +-------+--------------------------------------------------------------+
- * |   |  27:0 | DATA0 = MBZ                                                  |
+ * |   |  27:0 | DATA0 = **NUM** - 1 if KLV was parsed, 0 if not recognized   |
 * +---+-------+--------------------------------------------------------------+
 */
 
-#define GUC_ACTION_HOST2GUC_REGISTER_CTB		0x4505
+#define GUC_ACTION_HOST2GUC_SELF_CFG			0x0508
 
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN		(GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_0_MBZ		GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_MBZ		(0xfffff << 12)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE	(0xf << 8)
-#define   GUC_CTB_TYPE_HOST2GUC				0u
-#define   GUC_CTB_TYPE_GUC2HOST				1u
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE	(0xff << 0)
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR	GUC_HXG_REQUEST_MSG_n_DATAn
-#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR	GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN		(GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ		GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY		(0xffff << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN		(0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32		GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64		GUC_HXG_REQUEST_MSG_n_DATAn
 
-#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_LEN		GUC_HXG_RESPONSE_MSG_MIN_LEN
-#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_0_MBZ	GUC_HXG_RESPONSE_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_LEN		GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_0_NUM		GUC_HXG_RESPONSE_MSG_0_DATA0
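Taken together, the request layout above is just four dwords. A minimal standalone sketch of packing it (hypothetical helper in plain C; ORIGIN and TYPE are both zero for a host-originated request per the MMIO HXG definitions, so only ACTION shows up in dword 0):

```c
#include <stdint.h>

/*
 * Illustrative only: pack a HOST2GUC_SELF_CFG request per the table above.
 * ORIGIN (bit 31) and TYPE (bits 30:28) are both 0 for a host request.
 */
static void pack_self_cfg_request(uint32_t msg[4], uint16_t klv_key,
				  uint16_t klv_len, uint64_t value)
{
	msg[0] = 0x0508;			/* ACTION = HOST2GUC_SELF_CFG */
	msg[1] = ((uint32_t)klv_key << 16) |	/* KLV_KEY, bits 31:16 */
		 klv_len;			/* KLV_LEN, bits 15:0 */
	msg[2] = (uint32_t)value;		/* VALUE32, bits 31:0 */
	msg[3] = (uint32_t)(value >> 32);	/* VALUE64, only if KLV_LEN = 2 */
}
```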
 
 /**
- * DOC: HOST2GUC_DEREGISTER_CTB
+ * DOC: HOST2GUC_CONTROL_CTB
  *
- * This message is used as part of the `CTB based communication`_ teardown.
+ * This H2G action allows VF Host to enable or disable H2G and G2H `CT Buffer`_.
  *
  * This message must be sent as `MMIO HXG Message`_.
  *
@@ -79,15 +74,12 @@
 * |   +-------+--------------------------------------------------------------+
 * |   | 27:16 | DATA0 = MBZ                                                  |
 * |   +-------+--------------------------------------------------------------+
- * |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_DEREGISTER_CTB` = 0x4506      |
+ * |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_CONTROL_CTB` = 0x4509         |
 * +---+-------+--------------------------------------------------------------+
- * | 1 | 31:12 | RESERVED = MBZ                                               |
- * |   +-------+--------------------------------------------------------------+
- * |   |  11:8 | **TYPE** - type of the `CT Buffer`_                          |
+ * | 1 |  31:0 | **CONTROL** - control `CTB based communication`_             |
 * |   |       |                                                              |
- * |   |       |   see `GUC_ACTION_HOST2GUC_REGISTER_CTB`_                    |
- * |   +-------+--------------------------------------------------------------+
- * |   |   7:0 | RESERVED = MBZ                                               |
+ * |   |       |   - _`GUC_CTB_CONTROL_DISABLE` = 0                           |
+ * |   |       |   - _`GUC_CTB_CONTROL_ENABLE` = 1                            |
 * +---+-------+--------------------------------------------------------------+
 *
 * +---+-------+--------------------------------------------------------------+
@@ -100,16 +92,16 @@
 * |   |  27:0 | DATA0 = MBZ                                                  |
 * +---+-------+--------------------------------------------------------------+
 */
-#define GUC_ACTION_HOST2GUC_DEREGISTER_CTB		0x4506
+#define GUC_ACTION_HOST2GUC_CONTROL_CTB			0x4509
 
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN		(GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_0_MBZ	GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ	(0xfffff << 12)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE	(0xf << 8)
-#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ2	(0xff << 0)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN		(GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_0_MBZ		GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL	GUC_HXG_REQUEST_MSG_n_DATAn
+#define   GUC_CTB_CONTROL_DISABLE			0u
+#define   GUC_CTB_CONTROL_ENABLE			1u
 
-#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_LEN	GUC_HXG_RESPONSE_MSG_MIN_LEN
-#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_0_MBZ	GUC_HXG_RESPONSE_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_LEN		GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_0_MBZ		GUC_HXG_RESPONSE_MSG_0_DATA0
 
 /* legacy definitions */
 
@@ -143,8 +135,12 @@ enum intel_guc_action {
 	INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
 	INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
 	INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
-	INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+	INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
 	INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+	INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
+	INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
+	INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
+	INTEL_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
 	INTEL_GUC_ACTION_LIMIT
 };
 
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index 488b6061ee89..c20658ee85a5 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -11,4 +11,27 @@ enum intel_guc_response_status {
 	INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
 };
 
+enum intel_guc_load_status {
+	INTEL_GUC_LOAD_STATUS_DEFAULT = 0x00,
+	INTEL_GUC_LOAD_STATUS_START = 0x01,
+	INTEL_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH = 0x02,
+	INTEL_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH = 0x03,
+	INTEL_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE = 0x04,
+	INTEL_GUC_LOAD_STATUS_GDT_DONE = 0x10,
+	INTEL_GUC_LOAD_STATUS_IDT_DONE = 0x20,
+	INTEL_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
+	INTEL_GUC_LOAD_STATUS_GUCINT_DONE = 0x40,
+	INTEL_GUC_LOAD_STATUS_DPC_READY = 0x50,
+	INTEL_GUC_LOAD_STATUS_DPC_ERROR = 0x60,
+	INTEL_GUC_LOAD_STATUS_EXCEPTION = 0x70,
+	INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID = 0x71,
+	INTEL_GUC_LOAD_STATUS_PXP_TEARDOWN_CTRL_ENABLED = 0x72,
+	INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
+	INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
+	INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+	INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
+
+	INTEL_GUC_LOAD_STATUS_READY = 0xF0,
+};
+
 #endif /* _ABI_GUC_ERRORS_ABI_H */
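These codes surface through the GS_UKERNEL field of GUC_STATUS (bits 15:8, per the GS_UKERNEL_SHIFT/MASK definitions in intel_guc_reg.h further down this diff). A standalone sketch of decoding a raw status word against a few of the values above (hypothetical helper, not driver code):

```c
#include <stdint.h>

/* GS_UKERNEL lives in bits 15:8 of GUC_STATUS (see intel_guc_reg.h). */
#define GS_UKERNEL_SHIFT	8
#define GS_UKERNEL_MASK		(0xFF << GS_UKERNEL_SHIFT)

/* Sketch: map a raw GUC_STATUS value onto the load progression above. */
static const char *guc_load_status_str(uint32_t guc_status)
{
	switch ((guc_status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT) {
	case 0x00: return "DEFAULT";
	case 0x30: return "LAPIC_DONE";
	case 0x60: return "DPC_ERROR";
	case 0x70: return "EXCEPTION";
	case 0xF0: return "READY";
	default:   return "<other>";
	}
}
```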
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
new file mode 100644
index 000000000000..f0814a57c191
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_KLVS_ABI_H
+#define _ABI_GUC_KLVS_ABI_H
+
+/**
+ * DOC: GuC KLV
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * |   | Bits  | Description                                                  |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | **KEY** - KLV key identifier                                 |
+ * |   |       |   - `GuC Self Config KLVs`_                                  |
+ * |   |       |                                                              |
+ * |   +-------+--------------------------------------------------------------+
+ * |   |  15:0 | **LEN** - length of VALUE (in 32bit dwords)                  |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 |  31:0 | **VALUE** - actual value of the KLV (format depends on KEY)  |
+ * +---+-------+                                                              |
+ * |...|       |                                                              |
+ * +---+-------+                                                              |
+ * | n |  31:0 |                                                              |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_KLV_LEN_MIN				1u
+#define GUC_KLV_0_KEY				(0xffff << 16)
+#define GUC_KLV_0_LEN				(0xffff << 0)
+#define GUC_KLV_n_VALUE				(0xffffffff << 0)
+
+/**
+ * DOC: GuC Self Config KLVs
+ *
+ * `GuC KLV`_ keys available for use with HOST2GUC_SELF_CFG_.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_ADDR` : 0x0902
+ *      Refers to 64 bit Global Gfx address of H2G `CT Buffer`_.
+ *      Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR` : 0x0903
+ *      Refers to 64 bit Global Gfx address of H2G `CTB Descriptor`_.
+ *      Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_SIZE` : 0x0904
+ *      Refers to size of H2G `CT Buffer`_ in bytes.
+ *      Should be a multiple of 4K.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_ADDR` : 0x0905
+ *      Refers to 64 bit Global Gfx address of G2H `CT Buffer`_.
+ *      Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR` : 0x0906
+ *      Refers to 64 bit Global Gfx address of G2H `CTB Descriptor`_.
+ *      Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_SIZE` : 0x0907
+ *      Refers to size of G2H `CT Buffer`_ in bytes.
+ *      Should be a multiple of 4K.
+ */
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY		0x0902
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_LEN		2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY	0x0903
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_LEN	2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY		0x0904
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_LEN		1u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY		0x0905
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_LEN		2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY	0x0906
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_LEN	2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY		0x0907
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN		1u
+
+#endif /* _ABI_GUC_KLVS_ABI_H */
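Each KLV is thus a self-describing header dword followed by LEN value dwords, so several KLVs can be concatenated into a single stream. A tiny standalone sketch of appending one (hypothetical helper):

```c
#include <stdint.h>
#include <string.h>

/* Sketch: append one KLV (header dword + LEN value dwords) to a buffer. */
static uint32_t *klv_append(uint32_t *p, uint16_t key,
			    const uint32_t *value, uint16_t len_dw)
{
	*p++ = ((uint32_t)key << 16) | len_dw;	/* KEY bits 31:16 | LEN bits 15:0 */
	memcpy(p, value, len_dw * sizeof(*p));	/* VALUE dwords follow the header */
	return p + len_dw;
}
```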
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5bab32fef120..447a976c9f25 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -184,6 +184,9 @@ void intel_guc_init_early(struct intel_guc *guc)
 		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
 		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
 	}
+
+	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
+			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 }
 
 void intel_guc_init_late(struct intel_guc *guc)
@@ -224,32 +227,48 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
 	u32 flags;
 
 	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
-	#define UNIT SZ_1M
-	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+	#define LOG_UNIT SZ_1M
+	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
 	#else
-	#define UNIT SZ_4K
-	#define FLAG 0
+	#define LOG_UNIT SZ_4K
+	#define LOG_FLAG 0
+	#endif
+
+	#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
+	#define CAPTURE_UNIT SZ_1M
+	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
+	#else
+	#define CAPTURE_UNIT SZ_4K
+	#define CAPTURE_FLAG 0
 	#endif
 
 	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
-	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
 	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
-	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));
+	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
+	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
+	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
 
-	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
 			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
-	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
+	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
 			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
+	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
+			(GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
 
 	flags = GUC_LOG_VALID |
 		GUC_LOG_NOTIFY_ON_HALF_FULL |
-		FLAG |
-		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
-		((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
+		CAPTURE_FLAG |
+		LOG_FLAG |
+		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
+		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << GUC_LOG_CAPTURE_SHIFT) |
 		(offset << GUC_LOG_BUF_ADDR_SHIFT);
 
-	#undef UNIT
-	#undef FLAG
+	#undef LOG_UNIT
+	#undef LOG_FLAG
+	#undef CAPTURE_UNIT
+	#undef CAPTURE_FLAG
 
 	return flags;
 }
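guc_ctl_log_params_flags() above packs three buffer-count fields plus unit flags into a single control dword. A quick standalone computation for the default (non-debug) sizes defined in intel_guc_log.h later in this diff — 8K crash, 64K debug, 16K capture, none 1M-aligned, so all in 4K units:

```c
#include <stdint.h>
#include <stdio.h>

/* Default sizes from intel_guc_log.h: 8K crash, 64K debug, 16K capture. */
int main(void)
{
	uint32_t crash   =  8 * 1024 / 4096 - 1;	/* 1  -> bits 5:4   */
	uint32_t debug   = 64 * 1024 / 4096 - 1;	/* 15 -> bits 9:6   */
	uint32_t capture = 16 * 1024 / 4096 - 1;	/* 3  -> bits 11:10 */
	uint32_t flags = (1u << 0) |			/* GUC_LOG_VALID */
			 (1u << 1) |			/* NOTIFY_ON_HALF_FULL */
			 (crash << 4) | (debug << 6) | (capture << 10);

	printf("log params flags = %#x\n", flags);	/* prints 0xfd3 */
	return 0;
}
```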
 
@@ -262,6 +281,26 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc)
 	return flags;
 }
 
+static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	u32 flags = 0;
+
+	/* Wa_22012773006:gen11,gen12 < XeHP */
+	if (GRAPHICS_VER(gt->i915) >= 11 &&
+	    GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+		flags |= GUC_WA_POLLCS;
+
+	return flags;
+}
+
+static u32 guc_ctl_devid(struct intel_guc *guc)
+{
+	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+	return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
+}
+
 /*
  * Initialise the GuC parameter block before starting the firmware
  * transfer. These parameters are read by the firmware on startup
@@ -278,6 +317,8 @@ static void guc_init_params(struct intel_guc *guc)
 	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
 	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
 	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
+	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
+	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
 
 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
 		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
@@ -515,9 +556,10 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 	/* Make sure to handle only enabled messages */
 	msg = payload[0] & guc->msg_enabled_mask;
 
-	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
-		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
-		intel_guc_log_handle_flush_event(&guc->log);
+	if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
+		drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n");
+	if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
+		drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n");
 
 	return 0;
 }
@@ -551,7 +593,7 @@ int intel_guc_suspend(struct intel_guc *guc)
 {
 	int ret;
 	u32 action[] = {
-		INTEL_GUC_ACTION_RESET_CLIENT,
+		INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
 	};
 
 	if (!intel_guc_is_ready(guc))
@@ -715,6 +757,56 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
 	return 0;
 }
 
+static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
+		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
+		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
+		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
+		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
+		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
+	};
+	int ret;
+
+	GEM_BUG_ON(len > 2);
+	GEM_BUG_ON(len == 1 && upper_32_bits(value));
+
+	/* Self config must go over MMIO */
+	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+	if (unlikely(ret < 0))
+		return ret;
+	if (unlikely(ret > 1))
+		return -EPROTO;
+	if (unlikely(!ret))
+		return -ENOKEY;
+
+	return 0;
+}
+
+static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+	int err = __guc_action_self_cfg(guc, key, len, value);
+
+	if (unlikely(err))
+		i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
+				 ERR_PTR(err), key, value);
+	return err;
+}
+
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
+{
+	return __guc_self_cfg(guc, key, 1, value);
+}
+
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
+{
+	return __guc_self_cfg(guc, key, 2, value);
+}
+
 /**
  * intel_guc_load_status - dump information about GuC load status
  * @guc: the GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 3aabe164c329..9d779de16613 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -119,6 +119,15 @@ struct intel_guc {
 		 * function as it might be in an atomic context (no sleeping)
 		 */
 		struct work_struct destroyed_worker;
+		/**
+		 * @reset_fail_worker: worker to trigger a GT reset after an
+		 * engine reset fails
+		 */
+		struct work_struct reset_fail_worker;
+		/**
+		 * @reset_fail_mask: mask of engines that failed to reset
+		 */
+		intel_engine_mask_t reset_fail_mask;
 	} submission_state;
 
 	/**
@@ -141,6 +150,13 @@ struct intel_guc {
 	struct __guc_ads_blob *ads_blob;
 	/** @ads_regset_size: size of the save/restore regsets in the ADS */
 	u32 ads_regset_size;
+	/**
+	 * @ads_regset_count: number of save/restore registers in the ADS for
+	 * each engine
+	 */
+	u32 ads_regset_count[I915_NUM_ENGINES];
+	/** @ads_regset: save/restore regsets in the ADS */
+	struct guc_mmio_reg *ads_regset;
 	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
 	u32 ads_golden_ctxt_size;
 	/** @ads_engine_usage_size: size of engine usage in the ADS */
@@ -333,6 +349,8 @@ int intel_guc_resume(struct intel_guc *guc);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
 int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
 				   struct i915_vma **out_vma, void **out_vaddr);
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
 
 static inline bool intel_guc_is_supported(struct intel_guc *guc)
 {
@@ -409,6 +427,8 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
 					const u32 *msg, u32 len);
 int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
 					 const u32 *msg, u32 len);
+int intel_guc_error_capture_process_msg(struct intel_guc *guc,
+					const u32 *msg, u32 len);
 
 void intel_guc_find_hung_context(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 162b89198567..7e41175618f5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -42,6 +42,10 @@
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
+ *      | capture lists                         |
+ *      +---------------------------------------+
+ *      | padding                               |
+ *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
@@ -67,6 +71,12 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
 	return PAGE_ALIGN(guc->ads_golden_ctxt_size);
 }
 
+static u32 guc_ads_capture_size(struct intel_guc *guc)
+{
+	/* FIXME: Allocate a proper capture list */
+	return PAGE_ALIGN(PAGE_SIZE);
+}
+
 static u32 guc_ads_private_data_size(struct intel_guc *guc)
 {
 	return PAGE_ALIGN(guc->fw.private_data_size);
@@ -87,7 +97,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
 	return PAGE_ALIGN(offset);
 }
 
-static u32 guc_ads_private_data_offset(struct intel_guc *guc)
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
 {
 	u32 offset;
 
@@ -97,6 +107,16 @@ static u32 guc_ads_private_data_offset(struct intel_guc *guc)
 	return PAGE_ALIGN(offset);
 }
 
+static u32 guc_ads_private_data_offset(struct intel_guc *guc)
+{
+	u32 offset;
+
+	offset = guc_ads_capture_offset(guc) +
+		 guc_ads_capture_size(guc);
+
+	return PAGE_ALIGN(offset);
+}
+
 static u32 guc_ads_blob_size(struct intel_guc *guc)
 {
 	return guc_ads_private_data_offset(guc) +
@@ -188,14 +208,18 @@ static void guc_mapping_table_init(struct intel_gt *gt,
 
 /*
  * The save/restore register list must be pre-calculated to a temporary
- * buffer of driver defined size before it can be generated in place
- * inside the ADS.
+ * buffer before it can be copied inside the ADS.
 */
-#define MAX_MMIO_REGS	128	/* Arbitrary size, increase as needed */
 struct temp_regset {
+	/*
+	 * ptr to the section of the storage for the engine currently being
+	 * worked on
+	 */
 	struct guc_mmio_reg *registers;
-	u32 used;
-	u32 size;
+	/* ptr to the base of the allocated storage for all engines */
+	struct guc_mmio_reg *storage;
+	u32 storage_used;
+	u32 storage_max;
 };
 
 static int guc_mmio_reg_cmp(const void *a, const void *b)
@@ -206,18 +230,44 @@ static int guc_mmio_reg_cmp(const void *a, const void *b)
 	return (int)ra->offset - (int)rb->offset;
 }
 
-static void guc_mmio_reg_add(struct temp_regset *regset,
-			     u32 offset, u32 flags)
+static struct guc_mmio_reg * __must_check
+__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
 {
-	u32 count = regset->used;
+	u32 pos = regset->storage_used;
+	struct guc_mmio_reg *slot;
+
+	if (pos >= regset->storage_max) {
+		size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
+		struct guc_mmio_reg *r = krealloc(regset->storage,
+						  size, GFP_KERNEL);
+
+		if (!r) {
+			WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
+				  -ENOMEM);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		regset->registers = r + (regset->registers - regset->storage);
+		regset->storage = r;
+		regset->storage_max = size / sizeof(*slot);
+	}
+
+	slot = &regset->storage[pos];
+	regset->storage_used++;
+	*slot = *reg;
+
+	return slot;
+}
+
+static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
+					  u32 offset, u32 flags)
+{
+	u32 count = regset->storage_used - (regset->registers - regset->storage);
 	struct guc_mmio_reg reg = {
 		.offset = offset,
 		.flags = flags,
 	};
 	struct guc_mmio_reg *slot;
 
-	GEM_BUG_ON(count >= regset->size);
-
 	/*
 	 * The mmio list is built using separate lists within the driver.
 	 * It's possible that at some point we may attempt to add the same
@@ -226,11 +276,11 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 	 */
 	if (bsearch(&reg, regset->registers, count,
 		    sizeof(reg), guc_mmio_reg_cmp))
-		return;
+		return 0;
 
-	slot = &regset->registers[count];
-	regset->used++;
-	*slot = reg;
+	slot = __mmio_reg_add(regset, &reg);
+	if (IS_ERR(slot))
+		return PTR_ERR(slot);
 
 	while (slot-- > regset->registers) {
 		GEM_BUG_ON(slot[0].offset == slot[1].offset);
@@ -239,6 +289,8 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 
 		swap(slot[1], slot[0]);
 	}
+
+	return 0;
 }
 
 #define GUC_MMIO_REG_ADD(regset, reg, masked) \
@@ -246,62 +298,71 @@ static void guc_mmio_reg_add(struct temp_regset *regset,
 			 i915_mmio_reg_offset((reg)), \
 			 (masked) ? GUC_REGSET_MASKED : 0)
 
-static void guc_mmio_regset_init(struct temp_regset *regset,
-				 struct intel_engine_cs *engine)
+static int guc_mmio_regset_init(struct temp_regset *regset,
+				struct intel_engine_cs *engine)
 {
 	const u32 base = engine->mmio_base;
 	struct i915_wa_list *wal = &engine->wa_list;
 	struct i915_wa *wa;
 	unsigned int i;
+	int ret = 0;
 
-	regset->used = 0;
+	/*
+	 * Each engine's registers point to a new start relative to
+	 * storage
+	 */
+	regset->registers = regset->storage + regset->storage_used;
 
-	GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
-	GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
-	GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
+	ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
 
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
-		GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+		ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
 
 	/* Be extra paranoid and include all whitelist registers. */
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
-		GUC_MMIO_REG_ADD(regset,
-				 RING_FORCE_TO_NONPRIV(base, i),
-				 false);
+		ret |= GUC_MMIO_REG_ADD(regset,
+					RING_FORCE_TO_NONPRIV(base, i),
+					false);
 
 	/* add in local MOCS registers */
 	for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
-		GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+		ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+
+	return ret ? -1 : 0;
 }
 
-static int guc_mmio_reg_state_query(struct intel_guc *guc)
+static long guc_mmio_reg_state_create(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	struct temp_regset temp_set;
-	u32 total;
+	struct temp_regset temp_set = {};
+	long total = 0;
+	long ret;
 
-	/*
-	 * Need to actually build the list in order to filter out
-	 * duplicates and other such data dependent constructions.
-	 */
-	temp_set.size = MAX_MMIO_REGS;
-	temp_set.registers = kmalloc_array(temp_set.size,
-					   sizeof(*temp_set.registers),
-					   GFP_KERNEL);
-	if (!temp_set.registers)
-		return -ENOMEM;
-
-	total = 0;
 	for_each_engine(engine, gt, id) {
-		guc_mmio_regset_init(&temp_set, engine);
-		total += temp_set.used;
+		u32 used = temp_set.storage_used;
+
+		ret = guc_mmio_regset_init(&temp_set, engine);
+		if (ret < 0)
+			goto fail_regset_init;
+
+		guc->ads_regset_count[id] = temp_set.storage_used - used;
+		total += guc->ads_regset_count[id];
 	}
 
-	kfree(temp_set.registers);
+	guc->ads_regset = temp_set.storage;
+
+	drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
+		(temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);
 
 	return total * sizeof(struct guc_mmio_reg);
+
+fail_regset_init:
+	kfree(temp_set.storage);
+	return ret;
 }
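The reworked temp_regset grows its backing storage one page at a time via krealloc() and keeps `registers` pointing into the current engine's slice, instead of relying on a fixed MAX_MMIO_REGS array. A self-contained userspace approximation of the same growth pattern (realloc() standing in for krealloc(); all names hypothetical):

```c
#include <stdlib.h>

#define PAGE 4096u

struct regset {
	unsigned int *storage;	/* grows in whole pages, like the krealloc() above */
	size_t used;
	size_t max;
};

/* Sketch: append one element, growing storage to the next page boundary. */
static int regset_add(struct regset *rs, unsigned int value)
{
	if (rs->used >= rs->max) {
		size_t bytes = ((rs->used + 1) * sizeof(*rs->storage)
				+ PAGE - 1) / PAGE * PAGE;
		unsigned int *r = realloc(rs->storage, bytes);

		if (!r)
			return -1;	/* caller sees an incomplete list */
		rs->storage = r;
		rs->max = bytes / sizeof(*rs->storage);
	}
	rs->storage[rs->used++] = value;
	return 0;
}
```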
 
 static void guc_mmio_reg_state_init(struct intel_guc *guc,
@@ -309,40 +370,38 @@ static void guc_mmio_reg_state_init(struct intel_guc *guc,
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 	struct intel_engine_cs *engine;
+	struct guc_mmio_reg *ads_registers;
 	enum intel_engine_id id;
-	struct temp_regset temp_set;
-	struct guc_mmio_reg_set *ads_reg_set;
 	u32 addr_ggtt, offset;
-	u8 guc_class;
 
 	offset = guc_ads_regset_offset(guc);
 	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
-	temp_set.registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);
-	temp_set.size = guc->ads_regset_size / sizeof(temp_set.registers[0]);
+	ads_registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);
+
+	memcpy(ads_registers, guc->ads_regset, guc->ads_regset_size);
 
 	for_each_engine(engine, gt, id) {
+		u32 count = guc->ads_regset_count[id];
+		struct guc_mmio_reg_set *ads_reg_set;
+		u8 guc_class;
+
 		/* Class index is checked in class converter */
 		GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);
 
 		guc_class = engine_class_to_guc_class(engine->class);
 		ads_reg_set = &blob->ads.reg_state_list[guc_class][engine->instance];
 
-		guc_mmio_regset_init(&temp_set, engine);
-		if (!temp_set.used) {
+		if (!count) {
 			ads_reg_set->address = 0;
 			ads_reg_set->count = 0;
 			continue;
 		}
 
 		ads_reg_set->address = addr_ggtt;
-		ads_reg_set->count = temp_set.used;
+		ads_reg_set->count = count;
 
-		temp_set.size -= temp_set.used;
-		temp_set.registers += temp_set.used;
-		addr_ggtt += temp_set.used * sizeof(struct guc_mmio_reg);
+		addr_ggtt += count * sizeof(struct guc_mmio_reg);
 	}
-
-	GEM_BUG_ON(temp_set.size);
 }
 
 static void fill_engine_enable_masks(struct intel_gt *gt,
@@ -501,6 +560,26 @@ static void guc_init_golden_context(struct intel_guc *guc)
 	GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
 }
 
+static void guc_capture_list_init(struct intel_guc *guc, struct __guc_ads_blob *blob)
+{
+	int i, j;
+	u32 addr_ggtt, offset;
+
+	offset = guc_ads_capture_offset(guc);
+	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+	/* FIXME: Populate a proper capture list */
+
+	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
+		for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
+			blob->ads.capture_instance[i][j] = addr_ggtt;
+			blob->ads.capture_class[i][j] = addr_ggtt;
+		}
+
+		blob->ads.capture_global[i] = addr_ggtt;
+	}
+}
+
 static void __guc_ads_init(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
@@ -534,6 +613,9 @@ static void __guc_ads_init(struct intel_guc *guc)
 
 	base = intel_guc_ggtt_offset(guc, guc->ads_vma);
 
+	/* Capture list for hang debug */
+	guc_capture_list_init(guc, blob);
+
 	/* ADS */
 	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
 	blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
@@ -561,8 +643,11 @@ int intel_guc_ads_create(struct intel_guc *guc)
 
 	GEM_BUG_ON(guc->ads_vma);
 
-	/* Need to calculate the reg state size dynamically: */
-	ret = guc_mmio_reg_state_query(guc);
+	/*
+	 * Create reg state size dynamically on system memory to be copied to
+	 * the final ads blob on gt init/reset
+	 */
+	ret = guc_mmio_reg_state_create(guc);
 	if (ret < 0)
 		return ret;
 	guc->ads_regset_size = ret;
@@ -602,6 +687,7 @@ void intel_guc_ads_destroy(struct intel_guc *guc)
 {
 	i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
 	guc->ads_blob = NULL;
+	kfree(guc->ads_regset);
 }
 
 static void guc_ads_private_data_reset(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index aa6dd6415202..2f7fc87a78e1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -112,18 +112,6 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 	init_waitqueue_head(&ct->wq);
 }
 
-static inline const char *guc_ct_buffer_type_to_str(u32 type)
-{
-	switch (type) {
-	case GUC_CTB_TYPE_HOST2GUC:
-		return "SEND";
-	case GUC_CTB_TYPE_GUC2HOST:
-		return "RECV";
-	default:
-		return "<invalid>";
-	}
-}
-
 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
 {
 	memset(desc, 0, sizeof(*desc));
@@ -156,71 +144,65 @@ static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
 	guc_ct_buffer_reset(ctb);
 }
 
-static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
-					 u32 desc_addr, u32 buff_addr, u32 size)
+static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
 {
-	u32 request[HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN] = {
+	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
-		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_REGISTER_CTB),
-		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE, size / SZ_4K - 1) |
-		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type),
-		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
-		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
+		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
+		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
 	};
 	int ret;
 
-	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
-	GEM_BUG_ON(size % SZ_4K);
+	GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);
 
-	/* CT registration must go over MMIO */
+	/* CT control must go over MMIO */
 	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
 
 	return ret > 0 ? -EPROTO : ret;
 }
 
-static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
-			      u32 desc_addr, u32 buff_addr, u32 size)
+static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
 {
 	int err;
 
-	err = i915_inject_probe_error(guc_to_gt(ct_to_guc(ct))->i915, -ENXIO);
+	err = guc_action_control_ctb(ct_to_guc(ct), enable ?
+				     GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
 	if (unlikely(err))
-		return err;
+		CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
			       enabledisable(enable), ERR_PTR(err));
 
-	err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
-					    desc_addr, buff_addr, size);
-	if (unlikely(err))
-		CT_ERROR(ct, "Failed to register %s buffer (%pe)\n",
-			 guc_ct_buffer_type_to_str(type), ERR_PTR(err));
 	return err;
 }
 
-static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
+static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
+			      u32 desc_addr, u32 buff_addr, u32 size)
 {
-	u32 request[HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN] = {
-		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
-		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
-		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
-		FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
-	};
-	int ret;
-
-	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
-
-	/* CT deregistration must go over MMIO */
-	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+	int err;
 
-	return ret > 0 ? -EPROTO : ret;
-}
+	err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+				   GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
+				   GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
+				   desc_addr);
+	if (unlikely(err))
+		goto failed;
 
-static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
-{
-	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
+	err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+				   GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
+				   GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
+				   buff_addr);
+	if (unlikely(err))
+		goto failed;
 
+	err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
+				   GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
+				   GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
+				   size);
 	if (unlikely(err))
-		CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n",
-			 guc_ct_buffer_type_to_str(type), ERR_PTR(err));
+failed:
+		CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
			       send ? "SEND" : "RECV", ERR_PTR(err));
 
 	return err;
 }
 
@@ -308,7 +290,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
 int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
-	u32 base, desc, cmds;
+	u32 base, desc, cmds, size;
 	void *blob;
 	int err;
 
@@ -333,27 +315,27 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 	 */
 	desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
 	cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
-	err = ct_register_buffer(ct, GUC_CTB_TYPE_GUC2HOST,
-				 desc, cmds, ct->ctbs.recv.size * 4);
-
+	size = ct->ctbs.recv.size * 4;
+	err = ct_register_buffer(ct, false, desc, cmds, size);
 	if (unlikely(err))
 		goto err_out;
 
 	desc = base + ptrdiff(ct->ctbs.send.desc, blob);
 	cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
-	err = ct_register_buffer(ct, GUC_CTB_TYPE_HOST2GUC,
-				 desc, cmds, ct->ctbs.send.size * 4);
+	size = ct->ctbs.send.size * 4;
+	err = ct_register_buffer(ct, true, desc, cmds, size);
+	if (unlikely(err))
+		goto err_out;
 
+	err = ct_control_enable(ct, true);
 	if (unlikely(err))
-		goto err_deregister;
+		goto err_out;
 
 	ct->enabled = true;
 	ct->stall_time = KTIME_MAX;
 
 	return 0;
 
-err_deregister:
-	ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
 err_out:
 	CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
 	return err;
@@ -372,8 +354,7 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
 	ct->enabled = false;
 
 	if (intel_guc_is_fw_running(guc)) {
-		ct_deregister_buffer(ct, GUC_CTB_TYPE_HOST2GUC);
-		ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
+		ct_control_enable(ct, false);
 	}
 }
 
@@ -662,6 +643,7 @@ static int ct_send(struct intel_guc_ct *ct,
 	struct ct_request request;
 	unsigned long flags;
 	unsigned int sleep_period_ms = 1;
+	bool send_again;
 	u32 fence;
 	int err;
 
@@ -671,6 +653,9 @@ static int ct_send(struct intel_guc_ct *ct,
 	GEM_BUG_ON(!response_buf && response_buf_size);
 	might_sleep();
 
+resend:
+	send_again = false;
+
 	/*
 	 * We use a lazy spin wait loop here as we believe that if the CT
 	 * buffers are sized correctly the flow control condition should be
@@ -725,6 +710,13 @@ retry:
 		goto unlink;
 	}
 
+	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+		CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
+			 FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
+		send_again = true;
+		goto unlink;
+	}
+
 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
 		err = -EIO;
 		goto unlink;
@@ -747,6 +739,9 @@ unlink:
 	list_del(&request.link);
 	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
+	if (unlikely(send_again))
+		goto resend;
+
 	return err;
 }
 
@@ -789,7 +784,7 @@ static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
 {
 	struct ct_incoming_msg *msg;
 
-	msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
+	msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
 	if (msg)
 		msg->size = num_dwords;
 	return msg;
@@ -918,6 +913,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
 	GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
+		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
 		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
 
 	CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
@@ -990,9 +986,27 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r
 	case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
 		ret = intel_guc_context_reset_process_msg(guc, payload, len);
 		break;
+	case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+		ret = intel_guc_error_capture_process_msg(guc, payload, len);
+		if (unlikely(ret))
+			CT_ERROR(ct, "error capture notification failed %x %*ph\n",
+				 action, 4 * len, payload);
+		break;
 	case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
 		ret = intel_guc_engine_failure_process_msg(guc, payload, len);
 		break;
+	case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
+		intel_guc_log_handle_flush_event(&guc->log);
+		ret = 0;
+		break;
+	case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+		CT_ERROR(ct, "Received GuC crash dump notification!\n");
+		ret = 0;
+		break;
+	case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
+		CT_ERROR(ct, "Received GuC exception notification!\n");
+		ret = 0;
+		break;
 	default:
 		ret = -EOPNOTSUPP;
 		break;
@@ -1098,6 +1112,7 @@ static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
 		break;
 	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
 	case GUC_HXG_TYPE_RESPONSE_FAILURE:
+	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
 		err = ct_handle_response(ct, msg);
 		break;
 	default:
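With GUC_HXG_TYPE_NO_RESPONSE_RETRY now handled, ct_send() loops back to its `resend` label until the GuC stops asking for a retry. A condensed, standalone sketch of that control flow (all names hypothetical; the stub stands in for the real CTB send-and-wait path above):

```c
#include <stdbool.h>

enum reply { REPLY_SUCCESS, REPLY_FAILURE, REPLY_RETRY };

/* Stub standing in for the real CTB send + wait-for-response path. */
static enum reply send_and_wait(const unsigned int *action)
{
	(void)action;
	return REPLY_SUCCESS;
}

/* Sketch: mirror ct_send()'s resend handling for NO_RESPONSE_RETRY. */
static int send_with_retry(const unsigned int *action)
{
	bool send_again;
	int err = 0;

	do {
		send_again = false;
		switch (send_and_wait(action)) {
		case REPLY_RETRY:	/* GuC asked us to resend the request */
			send_again = true;
			break;
		case REPLY_FAILURE:
			err = -1;
			break;
		case REPLY_SUCCESS:
			err = 0;
			break;
		}
	} while (send_again);

	return err;
}
```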
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index dcb51b53b495..a0372735cddb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -16,13 +16,15 @@
 
 static void guc_prepare_xfer(struct intel_uncore *uncore)
 {
-	u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
-			 GUC_ENABLE_READ_CACHE_LOGIC |
-			 GUC_ENABLE_MIA_CACHING |
+	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
 			 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
 			 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
 			 GUC_ENABLE_MIA_CLOCK_GATING;
 
+	if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+			      GUC_ENABLE_MIA_CACHING;
+
 	/* Must program this register before loading the ucode with DMA */
 	intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
 
@@ -91,11 +93,10 @@ static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
 static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
 {
 	u32 val = intel_uncore_read(uncore, GUC_STATUS);
-	u32 uk_val = val & GS_UKERNEL_MASK;
+	u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, val);
 
 	*status = val;
-	return (uk_val == GS_UKERNEL_READY) ||
-		((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+	return uk_val == INTEL_GUC_LOAD_STATUS_READY;
 }
 
 static int guc_wait_ucode(struct intel_uncore *uncore)
@@ -106,17 +107,26 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
 	/*
 	 * Wait for the GuC to start up.
 	 * NB: Docs recommend not using the interrupt for completion.
-	 * Measurements indicate this should take no more than 20ms, so a
+	 * Measurements indicate this should take no more than 20ms
+	 * (assuming the GT clock is at maximum frequency). So, a
 	 * timeout here indicates that the GuC has failed and is unusable.
 	 * (Higher levels of the driver may decide to reset the GuC and
 	 * attempt the ucode load again if this happens.)
+	 *
+	 * FIXME: There is a known (but exceedingly unlikely) race condition
+	 * where the asynchronous frequency management code could reduce
+	 * the GT clock while a GuC reload is in progress (during a full
+	 * GT reset). A fix is in progress but there are complex locking
+	 * issues to be resolved. In the meantime bump the timeout to
+	 * 200ms. Even at slowest clock, this should be sufficient. And
+	 * in the working case, a larger timeout makes no difference.
 	 */
-	ret = wait_for(guc_ready(uncore, &status), 100);
+	ret = wait_for(guc_ready(uncore, &status), 200);
 	if (ret) {
 		struct drm_device *drm = &uncore->i915->drm;
 
-		drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
-		drm_dbg(drm, "GuC load failed: status: Reset = %d, "
+		drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
+		drm_info(drm, "GuC load failed: status: Reset = %d, "
 			"BootROM = 0x%02X, UKernel = 0x%02X, "
 			"MIA = 0x%02X, Auth = 0x%02X\n",
 			REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -126,13 +136,13 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
 			REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
 
 		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
-			drm_dbg(drm, "GuC firmware signature verification failed\n");
+			drm_info(drm, "GuC firmware signature verification failed\n");
 			ret = -ENOEXEC;
 		}
 
-		if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
-			drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
-				intel_uncore_read(uncore, SOFT_SCRATCH(13)));
+		if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) {
+			drm_info(drm, "GuC firmware exception. EIP: %#x\n",
+				 intel_uncore_read(uncore, SOFT_SCRATCH(13)));
 			ret = -ENXIO;
 		}
 	}
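guc_ready() now compares an extracted field against INTEL_GUC_LOAD_STATUS_READY instead of the pre-shifted GS_UKERNEL_* values. REG_FIELD_GET() pulls a field out by masking and shifting; a simplified stand-in (assumes a constant, non-empty mask and a GCC/Clang builtin — not the kernel's actual implementation):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's REG_FIELD_GET(): shift the masked
 * bits down so the field can be compared against plain enum values. */
#define FIELD_GET_SKETCH(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

int main(void)
{
	uint32_t guc_status = (0xF0u << 8) | 0x34;	/* UKERNEL=READY + misc bits */

	/* 0xF0 == INTEL_GUC_LOAD_STATUS_READY in the new scheme */
	printf("ukernel = %#x\n", FIELD_GET_SKETCH(0xFF00u, guc_status));
	return 0;
}
```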
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 7072e30e99f4..6a4612a852e2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -16,6 +16,7 @@
 #include "abi/guc_errors_abi.h"
 #include "abi/guc_communication_mmio_abi.h"
 #include "abi/guc_communication_ctb_abi.h"
+#include "abi/guc_klvs_abi.h"
 #include "abi/guc_messages_abi.h"
 
 /* Payload length only i.e. don't include G2H header length */
@@ -84,19 +85,24 @@
 #define GUC_STAGE_DESC_ATTR_TERMINATED	BIT(7)
 
 #define GUC_CTL_LOG_PARAMS		0
-#define   GUC_LOG_VALID			(1 << 0)
-#define   GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
-#define   GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
+#define   GUC_LOG_VALID			BIT(0)
+#define   GUC_LOG_NOTIFY_ON_HALF_FULL	BIT(1)
+#define   GUC_LOG_CAPTURE_ALLOC_UNITS	BIT(2)
+#define   GUC_LOG_LOG_ALLOC_UNITS	BIT(3)
 #define   GUC_LOG_CRASH_SHIFT		4
 #define   GUC_LOG_CRASH_MASK		(0x3 << GUC_LOG_CRASH_SHIFT)
 #define   GUC_LOG_DEBUG_SHIFT		6
 #define   GUC_LOG_DEBUG_MASK		(0xF << GUC_LOG_DEBUG_SHIFT)
+#define   GUC_LOG_CAPTURE_SHIFT		10
+#define   GUC_LOG_CAPTURE_MASK		(0x3 << GUC_LOG_CAPTURE_SHIFT)
 #define   GUC_LOG_BUF_ADDR_SHIFT	12
 
 #define GUC_CTL_WA			1
+#define   GUC_WA_POLLCS			BIT(18)
+
 #define GUC_CTL_FEATURE			2
-#define   GUC_CTL_DISABLE_SCHEDULER	(1 << 14)
 #define   GUC_CTL_ENABLE_SLPC		BIT(2)
+#define   GUC_CTL_DISABLE_SCHEDULER	BIT(14)
 
 #define GUC_CTL_DEBUG			3
 #define   GUC_LOG_VERBOSITY_SHIFT	0
@@ -116,6 +122,8 @@
 #define   GUC_ADS_ADDR_SHIFT		1
 #define   GUC_ADS_ADDR_MASK		(0xFFFFF << GUC_ADS_ADDR_SHIFT)
 
+#define GUC_CTL_DEVID			5
+
 #define GUC_CTL_MAX_DWORDS		(SOFT_SCRATCH_COUNT - 2) /* [1..14] */
 
 /* Generic GT SysInfo data types */
@@ -263,7 +271,10 @@ struct guc_mmio_reg {
 	u32 offset;
 	u32 value;
 	u32 flags;
-#define GUC_REGSET_MASKED	(1 << 0)
+	u32 mask;
+#define GUC_REGSET_MASKED		BIT(0)
+#define GUC_REGSET_MASKED_WITH_VALUE	BIT(2)
+#define GUC_REGSET_RESTORE_ONLY		BIT(3)
 } __packed;
 
 /* GuC register sets */
@@ -280,6 +291,12 @@ struct guc_gt_system_info {
 	u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
 } __packed;
 
+enum {
+	GUC_CAPTURE_LIST_INDEX_PF = 0,
+	GUC_CAPTURE_LIST_INDEX_VF = 1,
+	GUC_CAPTURE_LIST_INDEX_MAX = 2,
+};
+
 /* GuC Additional Data Struct */
 struct guc_ads {
 	struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
@@ -291,7 +308,11 @@ struct guc_ads {
 	u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
 	u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
 	u32 private_data;
-	u32 reserved[15];
+	u32 reserved2;
+	u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+	u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+	u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
+	u32 reserved[14];
 } __packed;
 
 /* Engine usage stats */
@@ -312,6 +333,7 @@ struct guc_engine_usage {
 enum guc_log_buffer_type {
 	GUC_DEBUG_LOG_BUFFER,
 	GUC_CRASH_DUMP_LOG_BUFFER,
+	GUC_CAPTURE_LOG_BUFFER,
 	GUC_MAX_LOG_BUFFER
 };
 
@@ -342,6 +364,7 @@ struct guc_log_buffer_state {
 	u32 write_ptr;
 	u32 size;
 	u32 sampled_write_ptr;
+	u32 wrap_offset;
 	union {
 		struct {
 			u32 flush_to_file:1;
@@ -382,7 +405,7 @@ struct guc_shared_ctx_data {
 /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
 enum intel_guc_recv_message {
 	INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
-	INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
+	INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
 };
 
 #endif
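The regset entries gained a `mask` field alongside the new GUC_REGSET_MASKED_WITH_VALUE flag. For context, "masked" GT registers carry a per-bit write enable in their upper 16 bits, so a save/restore agent has to emit the enable bits together with the value; a minimal generic illustration (sketch only, not driver code):

```c
#include <stdint.h>

/*
 * On "masked" GT registers the upper 16 bits are a per-bit write enable,
 * so updating bit n without disturbing its neighbours means writing the
 * enable bit alongside the value bit.
 */
static inline uint32_t masked_set(uint32_t bit)
{
	return (bit << 16) | bit;	/* enable mask | value */
}

static inline uint32_t masked_clear(uint32_t bit)
{
	return (bit << 16);		/* enable mask, value bit zero */
}
```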
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 7b0b43e87244..b53f61f3101f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -56,20 +56,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-static void guc_log_enable_flush_events(struct intel_guc_log *log)
-{
-	intel_guc_enable_msg(log_to_guc(log),
-			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
-			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
-}
-
-static void guc_log_disable_flush_events(struct intel_guc_log *log)
-{
-	intel_guc_disable_msg(log_to_guc(log),
-			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
-			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
-}
-
 /*
  * Sub buffer switch callback. Called whenever relay has to switch to a new
  * sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -202,6 +188,8 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
 		return DEBUG_BUFFER_SIZE;
 	case GUC_CRASH_DUMP_LOG_BUFFER:
 		return CRASH_BUFFER_SIZE;
+	case GUC_CAPTURE_LOG_BUFFER:
+		return CAPTURE_BUFFER_SIZE;
 	default:
 		MISSING_CASE(type);
 	}
@@ -464,14 +452,19 @@ int intel_guc_log_create(struct intel_guc_log *log)
 	 *  +-------------------------------+ 32B
 	 *  |      Debug state header       |
 	 *  +-------------------------------+ 64B
+	 *  |     Capture state header      |
+	 *  +-------------------------------+ 96B
 	 *  |                               |
 	 *  +===============================+ PAGE_SIZE (4KB)
 	 *  |        Crash Dump logs        |
 	 *  +===============================+ + CRASH_SIZE
 	 *  |          Debug logs           |
 	 *  +===============================+ + DEBUG_SIZE
+	 *  |         Capture logs          |
+	 *  +===============================+ + CAPTURE_SIZE
 	 */
-	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE;
+	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
+		       CAPTURE_BUFFER_SIZE;
 
 	vma = intel_guc_allocate_vma(guc, guc_log_size);
 	if (IS_ERR(vma)) {
@@ -593,8 +586,6 @@ int intel_guc_log_relay_start(struct intel_guc_log *log)
 	if (log->relay.started)
 		return -EEXIST;
 
-	guc_log_enable_flush_events(log);
-
 	/*
 	 * When GuC is logging without us relaying to userspace, we're ignoring
 	 * the flush notification. This means that we need to unconditionally
@@ -641,7 +632,6 @@ static void guc_log_relay_stop(struct intel_guc_log *log)
 	if (!log->relay.started)
 		return;
 
-	guc_log_disable_flush_events(log);
 	intel_synchronize_irq(i915);
 
 	flush_work(&log->relay.flush_work);
@@ -662,7 +652,8 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
 
 void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
 {
-	queue_work(system_highpri_wq, &log->relay.flush_work);
+	if (log->relay.started)
+		queue_work(system_highpri_wq, &log->relay.flush_work);
 }
 
 static const char *
@@ -673,6 +664,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
 		return "DEBUG";
 	case GUC_CRASH_DUMP_LOG_BUFFER:
 		return "CRASH";
+	case GUC_CAPTURE_LOG_BUFFER:
+		return "CAPTURE";
 	default:
 		MISSING_CASE(type);
 	}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index fe6ab7550a14..d7e1b6471fed 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -18,12 +18,15 @@ struct intel_guc;
 #if defined(CONFIG_DRM_I915_DEBUG_GUC)
 #define CRASH_BUFFER_SIZE	SZ_2M
 #define DEBUG_BUFFER_SIZE	SZ_16M
+#define CAPTURE_BUFFER_SIZE	SZ_4M
 #elif defined(CONFIG_DRM_I915_DEBUG_GEM)
 #define CRASH_BUFFER_SIZE	SZ_1M
 #define DEBUG_BUFFER_SIZE	SZ_2M
+#define CAPTURE_BUFFER_SIZE	SZ_1M
 #else
 #define CRASH_BUFFER_SIZE	SZ_8K
 #define DEBUG_BUFFER_SIZE	SZ_64K
+#define CAPTURE_BUFFER_SIZE	SZ_16K
 #endif
 
 /*
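With the capture region added, the log allocation in intel_guc_log_create() above becomes one shared-state page plus three regions. Plugging in the sizes defined here (a quick standalone check, assuming 4K pages):

```c
#include <stdio.h>

/* Total GuC log vma size per build config, per the layout comment above:
 * one shared-state page plus the crash, debug and capture regions. */
int main(void)
{
	unsigned long page = 4096;

	/* CONFIG_DRM_I915_DEBUG_GUC: 2M crash + 16M debug + 4M capture */
	printf("debug-guc: %lu KiB\n",
	       (page + (2UL + 16 + 4) * 1024 * 1024) / 1024);	/* 22532 KiB */
	/* default: 8K crash + 64K debug + 16K capture */
	printf("default:   %lu KiB\n",
	       (page + 8 * 1024UL + 64 * 1024 + 16 * 1024) / 1024);	/* 92 KiB */
	return 0;
}
```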
} static void disable_submission(struct intel_guc *guc) @@ -1475,8 +1476,6 @@ static void guc_flush_destroyed_contexts(struct intel_guc *guc); void intel_guc_submission_reset_prepare(struct intel_guc *guc) { - int i; - if (unlikely(!guc_submission_initialized(guc))) { /* Reset called during driver load? GuC not yet initialised! */ return; @@ -1493,21 +1492,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) guc_flush_submissions(guc); guc_flush_destroyed_contexts(guc); - - /* - * Handle any outstanding G2Hs before reset. Call IRQ handler directly - * each pass as interrupt have been disabled. We always scrub for - * outstanding G2H as it is possible for outstanding_submission_g2h to - * be incremented after the context state update. - */ - for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) { - intel_guc_to_host_event_handler(guc); -#define wait_for_reset(guc, wait_var) \ - intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20)) - do { - wait_for_reset(guc, &guc->outstanding_submission_g2h); - } while (!list_empty(&guc->ct.requests.incoming)); - } + flush_work(&guc->ct.requests.worker); scrub_guc_desc_for_outstanding_g2h(guc); } @@ -1612,7 +1597,6 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled) unsigned long flags; u32 head; int i, number_children = ce->parallel.number_children; - bool skip = false; struct intel_context *parent = ce; GEM_BUG_ON(intel_context_is_child(ce)); @@ -1623,23 +1607,10 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled) * GuC will implicitly mark the context as non-schedulable when it sends * the reset notification. Make sure our state reflects this change. The * context will be marked enabled on resubmission. - * - * XXX: If the context is reset as a result of the request cancellation - * this G2H is received after the schedule disable complete G2H which is - * wrong as this creates a race between the request cancellation code - * re-submitting the context and this G2H handler. This is a bug in the - * GuC but can be worked around in the meantime but converting this to a - * NOP if a pending enable is in flight as this indicates that a request - * cancellation has occurred. */ spin_lock_irqsave(&ce->guc_state.lock, flags); - if (likely(!context_pending_enable(ce))) - clr_context_enabled(ce); - else - skip = true; + clr_context_enabled(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); - if (unlikely(skip)) - goto out_put; /* * For each context in the relationship find the hanging request @@ -1671,7 +1642,6 @@ next_context: } __unwind_incomplete_requests(parent); -out_put: intel_context_put(parent); } @@ -1806,7 +1776,7 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc) { /* Reset called during driver load or during wedge? 
*/ if (unlikely(!guc_submission_initialized(guc) || - test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) { + intel_gt_is_wedged(guc_to_gt(guc)))) { return; } @@ -1825,6 +1795,7 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc) } static void destroyed_worker_func(struct work_struct *w); +static void reset_fail_worker_func(struct work_struct *w); /* * Set up the memory resources to be shared with the GuC (via the GGTT) @@ -1855,6 +1826,8 @@ int intel_guc_submission_init(struct intel_guc *guc) INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts); INIT_WORK(&guc->submission_state.destroyed_worker, destroyed_worker_func); + INIT_WORK(&guc->submission_state.reset_fail_worker, + reset_fail_worker_func); guc->submission_state.guc_ids_bitmap = bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL); @@ -2611,12 +2584,6 @@ static void guc_context_cancel_request(struct intel_context *ce, true); } - /* - * XXX: Racey if context is reset, see comment in - * __guc_reset_context(). - */ - flush_work(&ce_to_guc(ce)->ct.requests.worker); - guc_context_unblock(block_context); intel_context_put(ce); } @@ -3330,8 +3297,6 @@ static void guc_parent_context_unpin(struct intel_context *ce) GEM_BUG_ON(!intel_context_is_parent(ce)); GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); - if (ce->parallel.last_rq) - i915_request_put(ce->parallel.last_rq); unpin_guc_id(guc, ce); lrc_unpin(ce); } @@ -4053,14 +4018,14 @@ static void guc_handle_context_reset(struct intel_guc *guc, { trace_intel_context_reset(ce); - /* - * XXX: Racey if request cancellation has occurred, see comment in - * __guc_reset_context(). - */ - if (likely(!intel_context_is_banned(ce) && - !context_blocked(ce))) { + if (likely(!intel_context_is_banned(ce))) { capture_error_state(guc, ce); guc_context_replay(ce); + } else { + drm_err(&guc_to_gt(guc)->i915->drm, + "Invalid GuC engine reset notificaion for 0x%04X on %s: banned = %d, blocked = %d", + ce->guc_id.id, ce->engine->name, intel_context_is_banned(ce), + context_blocked(ce)); } } @@ -4099,6 +4064,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, return 0; } +int intel_guc_error_capture_process_msg(struct intel_guc *guc, + const u32 *msg, u32 len) +{ + int status; + + if (unlikely(len != 1)) { + drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len); + return -EPROTO; + } + + status = msg[0]; + drm_info(&guc_to_gt(guc)->i915->drm, "Got error capture: status = %d", status); + + /* FIXME: Do something with the capture */ + + return 0; +} + static struct intel_engine_cs * guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance) { @@ -4111,6 +4094,26 @@ guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance) return gt->engine_class[engine_class][instance]; } +static void reset_fail_worker_func(struct work_struct *w) +{ + struct intel_guc *guc = container_of(w, struct intel_guc, + submission_state.reset_fail_worker); + struct intel_gt *gt = guc_to_gt(guc); + intel_engine_mask_t reset_fail_mask; + unsigned long flags; + + spin_lock_irqsave(&guc->submission_state.lock, flags); + reset_fail_mask = guc->submission_state.reset_fail_mask; + guc->submission_state.reset_fail_mask = 0; + spin_unlock_irqrestore(&guc->submission_state.lock, flags); + + if (likely(reset_fail_mask)) + intel_gt_handle_error(gt, reset_fail_mask, + I915_ERROR_CAPTURE, + "GuC failed to reset engine mask=0x%x\n", + reset_fail_mask); +} + int intel_guc_engine_failure_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) { @@ -4118,6 +4121,7 @@ int 
@@ -4118,6 +4121,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
 	struct intel_gt *gt = guc_to_gt(guc);
 	u8 guc_class, instance;
 	u32 reason;
+	unsigned long flags;
 
 	if (unlikely(len != 3)) {
 		drm_err(&gt->i915->drm, "Invalid length %u", len);
@@ -4142,10 +4146,15 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
 	drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
 		guc_class, instance, engine->name, reason);
 
-	intel_gt_handle_error(gt, engine->mask,
-			      I915_ERROR_CAPTURE,
-			      "GuC failed to reset %s (reason=0x%08x)\n",
-			      engine->name, reason);
+	spin_lock_irqsave(&guc->submission_state.lock, flags);
+	guc->submission_state.reset_fail_mask |= engine->mask;
+	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+	/*
+	 * A GT reset flushes this worker queue (G2H handler) so we must use
+	 * another worker to trigger a GT reset.
+	 */
+	queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
 
 	return 0;
 }
@@ -4514,27 +4523,31 @@ static inline bool skip_handshake(struct i915_request *rq)
 	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
 }
 
+#define NON_SKIP_LEN 6
 static u32 *
 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
 						 u32 *cs)
 {
 	struct intel_context *ce = rq->context;
+	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;
 
 	GEM_BUG_ON(!intel_context_is_parent(ce));
 
 	if (unlikely(skip_handshake(rq))) {
 		/*
 		 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
-		 * the -6 comes from the length of the emits below.
+		 * the NON_SKIP_LEN comes from the length of the emits below.
 		 */
 		memset(cs, 0, sizeof(u32) *
-		       (ce->engine->emit_fini_breadcrumb_dw - 6));
-		cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
 	} else {
 		cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
 	}
 
 	/* Emit fini breadcrumb */
+	before_fini_breadcrumb_user_interrupt_cs = cs;
 	cs = gen8_emit_ggtt_write(cs,
 				  rq->fence.seqno,
 				  i915_request_active_timeline(rq)->hwsp_offset,
@@ -4544,6 +4557,12 @@ emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_NOOP;
 
+	/* Ensure our math for skip + emit is correct */
+	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+		   cs);
+	GEM_BUG_ON(start_fini_breadcrumb_cs +
+		   ce->engine->emit_fini_breadcrumb_dw != cs);
+
 	rq->tail = intel_ring_offset(rq, cs);
 
 	return cs;
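Both fini-breadcrumb emitters rely on the same invariant: the routine always advances cs by exactly ce->engine->emit_fini_breadcrumb_dw dwords, NOPing the handshake out when it is skipped (MI_NOOP is 0, hence the memset), and the new GEM_BUG_ONs assert both halves of that arithmetic. A reduced sketch of the invariant, where TOTAL_LEN, TAIL_LEN and the emit helpers are stand-ins rather than i915 code:

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/types.h>

#define TAIL_LEN	6	/* dwords unconditionally emitted at the end */
#define TOTAL_LEN	24	/* full size reserved in the ring */

static u32 *emit_optional_body(u32 *cs);	/* emits TOTAL_LEN - TAIL_LEN dwords */
static u32 *emit_tail(u32 *cs);			/* emits exactly TAIL_LEN dwords */

static u32 *emit_fixed_len(u32 *cs, bool skip_optional)
{
	u32 *start = cs, *tail;

	if (skip_optional) {
		/* Preserve the layout: pad the skipped body with NOPs (0). */
		memset(cs, 0, sizeof(u32) * (TOTAL_LEN - TAIL_LEN));
		cs += TOTAL_LEN - TAIL_LEN;
	} else {
		cs = emit_optional_body(cs);
	}

	tail = cs;
	cs = emit_tail(cs);

	/* A drift in either half now fails loudly instead of corrupting the ring. */
	BUG_ON(cs != tail + TAIL_LEN);
	BUG_ON(cs != start + TOTAL_LEN);

	return cs;
}

The child-side emitter in the next hunk applies exactly the same treatment.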
@@ -4586,22 +4605,25 @@ emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
 						u32 *cs)
 {
 	struct intel_context *ce = rq->context;
+	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;
 
 	GEM_BUG_ON(!intel_context_is_child(ce));
 
 	if (unlikely(skip_handshake(rq))) {
 		/*
 		 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
-		 * the -6 comes from the length of the emits below.
+		 * the NON_SKIP_LEN comes from the length of the emits below.
 		 */
 		memset(cs, 0, sizeof(u32) *
-		       (ce->engine->emit_fini_breadcrumb_dw - 6));
-		cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
 	} else {
 		cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
 	}
 
 	/* Emit fini breadcrumb */
+	before_fini_breadcrumb_user_interrupt_cs = cs;
 	cs = gen8_emit_ggtt_write(cs,
 				  rq->fence.seqno,
 				  i915_request_active_timeline(rq)->hwsp_offset,
@@ -4611,11 +4633,19 @@ emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_NOOP;
 
+	/* Ensure our math for skip + emit is correct */
+	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+		   cs);
+	GEM_BUG_ON(start_fini_breadcrumb_cs +
+		   ce->engine->emit_fini_breadcrumb_dw != cs);
+
 	rq->tail = intel_ring_offset(rq, cs);
 
 	return cs;
 }
 
+#undef NON_SKIP_LEN
+
 static struct intel_context *
 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
 		   unsigned long flags)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d10b227ac4aa..556829de9c17 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -124,6 +124,7 @@ int intel_huc_auth(struct intel_huc *huc)
 	}
 
 	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
+	drm_info(&gt->i915->drm, "HuC authenticated\n");
 	return 0;
 
 fail:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 09ed29df67bc..da199aa6989f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -432,6 +432,15 @@ static int __uc_check_hw(struct intel_uc *uc)
 	return 0;
 }
 
+static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
+{
+	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+	drm_info(&i915->drm, "%s firmware %s version %u.%u\n",
+		 intel_uc_fw_type_repr(fw->type), fw->path,
+		 fw->major_ver_found, fw->minor_ver_found);
+}
+
 static int __uc_init_hw(struct intel_uc *uc)
 {
 	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
@@ -442,6 +451,11 @@ static int __uc_init_hw(struct intel_uc *uc)
 	GEM_BUG_ON(!intel_uc_supports_guc(uc));
 	GEM_BUG_ON(!intel_uc_wants_guc(uc));
 
+	print_fw_ver(uc, &guc->fw);
+
+	if (intel_uc_uses_huc(uc))
+		print_fw_ver(uc, &huc->fw);
+
 	if (!intel_uc_fw_is_loadable(&guc->fw)) {
 		ret = __uc_check_hw(uc) ||
 		      intel_uc_fw_is_overridden(&guc->fw) ||
@@ -507,24 +521,11 @@ static int __uc_init_hw(struct intel_uc *uc)
 		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
 	}
 
-	drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
-		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
-		 guc->fw.major_ver_found, guc->fw.minor_ver_found,
-		 "submission",
+	drm_info(&i915->drm, "GuC submission %s\n",
 		 enableddisabled(intel_uc_uses_guc_submission(uc)));
-
-	drm_info(&i915->drm, "GuC SLPC: %s\n",
+	drm_info(&i915->drm, "GuC SLPC %s\n",
 		 enableddisabled(intel_uc_uses_guc_slpc(uc)));
 
-	if (intel_uc_uses_huc(uc)) {
-		drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
-			 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
-			 huc->fw.path,
-			 huc->fw.major_ver_found, huc->fw.minor_ver_found,
-			 "authenticated",
-			 yesno(intel_huc_is_authenticated(huc)));
-	}
-
 	return 0;
 
 	/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index dd588ecf048a..c88113044494 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -5,6 +5,8 @@
 #include <linux/bitfield.h>
 #include <linux/firmware.h>
+
+#include <drm/drm_cache.h>
 #include <drm/drm_print.h>
 
 #include "gem/i915_gem_lmem.h"
@@ -50,21 +52,21 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
  * firmware as TGL.
  */
 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
-	fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3)) \
-	fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0)) \
-	fw_def(DG1, 0, guc_def(dg1, 62, 0, 0)) \
-	fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0)) \
-	fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0)) \
-	fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0)) \
-	fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0)) \
-	fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0)) \
-	fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0)) \
-	fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0)) \
-	fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0)) \
-	fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0)) \
-	fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0)) \
-	fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0)) \
-	fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0))
+	fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
+	fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3)) \
+	fw_def(DG1, 0, guc_def(dg1, 69, 0, 3)) \
+	fw_def(ROCKETLAKE, 0, guc_def(tgl, 69, 0, 3)) \
+	fw_def(TIGERLAKE, 0, guc_def(tgl, 69, 0, 3)) \
+	fw_def(JASPERLAKE, 0, guc_def(ehl, 69, 0, 3)) \
+	fw_def(ELKHARTLAKE, 0, guc_def(ehl, 69, 0, 3)) \
+	fw_def(ICELAKE, 0, guc_def(icl, 69, 0, 3)) \
+	fw_def(COMETLAKE, 5, guc_def(cml, 69, 0, 3)) \
+	fw_def(COMETLAKE, 0, guc_def(kbl, 69, 0, 3)) \
+	fw_def(COFFEELAKE, 0, guc_def(kbl, 69, 0, 3)) \
+	fw_def(GEMINILAKE, 0, guc_def(glk, 69, 0, 3)) \
+	fw_def(KABYLAKE, 0, guc_def(kbl, 69, 0, 3)) \
+	fw_def(BROXTON, 0, guc_def(bxt, 69, 0, 3)) \
+	fw_def(SKYLAKE, 0, guc_def(skl, 69, 0, 3))
 
 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
 	fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
@@ -449,20 +451,19 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
 {
 	struct drm_i915_gem_object *obj = uc_fw->obj;
 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
-	struct i915_vma *dummy = &uc_fw->dummy;
+	struct i915_vma_resource *dummy = &uc_fw->dummy;
 	u32 pte_flags = 0;
 
-	dummy->node.start = uc_fw_ggtt_offset(uc_fw);
-	dummy->node.size = obj->base.size;
-	dummy->pages = obj->mm.pages;
-	dummy->vm = &ggtt->vm;
+	dummy->start = uc_fw_ggtt_offset(uc_fw);
+	dummy->node_size = obj->base.size;
+	dummy->bi.pages = obj->mm.pages;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);
+	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
 
 	/* uc_fw->obj cache domains were not controlled across suspend */
 	if (i915_gem_object_has_struct_page(obj))
-		drm_clflush_sg(dummy->pages);
+		drm_clflush_sg(dummy->bi.pages);
 
 	if (i915_gem_object_is_lmem(obj))
 		pte_flags |= PTE_LM;
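In the uc_fw_bind_ggtt() hunk above, the on-stack dummy switches from a fake struct i915_vma to a struct i915_vma_resource: the GGTT insertion path only needs the mapping description (start, size, backing pages), not a whole VMA. The sketch below captures the idea in generic form; mapping_desc, fw_image and map_pages() are invented for illustration, not i915 API:

#include <linux/scatterlist.h>
#include <linux/types.h>

struct mapping_desc {
	u64 start;		/* offset to map at */
	u64 size;		/* size of the backing store */
	struct sg_table *pages;	/* backing pages */
};

struct fw_image {
	u64 size;
	struct sg_table *pages;
};

static void map_pages(const struct mapping_desc *desc);	/* consumes only @desc */

static void bind_fw_image(struct fw_image *fw, u64 offset)
{
	/* A small on-stack descriptor is cheaper than faking up a VMA. */
	struct mapping_desc dummy = {
		.start = offset,
		.size = fw->size,
		.pages = fw->pages,
	};

	map_pages(&dummy);
}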
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index d9d1dc0b4cbb..3229018877d3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -85,7 +85,7 @@ struct intel_uc_fw {
 	 * threaded as it done during driver load (inherently single threaded)
 	 * or during a GT reset (mutex guarantees single threaded).
 	 */
-	struct i915_vma dummy;
+	struct i915_vma_resource dummy;
 	struct i915_vma *rsa_data;
 
 	/*
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index d3327b802b76..a115894d5896 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -157,7 +157,7 @@ static int intel_guc_steal_guc_ids(void *arg)
 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 	engine = intel_selftest_find_any_engine(gt);
 	sv = guc->submission_state.num_guc_ids;
-	guc->submission_state.num_guc_ids = 4096;
+	guc->submission_state.num_guc_ids = 512;
 
 	/* Create spinner to block requests in below loop */
 	ce[context_index] = intel_context_create(engine);
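The selftest change above shrinks the guc_id pool for the duration of intel_guc_steal_guc_ids(): sv saves the real num_guc_ids, and a pool of 512 lets the exhaustion/steal path be reached with far fewer contexts. The general save-override-restore shape, sketched with illustrative names (pool, capacity, allocate_until_full):

struct pool {
	unsigned int capacity;
};

static int allocate_until_full(struct pool *p);	/* exercises the exhaustion path */

static int run_exhaustion_test(struct pool *p)
{
	unsigned int saved = p->capacity;	/* remember the real limit */
	int err;

	p->capacity = 512;		/* small enough to exhaust quickly */
	err = allocate_until_full(p);	/* must hit the steal path and recover */
	p->capacity = saved;		/* restore even when the test failed */

	return err;
}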