From fbaa38214cd9e150764ccaa82e04ecf42cc1140c Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:01 +0100 Subject: cxl/pci: Fix CDAT retrieval on big endian The CDAT exposed in sysfs differs between little endian and big endian arches: On big endian, every 4 bytes are byte-swapped. PCI Configuration Space is little endian (PCI r3.0 sec 6.1). Accessors such as pci_read_config_dword() implicitly swap bytes on big endian. That way, the macros in include/uapi/linux/pci_regs.h work regardless of the arch's endianness. For an example of implicit byte-swapping, see ppc4xx_pciex_read_config(), which calls in_le32(), which uses lwbrx (Load Word Byte-Reverse Indexed). DOE Read/Write Data Mailbox Registers are unlike other registers in Configuration Space in that they contain or receive a 4 byte portion of an opaque byte stream (a "Data Object" per PCIe r6.0 sec 7.9.24.5f). They need to be copied to or from the request/response buffer verbatim. So amend pci_doe_send_req() and pci_doe_recv_resp() to undo the implicit byte-swapping. The CXL_DOE_TABLE_ACCESS_* and PCI_DOE_DATA_OBJECT_DISC_* macros assume implicit byte-swapping. Byte-swap requests after constructing them with those macros and byte-swap responses before parsing them. Change the request and response type to __le32 to avoid sparse warnings. Per a request from Jonathan, replace sizeof(u32) with sizeof(__le32) for consistency. Fixes: c97006046c79 ("cxl/port: Read CDAT table") Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Dan Williams Cc: stable@vger.kernel.org # v6.0+ Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/3051114102f41d19df3debbee123129118fc5e6d.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 7328a2552411..49a99a84b6aa 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -462,7 +462,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport) return NULL; } -#define CDAT_DOE_REQ(entry_handle) \ +#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \ FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \ @@ -475,8 +475,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task) } struct cdat_doe_task { - u32 request_pl; - u32 response_pl[32]; + __le32 request_pl; + __le32 response_pl[32]; struct completion c; struct pci_doe_task task; }; @@ -510,10 +510,10 @@ static int cxl_cdat_get_length(struct device *dev, return rc; } wait_for_completion(&t.c); - if (t.task.rv < sizeof(u32)) + if (t.task.rv < sizeof(__le32)) return -EIO; - *length = t.response_pl[1]; + *length = le32_to_cpu(t.response_pl[1]); dev_dbg(dev, "CDAT length %zu\n", *length); return 0; @@ -524,13 +524,13 @@ static int cxl_cdat_read_table(struct device *dev, struct cxl_cdat *cdat) { size_t length = cdat->length; - u32 *data = cdat->table; + __le32 *data = cdat->table; int entry_handle = 0; do { DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t); size_t entry_dw; - u32 *entry; + __le32 *entry; int rc; rc = pci_doe_submit_task(cdat_doe, &t.task); @@ -540,21 +540,21 @@ static int cxl_cdat_read_table(struct device *dev, } wait_for_completion(&t.c); /* 1 DW header + 1 DW data min */ - if (t.task.rv < (2 * sizeof(u32))) + if (t.task.rv < (2 * sizeof(__le32))) return -EIO; /* Get the CXL table access header 
entry handle */ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, - t.response_pl[0]); + le32_to_cpu(t.response_pl[0])); entry = t.response_pl + 1; - entry_dw = t.task.rv / sizeof(u32); + entry_dw = t.task.rv / sizeof(__le32); /* Skip Header */ entry_dw -= 1; - entry_dw = min(length / sizeof(u32), entry_dw); + entry_dw = min(length / sizeof(__le32), entry_dw); /* Prevent length < 1 DW from causing a buffer overflow */ if (entry_dw) { - memcpy(data, entry, entry_dw * sizeof(u32)); - length -= entry_dw * sizeof(u32); + memcpy(data, entry, entry_dw * sizeof(__le32)); + length -= entry_dw * sizeof(__le32); data += entry_dw; } } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); -- cgit v1.2.3 From 34bafc747c54fb58c1908ec3116fa6137393e596 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:02 +0100 Subject: cxl/pci: Handle truncated CDAT header cxl_cdat_get_length() only checks whether the DOE response size is sufficient for the Table Access response header (1 dword), but not the succeeding CDAT header (1 dword length plus other fields). It thus returns whatever uninitialized memory happens to be on the stack if a truncated DOE response with only 1 dword was received. Fix it. Fixes: c97006046c79 ("cxl/port: Read CDAT table") Reported-by: Ming Li Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Ming Li Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Cc: stable@vger.kernel.org # v6.0+ Reviewed-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/000e69cd163461c8b1bc2cf4155b6e25402c29c7.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 49a99a84b6aa..87da8c935185 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -510,7 +510,7 @@ static int cxl_cdat_get_length(struct device *dev, return rc; } wait_for_completion(&t.c); - if (t.task.rv < sizeof(__le32)) + if (t.task.rv < 2 * sizeof(__le32)) return -EIO; *length = le32_to_cpu(t.response_pl[1]); -- cgit v1.2.3 From b56faef2312057db20479b240eb71bd2e51fb51c Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:03 +0100 Subject: cxl/pci: Handle truncated CDAT entries If truncated CDAT entries are received from a device, the concatenation of those entries constitutes a corrupt CDAT, yet is happily exposed to user space. Avoid by verifying response lengths and erroring out if truncation is detected. The last CDAT entry may still be truncated despite the checks introduced herein if the length in the CDAT header is too small. However, that is easily detectable by user space because it reaches EOF prematurely. A subsequent commit which rightsizes the CDAT response allocation closes that remaining loophole. The two lines introduced here which exceed 80 chars are shortened to less than 80 chars by a subsequent commit which migrates to a synchronous DOE API and replaces "t.task.rv" by "rc". The existing acpi_cdat_header and acpi_table_cdat struct definitions provided by ACPICA cannot be used because they do not employ __le16 or __le32 types. I believe that cannot be changed because those types are Linux-specific and ACPI is specified for little endian platforms only, hence doesn't care about endianness. So duplicate the structs. 
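Before the tags, a minimal user-space sketch of the length check this patch adds for a non-initial entry (this is not the kernel code: le16toh() from glibc stands in for le16_to_cpu(), the structures are simplified, and the sample buffer is made up):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cdat_entry_header {
	uint8_t  type;
	uint8_t  reserved;
	uint16_t length;	/* little endian on the wire */
} __attribute__((packed));

/* mirrors the entry_handle > 0 branch of the new check */
static int entry_truncated(const void *resp, size_t resp_bytes)
{
	struct cdat_entry_header entry;

	/* 1 DW Table Access Response Header precedes the CDAT entry */
	if (resp_bytes < sizeof(uint32_t) + sizeof(entry))
		return 1;
	memcpy(&entry, (const uint8_t *)resp + sizeof(uint32_t), sizeof(entry));

	/* byte-swap the wire-format length before comparing byte counts */
	return resp_bytes != sizeof(uint32_t) + le16toh(entry.length);
}

int main(void)
{
	/* 4-byte response header + an 8-byte entry whose length field says 8 */
	uint8_t buf[12] = { 0, 0, 0, 0,  0, 0, 0x08, 0x00,  0, 0, 0, 0 };

	printf("truncated: %d\n", entry_truncated(buf, sizeof(buf)));
	return 0;
}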
Fixes: c97006046c79 ("cxl/port: Read CDAT table") Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Cc: stable@vger.kernel.org # v6.0+ Link: https://lore.kernel.org/r/bce3aebc0e8e18a1173425a7a865b232c3912963.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 13 +++++++++---- drivers/cxl/cxlpci.h | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 87da8c935185..fb600dfbf5a6 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -529,8 +529,8 @@ static int cxl_cdat_read_table(struct device *dev, do { DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t); + struct cdat_entry_header *entry; size_t entry_dw; - __le32 *entry; int rc; rc = pci_doe_submit_task(cdat_doe, &t.task); @@ -539,14 +539,19 @@ static int cxl_cdat_read_table(struct device *dev, return rc; } wait_for_completion(&t.c); - /* 1 DW header + 1 DW data min */ - if (t.task.rv < (2 * sizeof(__le32))) + + /* 1 DW Table Access Response Header + CDAT entry */ + entry = (struct cdat_entry_header *)(t.response_pl + 1); + if ((entry_handle == 0 && + t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) || + (entry_handle > 0 && + (t.task.rv < sizeof(__le32) + sizeof(*entry) || + t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length)))) return -EIO; /* Get the CXL table access header entry handle */ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, le32_to_cpu(t.response_pl[0])); - entry = t.response_pl + 1; entry_dw = t.task.rv / sizeof(__le32); /* Skip Header */ entry_dw -= 1; diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h index be6a2ef3cce3..0465ef963cd6 100644 --- a/drivers/cxl/cxlpci.h +++ b/drivers/cxl/cxlpci.h @@ -68,6 +68,20 @@ enum cxl_regloc_type { CXL_REGLOC_RBI_TYPES }; +struct cdat_header { + __le32 length; + u8 revision; + u8 checksum; + u8 reserved[6]; + __le32 sequence; +} __packed; + +struct cdat_entry_header { + u8 type; + u8 reserved; + __le16 length; +} __packed; + int devm_cxl_port_enumerate_dports(struct cxl_port *port); struct cxl_dev_state; int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, -- cgit v1.2.3 From 4fe2c13d59d849be3b45371e3913ec5dc77fc0fb Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:04 +0100 Subject: cxl/pci: Handle excessive CDAT length If the length in the CDAT header is larger than the concatenation of the header and all table entries, then the CDAT exposed to user space contains trailing null bytes. Not every consumer may be able to handle that. Per Postel's robustness principle, "be liberal in what you accept" and silently reduce the cached length to avoid exposing those null bytes. 
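The accounting amounts to subtracting whatever remained unconsumed after the last entry. A toy calculation with made-up numbers (the real code tracks the remainder in the local variable "length"):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical: the CDAT header claims 240 bytes, entries total 224 */
	size_t header_length = 240;	/* length field in the CDAT header */
	size_t remaining = header_length;
	size_t entry_bytes[] = { 128, 64, 32 };	/* bytes copied per response */

	for (size_t i = 0; i < sizeof(entry_bytes) / sizeof(entry_bytes[0]); i++)
		remaining -= entry_bytes[i];

	/* mirrors "cdat->length -= length": cache only what was received */
	printf("cached CDAT length: %zu\n", header_length - remaining);
	return 0;
}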
Fixes: c97006046c79 ("cxl/port: Read CDAT table") Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Dan Williams Reviewed-by: Jonathan Cameron Cc: stable@vger.kernel.org # v6.0+ Link: https://lore.kernel.org/r/6d98b3c7da5343172bd3ccabfabbc1f31c079d74.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index fb600dfbf5a6..523d5b9fd7fc 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -564,6 +564,9 @@ static int cxl_cdat_read_table(struct device *dev, } } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); + /* Length in CDAT header may exceed concatenation of CDAT entries */ + cdat->length -= length; + return 0; } -- cgit v1.2.3 From 58709b924ea5911b7d500bba9ed36b71e1e76598 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:08 +0100 Subject: cxl/pci: Use synchronous API for DOE A synchronous API for DOE has just been introduced. Convert CXL CDAT retrieval over to it. Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Cc: Dan Williams Link: https://lore.kernel.org/r/c329c0a21c11c3b524ce2336b0bbb3c80a28c415.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 66 +++++++++++++++++--------------------------------- 1 file changed, 22 insertions(+), 44 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 523d5b9fd7fc..8575eaadd522 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -469,51 +469,26 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport) CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \ FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle))) -static void cxl_doe_task_complete(struct pci_doe_task *task) -{ - complete(task->private); -} - -struct cdat_doe_task { - __le32 request_pl; - __le32 response_pl[32]; - struct completion c; - struct pci_doe_task task; -}; - -#define DECLARE_CDAT_DOE_TASK(req, cdt) \ -struct cdat_doe_task cdt = { \ - .c = COMPLETION_INITIALIZER_ONSTACK(cdt.c), \ - .request_pl = req, \ - .task = { \ - .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \ - .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \ - .request_pl = &cdt.request_pl, \ - .request_pl_sz = sizeof(cdt.request_pl), \ - .response_pl = cdt.response_pl, \ - .response_pl_sz = sizeof(cdt.response_pl), \ - .complete = cxl_doe_task_complete, \ - .private = &cdt.c, \ - } \ -} - static int cxl_cdat_get_length(struct device *dev, struct pci_doe_mb *cdat_doe, size_t *length) { - DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t); + __le32 request = CDAT_DOE_REQ(0); + __le32 response[32]; int rc; - rc = pci_doe_submit_task(cdat_doe, &t.task); + rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, + CXL_DOE_PROTOCOL_TABLE_ACCESS, + &request, sizeof(request), + &response, sizeof(response)); if (rc < 0) { - dev_err(dev, "DOE submit failed: %d", rc); + dev_err(dev, "DOE failed: %d", rc); return rc; } - wait_for_completion(&t.c); - if (t.task.rv < 2 * sizeof(__le32)) + if (rc < 2 * sizeof(__le32)) return -EIO; - *length = le32_to_cpu(t.response_pl[1]); + *length = le32_to_cpu(response[1]); dev_dbg(dev, "CDAT length %zu\n", *length); return 0; @@ -528,31 +503,34 @@ static int cxl_cdat_read_table(struct device *dev, int entry_handle = 0; do { - DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t); + __le32 request = CDAT_DOE_REQ(entry_handle); 
struct cdat_entry_header *entry; + __le32 response[32]; size_t entry_dw; int rc; - rc = pci_doe_submit_task(cdat_doe, &t.task); + rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, + CXL_DOE_PROTOCOL_TABLE_ACCESS, + &request, sizeof(request), + &response, sizeof(response)); if (rc < 0) { - dev_err(dev, "DOE submit failed: %d", rc); + dev_err(dev, "DOE failed: %d", rc); return rc; } - wait_for_completion(&t.c); /* 1 DW Table Access Response Header + CDAT entry */ - entry = (struct cdat_entry_header *)(t.response_pl + 1); + entry = (struct cdat_entry_header *)(response + 1); if ((entry_handle == 0 && - t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) || + rc != sizeof(__le32) + sizeof(struct cdat_header)) || (entry_handle > 0 && - (t.task.rv < sizeof(__le32) + sizeof(*entry) || - t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length)))) + (rc < sizeof(__le32) + sizeof(*entry) || + rc != sizeof(__le32) + le16_to_cpu(entry->length)))) return -EIO; /* Get the CXL table access header entry handle */ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, - le32_to_cpu(t.response_pl[0])); - entry_dw = t.task.rv / sizeof(__le32); + le32_to_cpu(response[0])); + entry_dw = rc / sizeof(__le32); /* Skip Header */ entry_dw -= 1; entry_dw = min(length / sizeof(__le32), entry_dw); -- cgit v1.2.3 From af0a6c3587dc39df348d23c46996b9dad46d07db Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:13 +0100 Subject: cxl/pci: Use CDAT DOE mailbox created by PCI core The PCI core has just been amended to create a pci_doe_mb struct for every DOE instance on device enumeration. Drop creation of a (duplicate) CDAT DOE mailbox on cxl probing in favor of the one already created by the PCI core. Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/becaf70e8faf9681d474200117d62d7eaac46cca.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 27 +++++---------------------- drivers/cxl/cxlmem.h | 3 --- drivers/cxl/pci.c | 49 ------------------------------------------------- 3 files changed, 5 insertions(+), 74 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 8575eaadd522..c868f4a2f1de 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -441,27 +441,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL); #define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff #define CXL_DOE_PROTOCOL_TABLE_ACCESS 2 -static struct pci_doe_mb *find_cdat_doe(struct device *uport) -{ - struct cxl_memdev *cxlmd; - struct cxl_dev_state *cxlds; - unsigned long index; - void *entry; - - cxlmd = to_cxl_memdev(uport); - cxlds = cxlmd->cxlds; - - xa_for_each(&cxlds->doe_mbs, index, entry) { - struct pci_doe_mb *cur = entry; - - if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL, - CXL_DOE_PROTOCOL_TABLE_ACCESS)) - return cur; - } - - return NULL; -} - #define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \ @@ -559,10 +538,14 @@ void read_cdat_data(struct cxl_port *port) struct pci_doe_mb *cdat_doe; struct device *dev = &port->dev; struct device *uport = port->uport; + struct cxl_memdev *cxlmd = to_cxl_memdev(uport); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct pci_dev *pdev = to_pci_dev(cxlds->dev); size_t cdat_length; int rc; - cdat_doe = find_cdat_doe(uport); + cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, + 
CXL_DOE_PROTOCOL_TABLE_ACCESS); if (!cdat_doe) { dev_dbg(dev, "No CDAT mailbox\n"); return; diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 090acebba4fa..001dabf0231b 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -249,7 +249,6 @@ struct cxl_event_state { * @component_reg_phys: register base of component registers * @info: Cached DVSEC information about the device. * @serial: PCIe Device Serial Number - * @doe_mbs: PCI DOE mailbox array * @event: event log driver state * @mbox_send: @dev specific transport for transmitting mailbox commands * @@ -287,8 +286,6 @@ struct cxl_dev_state { resource_size_t component_reg_phys; u64 serial; - struct xarray doe_mbs; - struct cxl_event_state event; int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd); diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 60b23624d167..ea38bd49b0cf 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include "cxlmem.h" @@ -357,52 +356,6 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, return rc; } -static void cxl_pci_destroy_doe(void *mbs) -{ - xa_destroy(mbs); -} - -static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds) -{ - struct device *dev = cxlds->dev; - struct pci_dev *pdev = to_pci_dev(dev); - u16 off = 0; - - xa_init(&cxlds->doe_mbs); - if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) { - dev_err(dev, "Failed to create XArray for DOE's\n"); - return; - } - - /* - * Mailbox creation is best effort. Higher layers must determine if - * the lack of a mailbox for their protocol is a device failure or not. - */ - pci_doe_for_each_off(pdev, off) { - struct pci_doe_mb *doe_mb; - - doe_mb = pcim_doe_create_mb(pdev, off); - if (IS_ERR(doe_mb)) { - dev_err(dev, "Failed to create MB object for MB @ %x\n", - off); - continue; - } - - if (!pci_request_config_region_exclusive(pdev, off, - PCI_DOE_CAP_SIZEOF, - dev_name(dev))) - pci_err(pdev, "Failed to exclude DOE registers\n"); - - if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) { - dev_err(dev, "xa_insert failed to insert MB @ %x\n", - off); - continue; - } - - dev_dbg(dev, "Created DOE mailbox @%x\n", off); - } -} - /* * Assume that any RCIEP that emits the CXL memory expander class code * is an RCD @@ -750,8 +703,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) cxlds->component_reg_phys = map.resource; - devm_cxl_pci_create_doe(cxlds); - rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component, &map, BIT(CXL_CM_CAP_CAP_ID_RAS)); if (rc) -- cgit v1.2.3 From 7a877c923995b8257069209b1d757735af4f4ce0 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Sat, 11 Mar 2023 15:40:16 +0100 Subject: cxl/pci: Simplify CDAT retrieval error path The cdat.table and cdat.length fields in struct cxl_port are set before CDAT retrieval and must therefore be unset on failure. Simplify by setting only on success. 
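Expressed as a standalone sketch, the pattern this patch moves to is "work on locals, publish only on success" (user-space stand-ins below; get_length() and read_table() are hypothetical placeholders for cxl_cdat_get_length() and cxl_cdat_read_table()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table_cache { void *table; size_t length; };

/* stand-ins for the real CDAT length/read helpers */
static int get_length(size_t *len) { *len = 64; return 0; }
static int read_table(void *buf, size_t *len) { memset(buf, 0, *len); return 0; }

static int fill_cache(struct table_cache *cache)
{
	size_t length;
	void *table;
	int rc;

	rc = get_length(&length);
	if (rc)
		return rc;

	table = calloc(1, length);
	if (!table)
		return -ENOMEM;

	rc = read_table(table, &length);
	if (rc) {
		free(table);		/* nothing in the cache to unwind */
		return rc;
	}

	/* publish only after every step has succeeded */
	cache->table = table;
	cache->length = length;
	return 0;
}

int main(void)
{
	struct table_cache cache = { 0 };

	printf("fill_cache: %d, length %zu\n", fill_cache(&cache), cache.length);
	free(cache.table);
	return 0;
}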
Suggested-by: Jonathan Cameron Link: https://lore.kernel.org/linux-cxl/20230209113422.00007017@Huawei.com/ Signed-off-by: Dave Jiang [lukas: rebase and rephrase commit message] Signed-off-by: Lukas Wunner Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Acked-by: Bjorn Helgaas Link: https://lore.kernel.org/r/7a5c7104fb6a3016dbaec1c5d0ed34619ea11a0c.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index c868f4a2f1de..0609bd629073 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -475,10 +475,10 @@ static int cxl_cdat_get_length(struct device *dev, static int cxl_cdat_read_table(struct device *dev, struct pci_doe_mb *cdat_doe, - struct cxl_cdat *cdat) + void *cdat_table, size_t *cdat_length) { - size_t length = cdat->length; - __le32 *data = cdat->table; + size_t length = *cdat_length; + __le32 *data = cdat_table; int entry_handle = 0; do { @@ -522,7 +522,7 @@ static int cxl_cdat_read_table(struct device *dev, } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); /* Length in CDAT header may exceed concatenation of CDAT entries */ - cdat->length -= length; + *cdat_length -= length; return 0; } @@ -542,6 +542,7 @@ void read_cdat_data(struct cxl_port *port) struct cxl_dev_state *cxlds = cxlmd->cxlds; struct pci_dev *pdev = to_pci_dev(cxlds->dev); size_t cdat_length; + void *cdat_table; int rc; cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, @@ -558,19 +559,19 @@ void read_cdat_data(struct cxl_port *port) return; } - port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL); - if (!port->cdat.table) + cdat_table = devm_kzalloc(dev, cdat_length, GFP_KERNEL); + if (!cdat_table) return; - port->cdat.length = cdat_length; - rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat); + rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length); if (rc) { /* Don't leave table data allocated on error */ - devm_kfree(dev, port->cdat.table); - port->cdat.table = NULL; - port->cdat.length = 0; + devm_kfree(dev, cdat_table); dev_err(dev, "CDAT data read error\n"); } + + port->cdat.table = cdat_table; + port->cdat.length = cdat_length; } EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL); -- cgit v1.2.3 From f960e57dca9fa3653d9e9c0a9e1386d2241e0aad Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:17 +0100 Subject: cxl/pci: Rightsize CDAT response allocation Jonathan notes that cxl_cdat_get_length() and cxl_cdat_read_table() allocate 32 dwords for the DOE response even though it may be smaller. In the case of cxl_cdat_get_length(), only the second dword of the response is of interest (it contains the length). So reduce the allocation to 2 dwords and let DOE discard the remainder. In the case of cxl_cdat_read_table(), a correctly sized allocation for the full CDAT already exists. Let DOE write each table entry directly into that allocation. There's a snag in that the table entry is preceded by a Table Access Response Header (1 dword, CXL 3.0 table 8-14). Save the last dword of the previous table entry, let DOE overwrite it with the header of the next entry and restore it afterwards. The resulting CDAT is preceded by 4 unavoidable useless bytes. Increase the allocation size accordingly. 
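The save/restore dance is easier to follow in isolation. A user-space toy with made-up values (uint32_t stands in for __le32, and the real code additionally bounds each pci_doe() write by the remaining length):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* buffer with 1 extra leading DW of response-header slack, as in the patch */
	uint32_t cdat[1 + 6] = { 0 };
	uint32_t *data = cdat;
	uint32_t saved_dw = 0;

	/* two fake "responses": 1 DW header followed by 3 DWs of entry data */
	uint32_t resp[2][4] = {
		{ 0xaaaa0001, 0x11111111, 0x22222222, 0x33333333 },
		{ 0xaaaaffff, 0x44444444, 0x55555555, 0x66666666 },
	};

	for (int i = 0; i < 2; i++) {
		memcpy(data, resp[i], sizeof(resp[i]));	/* DOE writes here */
		size_t entry_dw = 4 - 1;		/* skip response header */

		*data = saved_dw;	/* restore DW clobbered by the header */
		data += entry_dw;
		saved_dw = *data;	/* will be clobbered by the next header */
	}

	for (int i = 1; i < 7; i++)	/* CDAT proper starts after 1 DW */
		printf("%08x\n", cdat[i]);
	return 0;
}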
The buffer overflow check in cxl_cdat_read_table() becomes unnecessary because the remaining bytes in the allocation are tracked in "length", which is passed to DOE and limits how many bytes it writes to the allocation. Additionally, cxl_cdat_read_table() bails out if the DOE response is truncated due to insufficient space. Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Jonathan Cameron Cc: Dave Jiang Link: https://lore.kernel.org/r/7a4e1f86958a79a70f29b96a92199522f08f8322.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 0609bd629073..25b7e8125d5d 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -453,7 +453,7 @@ static int cxl_cdat_get_length(struct device *dev, size_t *length) { __le32 request = CDAT_DOE_REQ(0); - __le32 response[32]; + __le32 response[2]; int rc; rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, @@ -464,7 +464,7 @@ static int cxl_cdat_get_length(struct device *dev, dev_err(dev, "DOE failed: %d", rc); return rc; } - if (rc < 2 * sizeof(__le32)) + if (rc < sizeof(response)) return -EIO; *length = le32_to_cpu(response[1]); @@ -477,28 +477,28 @@ static int cxl_cdat_read_table(struct device *dev, struct pci_doe_mb *cdat_doe, void *cdat_table, size_t *cdat_length) { - size_t length = *cdat_length; + size_t length = *cdat_length + sizeof(__le32); __le32 *data = cdat_table; int entry_handle = 0; + __le32 saved_dw = 0; do { __le32 request = CDAT_DOE_REQ(entry_handle); struct cdat_entry_header *entry; - __le32 response[32]; size_t entry_dw; int rc; rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), - &response, sizeof(response)); + data, length); if (rc < 0) { dev_err(dev, "DOE failed: %d", rc); return rc; } /* 1 DW Table Access Response Header + CDAT entry */ - entry = (struct cdat_entry_header *)(response + 1); + entry = (struct cdat_entry_header *)(data + 1); if ((entry_handle == 0 && rc != sizeof(__le32) + sizeof(struct cdat_header)) || (entry_handle > 0 && @@ -508,21 +508,22 @@ static int cxl_cdat_read_table(struct device *dev, /* Get the CXL table access header entry handle */ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, - le32_to_cpu(response[0])); + le32_to_cpu(data[0])); entry_dw = rc / sizeof(__le32); /* Skip Header */ entry_dw -= 1; - entry_dw = min(length / sizeof(__le32), entry_dw); - /* Prevent length < 1 DW from causing a buffer overflow */ - if (entry_dw) { - memcpy(data, entry, entry_dw * sizeof(__le32)); - length -= entry_dw * sizeof(__le32); - data += entry_dw; - } + /* + * Table Access Response Header overwrote the last DW of + * previous entry, so restore that DW + */ + *data = saved_dw; + length -= entry_dw * sizeof(__le32); + data += entry_dw; + saved_dw = *data; } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); /* Length in CDAT header may exceed concatenation of CDAT entries */ - *cdat_length -= length; + *cdat_length -= length - sizeof(__le32); return 0; } @@ -559,7 +560,8 @@ void read_cdat_data(struct cxl_port *port) return; } - cdat_table = devm_kzalloc(dev, cdat_length, GFP_KERNEL); + cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32), + GFP_KERNEL); if (!cdat_table) return; @@ -570,7 +572,7 @@ void read_cdat_data(struct cxl_port *port) dev_err(dev, "CDAT data read error\n"); } - port->cdat.table = 
cdat_table; + port->cdat.table = cdat_table + sizeof(__le32); port->cdat.length = cdat_length; } EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL); -- cgit v1.2.3 From 267214a2319b5692bbc9b128a6514960291dcca8 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 21 Apr 2023 19:51:47 -0700 Subject: cxl/port: Fix port to pci device assumptions in read_cdat_data() Not all endpoint CXL ports are associated with PCI devices. The cxl_test infrastructure models 'struct cxl_port' instances hosted by platform devices. Teach read_cdat_data() to be careful about non-pci hosted cxl_memdev instances. Otherwise, cxl_test crashes with this signature: RIP: 0010:xas_start+0x6d/0x290 [..] Call Trace: xas_load+0xa/0x50 xas_find+0x25b/0x2f0 xa_find+0x118/0x1d0 pci_find_doe_mailbox+0x51/0xc0 read_cdat_data+0x45/0x190 [cxl_core] cxl_port_probe+0x10a/0x1e0 [cxl_port] cxl_bus_probe+0x17/0x50 [cxl_core] Some other cleanups are included like removing the single-use @uport variable, and removing the indirection through 'struct cxl_dev_state' to lookup the device that registered the memdev and may be a pci device. Fixes: af0a6c3587dc ("cxl/pci: Use CDAT DOE mailbox created by PCI core") Reviewed-by: Lukas Wunner Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/168213190748.708404.16215095414060364800.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 25b7e8125d5d..bdbd907884ce 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -536,17 +536,18 @@ static int cxl_cdat_read_table(struct device *dev, */ void read_cdat_data(struct cxl_port *port) { - struct pci_doe_mb *cdat_doe; + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport); + struct device *host = cxlmd->dev.parent; struct device *dev = &port->dev; - struct device *uport = port->uport; - struct cxl_memdev *cxlmd = to_cxl_memdev(uport); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct pci_dev *pdev = to_pci_dev(cxlds->dev); + struct pci_doe_mb *cdat_doe; size_t cdat_length; void *cdat_table; int rc; - cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, + if (!dev_is_pci(host)) + return; + cdat_doe = pci_find_doe_mailbox(to_pci_dev(host), + PCI_DVSEC_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS); if (!cdat_doe) { dev_dbg(dev, "No CDAT mailbox\n"); -- cgit v1.2.3 From 764d102ef94e880ca834a7fe3968a00a05b1fb12 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Sat, 13 May 2023 00:20:06 -0700 Subject: cxl: Add missing return to cdat read error path Add a return to the error path when cxl_cdat_read_table() fails. Current code continues with the table pointer points to freed memory. 
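For reference, the tail of read_cdat_data() as it stands after this fix, pieced together from the diffs in this series (excerpt only, not a complete function):

	rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
	if (rc) {
		/* Don't leave table data allocated on error */
		devm_kfree(dev, cdat_table);
		dev_err(dev, "CDAT data read error\n");
		return;		/* the missing return added by this patch */
	}

	port->cdat.table = cdat_table + sizeof(__le32);
	port->cdat.length = cdat_length;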
Fixes: 7a877c923995 ("cxl/pci: Simplify CDAT retrieval error path") Signed-off-by: Dave Jiang Reviewed-by: Davidlohr Bueso Link: https://lore.kernel.org/r/168382793506.3510737.4792518576623749076.stgit@djiang5-mobl3 Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index bdbd907884ce..f332fe7af92b 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -571,6 +571,7 @@ void read_cdat_data(struct cxl_port *port) /* Don't leave table data allocated on error */ devm_kfree(dev, cdat_table); dev_err(dev, "CDAT data read error\n"); + return; } port->cdat.table = cdat_table + sizeof(__le32); -- cgit v1.2.3 From eb0764b822b9b26880b28ccb9100b2983e01bc17 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 17 May 2023 20:19:43 -0700 Subject: cxl/port: Enable the HDM decoder capability for switch ports Derick noticed, when testing hot plug, that hot-add behaves nominally after a removal. However, if the hot-add is done without a prior removal, CXL.mem accesses fail. It turns out that the original implementation of the port driver and region programming wrongly assumed that platform-firmware always enables the host-bridge HDM decoder capability. Add support turning on switch-level HDM decoders in the case where platform-firmware has not. The implementation is careful to only arrange for the enable to be undone if the current instance of the driver was the one that did the enable. This is to interoperate with platform-firmware that may expect CXL.mem to remain active after the driver is shutdown. This comes at the cost of potentially not shutting down the enable on kexec flows, but it is mitigated by the fact that the related HDM decoders still need to be enabled on an individual basis. Cc: Reported-by: Derick Marks Fixes: 54cdbf845cf7 ("cxl/port: Add a driver for 'struct cxl_port' objects") Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/168437998331.403037.15719879757678389217.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 27 +++++++++++++++++++++++---- drivers/cxl/cxl.h | 1 + drivers/cxl/port.c | 14 +++++++++----- tools/testing/cxl/Kbuild | 1 + tools/testing/cxl/test/mock.c | 15 +++++++++++++++ 5 files changed, 49 insertions(+), 9 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index f332fe7af92b..c35c002b65dd 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -241,17 +241,36 @@ static void disable_hdm(void *_cxlhdm) hdm + CXL_HDM_DECODER_CTRL_OFFSET); } -static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm) +int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm) { - void __iomem *hdm = cxlhdm->regs.hdm_decoder; + void __iomem *hdm; u32 global_ctrl; + /* + * If the hdm capability was not mapped there is nothing to enable and + * the caller is responsible for what happens next. For example, + * emulate a passthrough decoder. + */ + if (IS_ERR(cxlhdm)) + return 0; + + hdm = cxlhdm->regs.hdm_decoder; global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); + + /* + * If the HDM decoder capability was enabled on entry, skip + * registering disable_hdm() since this decode capability may be + * owned by platform firmware. 
+ */ + if (global_ctrl & CXL_HDM_DECODER_ENABLE) + return 0; + writel(global_ctrl | CXL_HDM_DECODER_ENABLE, hdm + CXL_HDM_DECODER_CTRL_OFFSET); - return devm_add_action_or_reset(host, disable_hdm, cxlhdm); + return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm); } +EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL); int cxl_dvsec_rr_decode(struct device *dev, int d, struct cxl_endpoint_dvsec_info *info) @@ -425,7 +444,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, if (info->mem_enabled) return 0; - rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); + rc = devm_cxl_enable_hdm(port, cxlhdm); if (rc) return rc; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 044a92d9813e..f93a28538962 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -710,6 +710,7 @@ struct cxl_endpoint_dvsec_info { struct cxl_hdm; struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, struct cxl_endpoint_dvsec_info *info); +int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm); int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, struct cxl_endpoint_dvsec_info *info); int devm_cxl_add_passthrough_decoder(struct cxl_port *port); diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index eb57324c4ad4..17a95f469c26 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -60,13 +60,17 @@ static int discover_region(struct device *dev, void *root) static int cxl_switch_port_probe(struct cxl_port *port) { struct cxl_hdm *cxlhdm; - int rc; + int rc, nr_dports; - rc = devm_cxl_port_enumerate_dports(port); - if (rc < 0) - return rc; + nr_dports = devm_cxl_port_enumerate_dports(port); + if (nr_dports < 0) + return nr_dports; cxlhdm = devm_cxl_setup_hdm(port, NULL); + rc = devm_cxl_enable_hdm(port, cxlhdm); + if (rc) + return rc; + if (!IS_ERR(cxlhdm)) return devm_cxl_enumerate_decoders(cxlhdm, NULL); @@ -75,7 +79,7 @@ static int cxl_switch_port_probe(struct cxl_port *port) return PTR_ERR(cxlhdm); } - if (rc == 1) { + if (nr_dports == 1) { dev_dbg(&port->dev, "Fallback to passthrough decoder\n"); return devm_cxl_add_passthrough_decoder(port); } diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index fba7bec96acd..6f9347ade82c 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -6,6 +6,7 @@ ldflags-y += --wrap=acpi_pci_find_root ldflags-y += --wrap=nvdimm_bus_register ldflags-y += --wrap=devm_cxl_port_enumerate_dports ldflags-y += --wrap=devm_cxl_setup_hdm +ldflags-y += --wrap=devm_cxl_enable_hdm ldflags-y += --wrap=devm_cxl_add_passthrough_decoder ldflags-y += --wrap=devm_cxl_enumerate_decoders ldflags-y += --wrap=cxl_await_media_ready diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c index de3933a776fd..284416527644 100644 --- a/tools/testing/cxl/test/mock.c +++ b/tools/testing/cxl/test/mock.c @@ -149,6 +149,21 @@ struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port, } EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, CXL); +int __wrap_devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm) +{ + int index, rc; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops && ops->is_mock_port(port->uport)) + rc = 0; + else + rc = devm_cxl_enable_hdm(port, cxlhdm); + put_cxl_mock_ops(index); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enable_hdm, CXL); + int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port) { int rc, index; -- cgit v1.2.3 From ce17ad0d54985e2595a3e615fda31df61808a08c Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 18 May 2023 
14:54:34 -0700 Subject: cxl: Wait Memory_Info_Valid before access memory related info The Memory_Info_Valid bit (CXL 3.0 8.1.3.8.2) indicates that the CXL Range Size High and Size Low registers are valid. The bit must be set within 1 second of reset deassertion to the device. Check valid bit before we check the Memory_Active bit when waiting for cxl_await_media_ready() to ensure that the memory info is valid for consumption. Also ensures both DVSEC ranges 1 and 2 are ready if DVSEC Capability indicates they are both supported. Fixes: 523e594d9cc0 ("cxl/pci: Implement wait for media active") Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/168444687469.3134781.11033518965387297327.stgit@djiang5-mobl3 Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 85 ++++++++++++++++++++++++++++++++++++++++++++------ drivers/cxl/cxlpci.h | 2 ++ 2 files changed, 78 insertions(+), 9 deletions(-) (limited to 'drivers/cxl/core/pci.c') diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index c35c002b65dd..67f4ab6daa34 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -101,23 +101,57 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port) } EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL); -/* - * Wait up to @media_ready_timeout for the device to report memory - * active. - */ -int cxl_await_media_ready(struct cxl_dev_state *cxlds) +static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->cxl_dvsec; + bool valid = false; + int rc, i; + u32 temp; + + if (id > CXL_DVSEC_RANGE_MAX) + return -EINVAL; + + /* Check MEM INFO VALID bit first, give up after 1s */ + i = 1; + do { + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_RANGE_SIZE_LOW(id), + &temp); + if (rc) + return rc; + + valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp); + if (valid) + break; + msleep(1000); + } while (i--); + + if (!valid) { + dev_err(&pdev->dev, + "Timeout awaiting memory range %d valid after 1s.\n", + id); + return -ETIMEDOUT; + } + + return 0; +} + +static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id) { struct pci_dev *pdev = to_pci_dev(cxlds->dev); int d = cxlds->cxl_dvsec; bool active = false; - u64 md_status; int rc, i; + u32 temp; - for (i = media_ready_timeout; i; i--) { - u32 temp; + if (id > CXL_DVSEC_RANGE_MAX) + return -EINVAL; + /* Check MEM ACTIVE bit, up to 60s timeout by default */ + for (i = media_ready_timeout; i; i--) { rc = pci_read_config_dword( - pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp); + pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp); if (rc) return rc; @@ -134,6 +168,39 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds) return -ETIMEDOUT; } + return 0; +} + +/* + * Wait up to @media_ready_timeout for the device to report memory + * active. 
+ */ +int cxl_await_media_ready(struct cxl_dev_state *cxlds) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->cxl_dvsec; + int rc, i, hdm_count; + u64 md_status; + u16 cap; + + rc = pci_read_config_word(pdev, + d + CXL_DVSEC_CAP_OFFSET, &cap); + if (rc) + return rc; + + hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap); + for (i = 0; i < hdm_count; i++) { + rc = cxl_dvsec_mem_range_valid(cxlds, i); + if (rc) + return rc; + } + + for (i = 0; i < hdm_count; i++) { + rc = cxl_dvsec_mem_range_active(cxlds, i); + if (rc) + return rc; + } + md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); if (!CXLMDEV_READY(md_status)) return -EIO; diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h index 0465ef963cd6..7c02e55b8042 100644 --- a/drivers/cxl/cxlpci.h +++ b/drivers/cxl/cxlpci.h @@ -31,6 +31,8 @@ #define CXL_DVSEC_RANGE_BASE_LOW(i) (0x24 + (i * 0x10)) #define CXL_DVSEC_MEM_BASE_LOW_MASK GENMASK(31, 28) +#define CXL_DVSEC_RANGE_MAX 2 + /* CXL 2.0 8.1.4: Non-CXL Function Map DVSEC */ #define CXL_DVSEC_FUNCTION_MAP 2 -- cgit v1.2.3
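To summarize the final wait sequence of the series, a user-space sketch of the two-stage poll (illustrative only: read_range_reg() is a stand-in for pci_read_config_dword() on the DVSEC Range Size Low register, and the bit positions are simplified rather than the real CXL_DVSEC_MEM_* masks):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MEM_INFO_VALID	(1u << 0)	/* simplified bit positions */
#define MEM_ACTIVE	(1u << 1)

static uint32_t read_range_reg(int id)
{
	(void)id;
	return MEM_INFO_VALID | MEM_ACTIVE;	/* pretend the device is ready */
}

static int wait_range_valid(int id)
{
	/* the driver gives the valid bit roughly 1s (two reads, 1s apart) */
	for (int i = 0; i < 2; i++) {
		if (read_range_reg(id) & MEM_INFO_VALID)
			return 0;
		sleep(1);
	}
	return -1;
}

static int wait_range_active(int id, int timeout_s)
{
	/* default media_ready_timeout is 60s in the driver */
	for (int i = timeout_s; i; i--) {
		if (read_range_reg(id) & MEM_ACTIVE)
			return 0;
		sleep(1);
	}
	return -1;
}

int main(void)
{
	int hdm_count = 2;	/* from the HDM count field in the DVSEC capability */

	for (int id = 0; id < hdm_count; id++)
		if (wait_range_valid(id))
			return 1;
	for (int id = 0; id < hdm_count; id++)
		if (wait_range_active(id, 60))
			return 1;
	puts("media ready");
	return 0;
}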