summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2026-04-03 22:15:11 +0300
committerDave Jiang <dave.jiang@intel.com>2026-04-03 22:15:11 +0300
commit2fb3bdeb00111519965601389a7b60afb97bafc0 (patch)
tree546b18ad5180aa344022c266240ab5db624ca93d
parentf3b1d2260703f8fb39fd667a26d931d63d2dd10e (diff)
parente8069c66d09309579e53567be8ddfa6ccb2f452a (diff)
downloadlinux-2fb3bdeb00111519965601389a7b60afb97bafc0.tar.xz
Merge branch 'for-7.1/cxl-consolidate-endpoint' into cxl-for-next
Add code to ensure the endpoint has completed initialization before usage.
cxl/pci: Check memdev driver binding status in cxl_reset_done()
cxl/pci: Hold memdev lock in cxl_event_trace_record()
-rw-r--r--drivers/cxl/core/mbox.c5
-rw-r--r--drivers/cxl/core/region.c8
-rw-r--r--drivers/cxl/cxlmem.h2
-rw-r--r--drivers/cxl/pci.c3
-rw-r--r--include/linux/device.h1
5 files changed, 13 insertions, 6 deletions
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index e7a6452bf544..3f34bbabf4d3 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -893,7 +893,7 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
-void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+void cxl_event_trace_record(struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt)
@@ -920,6 +920,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
* translations. Take topology mutation locks and lookup
* { HPA, REGION } from { DPA, MEMDEV } in the event record.
*/
+ guard(device)(&cxlmd->dev);
guard(rwsem_read)(&cxl_rwsem.region);
guard(rwsem_read)(&cxl_rwsem.dpa);
@@ -968,7 +969,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
-static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+static void __cxl_event_trace_record(struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
struct cxl_event_record_raw *record)
{
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 95d81816008e..f901034624e1 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2967,13 +2967,15 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dpa_to_region_context ctx;
- struct cxl_port *port;
+ struct cxl_port *port = cxlmd->endpoint;
+
+ if (!cxlmd->dev.driver)
+ return NULL;
ctx = (struct cxl_dpa_to_region_context) {
.dpa = dpa,
};
- port = cxlmd->endpoint;
- if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
+ if (cxl_num_decoders_committed(port))
device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
return ctx.cxlr;
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index e21d744d639b..7a34a19c02c8 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -864,7 +864,7 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
-void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+void cxl_event_trace_record(struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 84cff73b39e5..6e04c8ef1912 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -1056,6 +1056,9 @@ static void cxl_reset_done(struct pci_dev *pdev)
* that no longer exists.
*/
guard(device)(&cxlmd->dev);
+ if (!cxlmd->dev.driver)
+ return;
+
if (cxlmd->endpoint &&
cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
device_for_each_child(&cxlmd->endpoint->dev, NULL,
diff --git a/include/linux/device.h b/include/linux/device.h
index 0be95294b6e6..4fafee80524b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -911,6 +911,7 @@ static inline void device_unlock(struct device *dev)
}
DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+DEFINE_GUARD_COND(device, _intr, device_lock_interruptible(_T), _RET == 0)
static inline void device_lock_assert(struct device *dev)
{