// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "cxlmem.h"
#include "cxlpci.h"
/**
 * DOC: cxl mem
 *
 * CXL memory endpoint devices and switches are CXL capable devices that are
 * participating in CXL.mem protocol. Their functionality builds on top of the
 * CXL.io protocol that allows enumerating and configuring components via
 * standard PCI mechanisms.
 *
 * The cxl_mem driver owns kicking off the enumeration of this CXL.mem
 * capability. With the detection of a CXL capable endpoint, the driver will
 * walk up to find the platform specific port it is connected to, and determine
 * if there are intervening switches in the path. If there are switches, a
 * secondary action is to enumerate those (implemented in cxl_core). Finally the
 * cxl_mem driver adds the device it is bound to as a CXL endpoint-port for use
 * in higher level operations.
 */
static void enable_suspend(void *data)
{
	cxl_mem_active_dec();
}

static void remove_debugfs(void *dentry)
{
	debugfs_remove_recursive(dentry);
}
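
/*
 * Debugfs seq_file callback: dump the memdev's device physical address (DPA)
 * partition layout via cxl_dpa_debug(). Registered as "dpamem" in this
 * device's debugfs directory during probe.
 */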
static int cxl_mem_dpa_show(struct seq_file *file, void *data)
{
	struct device *dev = file->private;
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	cxl_dpa_debug(file, cxlmd->cxlds);

	return 0;
}
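
/*
 * Register @cxlmd as an endpoint port below @parent_dport's port. The walk to
 * the CXL root records each intervening port in the chain before the endpoint
 * itself is added and armed for automatic removal.
 */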
static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
				 struct cxl_dport *parent_dport)
{
	struct cxl_port *parent_port = parent_dport->port;
	struct cxl_port *endpoint, *iter, *down;
	int rc;

	/*
	 * Now that the path to the root is established record all the
	 * intervening ports in the chain.
	 */
	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
		struct cxl_ep *ep;

		ep = cxl_ep_load(iter, cxlmd);
		ep->next = down;
	}

	/* Note: endpoint port component registers are derived from @cxlds */
	endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
				     parent_dport);
	if (IS_ERR(endpoint))
		return PTR_ERR(endpoint);

	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
	if (rc)
		return rc;

	if (!endpoint->dev.driver) {
		dev_err(&cxlmd->dev, "%s failed probe\n",
			dev_name(&endpoint->dev));
		return -ENXIO;
	}

	return 0;
}
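
/*
 * Debugfs attribute callbacks for poison injection and clearing. The
 * "inject_poison" and "clear_poison" files take a device physical address in
 * hex and are only created when the device advertises the corresponding
 * poison commands.
 */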
static int cxl_debugfs_poison_inject(void *data, u64 dpa)
{
	struct cxl_memdev *cxlmd = data;

	return cxl_inject_poison(cxlmd, dpa);
}

DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
			 cxl_debugfs_poison_inject, "%llx\n");
static int cxl_debugfs_poison_clear(void *data, u64 dpa)
{
	struct cxl_memdev *cxlmd = data;

	return cxl_clear_poison(cxlmd, dpa);
}

DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
			 cxl_debugfs_poison_clear, "%llx\n");
static int cxl_mem_probe(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *endpoint_parent;
	struct cxl_dport *dport;
	struct dentry *dentry;
	int rc;

	if (!cxlds->media_ready)
		return -EBUSY;

	/*
	 * Someone is trying to reattach this device after it lost its port
	 * connection (an endpoint port previously registered by this memdev was
	 * disabled). This racy check is ok because if the port is still gone,
	 * no harm done, and if the port hierarchy comes back it will re-trigger
	 * this probe. Port rescan and memdev detach work share the same
	 * single-threaded workqueue.
	 */
	if (work_pending(&cxlmd->detach_work))
		return -EBUSY;

	dentry = cxl_debugfs_create_dir(dev_name(dev));
	debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);

	if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds))
		debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
				    &cxl_poison_inject_fops);
	if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds))
		debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
				    &cxl_poison_clear_fops);

	rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
	if (rc)
		return rc;

	rc = devm_cxl_enumerate_ports(cxlmd);
	if (rc)
		return rc;

	struct cxl_port *parent_port __free(put_cxl_port) =
		cxl_mem_find_port(cxlmd, &dport);
	if (!parent_port) {
		dev_err(dev, "CXL port topology not found\n");
		return -ENXIO;
	}

	if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
		rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
		if (rc) {
			if (rc == -ENODEV)
				dev_info(dev, "PMEM disabled by platform\n");
			return rc;
		}
	}

	if (dport->rch)
		endpoint_parent = parent_port->uport_dev;
	else
		endpoint_parent = &parent_port->dev;

	cxl_dport_init_ras_reporting(dport, dev);

	scoped_guard(device, endpoint_parent) {
		if (!endpoint_parent->driver) {
			dev_err(dev, "CXL port topology %s not enabled\n",
				dev_name(endpoint_parent));
			return -ENXIO;
		}

		rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
		if (rc)
			return rc;
	}

	/*
	 * The kernel may be operating out of CXL memory on this device,
	 * there is no spec defined way to determine whether this device
	 * preserves contents over suspend, and there is no simple way
	 * to arrange for the suspend image to avoid CXL memory which
	 * would setup a circular dependency between PCI resume and save
	 * state restoration.
	 *
	 * TODO: support suspend when all the regions this device is
	 * hosting are locked and covered by the system address map,
	 * i.e. platform firmware owns restoring the HDM configuration
	 * that it locked.
	 */
	cxl_mem_active_inc();
	return devm_add_action_or_reset(dev, enable_suspend, NULL);
}
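
/*
 * Sysfs knob: a "true" boolean write triggers retrieval of the device's poison
 * list; the resulting records are logged as kernel trace events.
 */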
static ssize_t trigger_poison_list_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	bool trigger;
	int rc;

	if (kstrtobool(buf, &trigger) || !trigger)
		return -EINVAL;

	rc = cxl_trigger_poison_list(to_cxl_memdev(dev));

	return rc ? rc : len;
}
static DEVICE_ATTR_WO(trigger_poison_list);
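
/* Hide trigger_poison_list unless the device supports the poison list command */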
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

	if (a == &dev_attr_trigger_poison_list.attr)
		if (!test_bit(CXL_POISON_ENABLED_LIST,
			      mds->poison.enabled_cmds))
			return 0;

	return a->mode;
}

static struct attribute *cxl_mem_attrs[] = {
	&dev_attr_trigger_poison_list.attr,
	NULL
};

static struct attribute_group cxl_mem_group = {
	.attrs = cxl_mem_attrs,
	.is_visible = cxl_mem_visible,
};

__ATTRIBUTE_GROUPS(cxl_mem);
static struct cxl_driver cxl_mem_driver = {
	.name = "cxl_mem",
	.probe = cxl_mem_probe,
	.id = CXL_DEVICE_MEMORY_EXPANDER,
	.drv = {
		.dev_groups = cxl_mem_groups,
	},
};

module_cxl_driver(cxl_mem_driver);
MODULE_DESCRIPTION("CXL: Memory Expansion");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER);
/*
 * create_endpoint() wants to validate port driver attach immediately after
 * endpoint registration.
 */
MODULE_SOFTDEP("pre: cxl_port");