// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/memregion.h>
#include "cxlmem.h"
#include "cxl.h"
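
/*
 * Mailbox commands that are reserved for kernel-exclusive use while a
 * cxl_nvdimm device is bound, so that userspace ioctls cannot race the
 * kernel's label (LSA) management.
 */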
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

static void clear_exclusive(void *mds)
{
	clear_exclusive_cxl_commands(mds, exclusive_cmds);
}

static void unregister_nvdimm(void *nvdimm)
{
	nvdimm_delete(nvdimm);
}

static ssize_t provider_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);

	return sysfs_emit(buf, "%s\n", dev_name(&cxl_nvd->dev));
}
static DEVICE_ATTR_RO(provider);

static ssize_t id_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;

	return sysfs_emit(buf, "%lld\n", cxlds->serial);
}
static DEVICE_ATTR_RO(id);

static struct attribute *cxl_dimm_attributes[] = {
	&dev_attr_id.attr,
	&dev_attr_provider.attr,
	NULL
};

static const struct attribute_group cxl_dimm_attribute_group = {
	.name = "cxl",
	.attrs = cxl_dimm_attributes,
};

static const struct attribute_group *cxl_dimm_attribute_groups[] = {
	&cxl_dimm_attribute_group,
	NULL
};
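
/*
 * Bind a cxl_nvdimm device: mark the label commands kernel-exclusive for
 * the backing memdev and register a corresponding nvdimm on the bridge's
 * libnvdimm bus.
 */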
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	unsigned long flags = 0, cmd_mask = 0;
	struct nvdimm *nvdimm;
	int rc;

	set_exclusive_cxl_commands(mds, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, mds);
	if (rc)
		return rc;

	set_bit(NDD_LABELING, &flags);
	set_bit(NDD_REGISTER_SYNC, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
				 cxl_dimm_attribute_groups, flags,
				 cmd_mask, 0, NULL, cxl_nvd->dev_id,
				 cxl_security_ops, NULL);
	if (!nvdimm)
		return -ENOMEM;

	dev_set_drvdata(dev, nvdimm);
	return devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
	.drv = {
		.suppress_bind_attrs = true,
	},
};
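
/*
 * ND_CMD_GET_CONFIG_SIZE: report the label storage area (LSA) size and the
 * largest transfer a single Set LSA mailbox command can carry.
 */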
static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds,
				    struct nd_cmd_get_config_size *cmd,
				    unsigned int buf_len)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	*cmd = (struct nd_cmd_get_config_size){
		.config_size = mds->lsa_size,
		.max_xfer =
			cxl_mbox->payload_size - sizeof(struct cxl_mbox_set_lsa),
	};

	return 0;
}
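
/*
 * ND_CMD_GET_CONFIG_DATA: read a slice of the label storage area by
 * translating the request into a CXL Get LSA mailbox command.
 */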
static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
				    struct nd_cmd_get_config_data_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_lsa get_lsa;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;
	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
		return -EINVAL;

	get_lsa = (struct cxl_mbox_get_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
		.length = cpu_to_le32(cmd->in_length),
	};
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_LSA,
		.payload_in = &get_lsa,
		.size_in = sizeof(get_lsa),
		.size_out = cmd->in_length,
		.payload_out = cmd->out_buf,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	cmd->status = 0;

	return rc;
}
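
/*
 * ND_CMD_SET_CONFIG_DATA: write a slice of the label storage area via the
 * CXL Set LSA mailbox command. The input is bounced through a kernel
 * allocation so the little-endian offset header can prefix the data.
 */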
static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
				    struct nd_cmd_set_config_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_set_lsa *set_lsa;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	/* 4-byte status follows the input data in the payload */
	if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
		return -EINVAL;

	set_lsa =
		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
	if (!set_lsa)
		return -ENOMEM;

	*set_lsa = (struct cxl_mbox_set_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
	};
	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_LSA,
		.payload_in = set_lsa,
		.size_in = struct_size(set_lsa, data, cmd->in_length),
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);

	/*
	 * Set "firmware" status (4-packed bytes at the end of the input
	 * payload).
	 */
	put_unaligned(0, (u32 *)&cmd->in_buf[cmd->in_length]);
	kvfree(set_lsa);

	return rc;
}
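
/*
 * Dispatch the ND_CMD_* config commands that were advertised in cmd_mask
 * at nvdimm creation time; everything else is rejected with -ENOTTY.
 */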
static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
			       void *buf, unsigned int buf_len)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

	if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		return cxl_pmem_get_config_size(mds, buf, buf_len);
	case ND_CMD_GET_CONFIG_DATA:
		return cxl_pmem_get_config_data(mds, buf, buf_len);
	case ND_CMD_SET_CONFIG_DATA:
		return cxl_pmem_set_config_data(mds, buf, buf_len);
	default:
		return -ENOTTY;
	}
}

static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}
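
/*
 * Unbind any cxl_nvdimm that belongs to the bridge being torn down. The
 * check runs under the device lock; the actual unbind happens after the
 * lock is dropped since device_release_driver() takes it again.
 */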
static int detach_nvdimm(struct device *dev, void *data)
{
	struct cxl_nvdimm *cxl_nvd;
	bool release = false;

	if (!is_cxl_nvdimm(dev))
		return 0;

	scoped_guard(device, dev) {
		if (dev->driver) {
			cxl_nvd = to_cxl_nvdimm(dev);
			if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
				release = true;
		}
	}
	if (release)
		device_release_driver(dev);
	return 0;
}

static void unregister_nvdimm_bus(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
	struct nvdimm_bus *nvdimm_bus = cxl_nvb->nvdimm_bus;

	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, detach_nvdimm);

	cxl_nvb->nvdimm_bus = NULL;
	nvdimm_bus_unregister(nvdimm_bus);
}
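
/*
 * Bind a cxl_nvdimm_bridge: register a libnvdimm bus that proxies ndctl
 * ioctls to CXL mailbox commands for the memdevs behind this bridge.
 */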
static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
		.provider_name = "CXL",
		.module = THIS_MODULE,
		.ndctl = cxl_pmem_ctl,
	};

	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);

	if (!cxl_nvb->nvdimm_bus)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, unregister_nvdimm_bus, cxl_nvb);
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
	.drv = {
		.suppress_bind_attrs = true,
	},
};

static void unregister_nvdimm_region(void *nd_region)
{
	nvdimm_region_delete(nd_region);
}

static void cxlr_pmem_remove_resource(void *res)
{
	remove_resource(res);
}

struct cxl_pmem_region_info {
	u64 offset;
	u64 serial;
};
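
/*
 * Bind a cxl_pmem_region: claim the region's HPA range in iomem_resource,
 * build the per-DIMM mapping table, derive an interleave-set cookie from
 * the mapping offsets and device serial numbers, and register the result
 * as a libnvdimm pmem region.
 */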
static int cxl_pmem_region_probe(struct device *dev)
{
	struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
	struct cxl_pmem_region_info *info = NULL;
	struct nd_interleave_set *nd_set;
	struct nd_region_desc ndr_desc;
	struct cxl_nvdimm *cxl_nvd;
	struct nvdimm *nvdimm;
	struct resource *res;
	int rc, i = 0;

	memset(&mappings, 0, sizeof(mappings));
	memset(&ndr_desc, 0, sizeof(ndr_desc));

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = cxlr_pmem->hpa_range.start;
	res->end = cxlr_pmem->hpa_range.end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	rc = insert_resource(&iomem_resource, res);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
	if (rc)
		return rc;

	ndr_desc.res = res;
	ndr_desc.provider_data = cxlr_pmem;

	ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
	ndr_desc.target_node = phys_to_target_node(res->start);
	if (ndr_desc.target_node == NUMA_NO_NODE) {
		ndr_desc.target_node = ndr_desc.numa_node;
		dev_dbg(&cxlr->dev, "changing target node from %d to %d",
			NUMA_NO_NODE, ndr_desc.target_node);
	}

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	ndr_desc.memregion = cxlr->id;
	set_bit(ND_REGION_CXL, &ndr_desc.flags);
	set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

	info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_memdev *cxlmd = m->cxlmd;
		struct cxl_dev_state *cxlds = cxlmd->cxlds;

		cxl_nvd = cxlmd->cxl_nvd;
		nvdimm = dev_get_drvdata(&cxl_nvd->dev);
		if (!nvdimm) {
			dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		m->cxl_nvd = cxl_nvd;
		mappings[i] = (struct nd_mapping_desc) {
			.nvdimm = nvdimm,
			.start = m->start,
			.size = m->size,
			.position = i,
		};
		info[i].offset = m->start;
		info[i].serial = cxlds->serial;
	}
	ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
	ndr_desc.mapping = mappings;

	/*
	 * TODO enable CXL labels which skip the need for 'interleave-set cookie'
	 */
	nd_set->cookie1 =
		nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
	nd_set->cookie2 = nd_set->cookie1;
	ndr_desc.nd_set = nd_set;

	cxlr_pmem->nd_region =
		nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
	if (!cxlr_pmem->nd_region) {
		rc = -ENOMEM;
		goto out_nvd;
	}

	rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
				      cxlr_pmem->nd_region);
out_nvd:
	kfree(info);

	return rc;
}

static struct cxl_driver cxl_pmem_region_driver = {
	.name = "cxl_pmem_region",
	.probe = cxl_pmem_region_probe,
	.id = CXL_DEVICE_PMEM_REGION,
	.drv = {
		.suppress_bind_attrs = true,
	},
};
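
/*
 * Register the three drivers in bridge -> nvdimm -> region order and
 * unwind in reverse on failure.
 */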
static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		return rc;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	rc = cxl_driver_register(&cxl_pmem_region_driver);
	if (rc)
		goto err_region;

	return 0;

err_region:
	cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_pmem_region_driver);
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
}

MODULE_DESCRIPTION("CXL PMEM: Persistent Memory Support");
MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);