/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __LINUX_ND_H__
#define __LINUX_ND_H__
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
#include <linux/perf_event.h>

enum nvdimm_event {
	NVDIMM_REVALIDATE_POISON,
	NVDIMM_REVALIDATE_REGION,
};

enum nvdimm_claim_class {
	NVDIMM_CCLASS_NONE,
	NVDIMM_CCLASS_BTT,
	NVDIMM_CCLASS_BTT2,
	NVDIMM_CCLASS_PFN,
	NVDIMM_CCLASS_DAX,
	NVDIMM_CCLASS_UNKNOWN,
};

#define NVDIMM_EVENT_VAR(_id)	event_attr_##_id
#define NVDIMM_EVENT_PTR(_id)	(&event_attr_##_id.attr.attr)

#define NVDIMM_EVENT_ATTR(_name, _id)				\
	PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id,	\
		       nvdimm_events_sysfs_show)
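
/*
 * Illustrative sketch (not part of this header): a pmu driver could declare
 * an event attribute with NVDIMM_EVENT_ATTR() and export it through an
 * "events" attribute group. The event name and id below are hypothetical.
 *
 *	NVDIMM_EVENT_ATTR(media_reads, MEDIA_READS);
 *
 *	static struct attribute *foo_events_attrs[] = {
 *		NVDIMM_EVENT_PTR(MEDIA_READS),
 *		NULL,
 *	};
 */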

/* Event attribute array index */
#define NVDIMM_PMU_FORMAT_ATTR		0
#define NVDIMM_PMU_EVENT_ATTR		1
#define NVDIMM_PMU_CPUMASK_ATTR		2
#define NVDIMM_PMU_NULL_ATTR		3
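
/*
 * Illustrative sketch (not part of this header): the indices above name the
 * slots of a pmu attr_groups array. The group identifiers below are
 * hypothetical; how the slots are populated is split between the pmu driver
 * and the nvdimm perf core.
 *
 *	static const struct attribute_group *foo_attr_groups[] = {
 *		[NVDIMM_PMU_FORMAT_ATTR] = &foo_format_group,
 *		[NVDIMM_PMU_EVENT_ATTR] = &foo_events_group,
 *		[NVDIMM_PMU_CPUMASK_ATTR] = &foo_cpumask_group,
 *		[NVDIMM_PMU_NULL_ATTR] = NULL,
 *	};
 */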

/**
 * struct nvdimm_pmu - data structure for nvdimm perf driver
 * @pmu: pmu data structure for nvdimm performance stats.
 * @dev: nvdimm device pointer.
 * @cpu: designated cpu for counter access.
 * @node: node for cpu hotplug notifier link.
 * @cpuhp_state: state for cpu hotplug notification.
 * @arch_cpumask: cpumask to get designated cpu for counter access.
 */
struct nvdimm_pmu {
	struct pmu pmu;
	struct device *dev;
	int cpu;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
	/* cpumask provided by arch/platform specific code */
	struct cpumask arch_cpumask;
};

struct platform_device;

#ifdef CONFIG_PERF_EVENTS
extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page);

int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);

#else
static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
{
	return -ENXIO;
}

static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
#endif
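
/*
 * Illustrative sketch (not part of this header), assuming a platform driver
 * "foo" with its own pmu callbacks: the driver fills in the embedded
 * struct pmu and then hands the nvdimm_pmu to the core from probe.
 *
 *	nd_pmu->pmu.event_init = foo_pmu_event_init;
 *	nd_pmu->pmu.add = foo_pmu_add;
 *	nd_pmu->pmu.del = foo_pmu_del;
 *	nd_pmu->pmu.read = foo_pmu_read;
 *
 *	rc = register_nvdimm_pmu(nd_pmu, pdev);
 *	if (rc)
 *		return rc;
 *
 * unregister_nvdimm_pmu(nd_pmu) undoes the registration on remove.
 */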

struct nd_device_driver {
	struct device_driver drv;
	unsigned long type;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	void (*shutdown)(struct device *dev);
	void (*notify)(struct device *dev, enum nvdimm_event event);
};

#define to_nd_device_driver(__drv) container_of_const(__drv, struct nd_device_driver, drv)
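
/*
 * Illustrative sketch (not part of this header): an nd bus driver bundles its
 * callbacks and the device types it matches in struct nd_device_driver. The
 * names below are hypothetical; see module_nd_driver() at the end of this
 * header for the registration side.
 *
 *	static struct nd_device_driver foo_driver = {
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.drv = {
 *			.name = "foo",
 *		},
 *		.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
 *	};
 */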

/**
 * struct nd_namespace_common - core infrastructure of a namespace
 * @force_raw: ignore other personalities for the namespace (e.g. btt)
 * @dev: device model node
 * @claim: when set another personality has taken ownership of the namespace
 * @claim_class: restrict claim type to a given class
 * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
 */
struct nd_namespace_common {
	int force_raw;
	struct device dev;
	struct device *claim;
	enum nvdimm_claim_class claim_class;
	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
			void *buf, size_t size, int rw, unsigned long flags);
};

static inline struct nd_namespace_common *to_ndns(struct device *dev)
{
	return container_of(dev, struct nd_namespace_common, dev);
}

/**
 * struct nd_namespace_io - device representation of a persistent memory range
 * @dev: namespace device created by the nd region driver
 * @res: struct resource conversion of a NFIT SPA table
 * @size: cached resource_size(@res) for fast path size checks
 * @addr: virtual address to access the namespace range
 * @bb: badblocks list for the namespace range
 */
struct nd_namespace_io {
	struct nd_namespace_common common;
	struct resource res;
	resource_size_t size;
	void *addr;
	struct badblocks bb;
};

/**
 * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
 * @nsio: device and system physical address range to drive
 * @lbasize: logical sector size for the namespace in block-device-mode
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 * @id: ida allocated id
 */
struct nd_namespace_pmem {
	struct nd_namespace_io nsio;
	unsigned long lbasize;
	char *alt_name;
	uuid_t *uuid;
	int id;
};

static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
	return container_of(dev, struct nd_namespace_io, common.dev);
}

static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	return container_of(nsio, struct nd_namespace_pmem, nsio);
}
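
/*
 * Illustrative sketch (not part of this header): given a device known to be
 * a pmem-mode namespace, a driver can recover the specific views, e.g.
 *
 *	struct nd_namespace_common *ndns = to_ndns(dev);
 *	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 *	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(&ndns->dev);
 */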

/**
 * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
 * @ndns: device to read
 * @offset: namespace-relative starting offset
 * @buf: buffer to fill
 * @size: transfer length
 * @flags: transfer flags
 *
 * @buf is up-to-date upon return from this routine.
 */
static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
}
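
/*
 * Illustrative sketch (not part of this header): reading a hypothetical
 * on-media info block from the start of a namespace.
 *
 *	struct foo_info_block info;
 *	int rc = nvdimm_read_bytes(ndns, 0, &info, sizeof(info), 0);
 */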

/**
 * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
 * @ndns: device to write
 * @offset: namespace-relative starting offset
 * @buf: buffer to drain
 * @size: transfer length
 * @flags: transfer flags
 *
 * NVDIMM namespace devices do not implement sectors internally. Depending on
 * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
 * or on backing memory media upon return from this routine. Flushing
 * to media is handled internal to the @ndns driver, if at all.
 */
static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
}
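
/*
 * Illustrative sketch (not part of this header): writing the hypothetical
 * info block back; any flush to media is left to the @ndns driver.
 *
 *	rc = nvdimm_write_bytes(ndns, 0, &info, sizeof(info), 0);
 */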

#define MODULE_ALIAS_ND_DEVICE(type) \
	MODULE_ALIAS("nd:t" __stringify(type) "*")
#define ND_DEVICE_MODALIAS_FMT "nd:t%d"
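
/*
 * Illustrative sketch (not part of this header): a namespace driver module
 * can advertise the nd device types it binds to, e.g.
 *
 *	MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
 *	MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
 *
 * producing modaliases of the form "nd:t<type>*" that match the
 * ND_DEVICE_MODALIAS_FMT string emitted for nd bus uevents.
 */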

struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
		struct module *module, const char *mod_name);
static inline void nd_driver_unregister(struct nd_device_driver *drv)
{
	driver_unregister(&drv->drv);
}
#define nd_driver_register(driver) \
	__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define module_nd_driver(driver) \
	module_driver(driver, nd_driver_register, nd_driver_unregister)
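
/*
 * Illustrative sketch (not part of this header): the hypothetical foo_driver
 * from the struct nd_device_driver example above can be registered with a
 * single line, analogous to module_platform_driver():
 *
 *	module_nd_driver(foo_driver);
 */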
#endif /* __LINUX_ND_H__ */