/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"
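
/*
 * Device release callback for a pfn device: drop the claim on the
 * backing namespace, return the region-local id to the ida, and free
 * the memory allocated in __nd_pfn_create().
 */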
static void nd_pfn_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        dev_dbg(dev, "%s\n", __func__);
        nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
        ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
        kfree(nd_pfn->uuid);
        kfree(nd_pfn);
}
static struct device_type nd_pfn_device_type = {
        .name = "nd_pfn",
        .release = nd_pfn_release,
};
bool is_nd_pfn(struct device *dev)
{
        return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);
struct nd_pfn *to_nd_pfn(struct device *dev)
{
        struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

        WARN_ON(!is_nd_pfn(dev));
        return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);
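
/*
 * 'mode' sysfs attribute: selects where the struct page array (memmap)
 * for the claimed namespace is allocated from. "ram" uses regular
 * system memory, "pmem" (carving the space out of the namespace itself)
 * is not implemented in this version, and "none" leaves the device
 * unconfigured.
 */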
static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        switch (nd_pfn->mode) {
        case PFN_MODE_RAM:
                return sprintf(buf, "ram\n");
        case PFN_MODE_PMEM:
                return sprintf(buf, "pmem\n");
        default:
                return sprintf(buf, "none\n");
        }
}
static ssize_t mode_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
        ssize_t rc = 0;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (dev->driver)
                rc = -EBUSY;
        else {
                size_t n = len - 1;

                if (strncmp(buf, "pmem\n", n) == 0
                                || strncmp(buf, "pmem", n) == 0) {
                        /* TODO: allocate from PMEM support */
                        rc = -ENOTTY;
                } else if (strncmp(buf, "ram\n", n) == 0
                                || strncmp(buf, "ram", n) == 0)
                        nd_pfn->mode = PFN_MODE_RAM;
                else if (strncmp(buf, "none\n", n) == 0
                                || strncmp(buf, "none", n) == 0)
                        nd_pfn->mode = PFN_MODE_NONE;
                else
                        rc = -EINVAL;
        }
        dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
                        rc, buf, buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
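
/*
 * 'uuid' sysfs attribute: identifier written to, and later validated
 * against, the pfn superblock stored in the backing namespace.
 */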
static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        if (nd_pfn->uuid)
                return sprintf(buf, "%pUb\n", nd_pfn->uuid);
        return sprintf(buf, "\n");
}
static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
        ssize_t rc;

        device_lock(dev);
        rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
        dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
                        rc, buf, buf[len - 1] == '\n' ? "" : "\n");
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
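
/*
 * 'namespace' sysfs attribute: reports, or establishes, the namespace
 * claimed by this pfn device.
 */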
static ssize_t namespace_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        rc = sprintf(buf, "%s\n", nd_pfn->ndns
                        ? dev_name(&nd_pfn->ndns->dev) : "");
        nvdimm_bus_unlock(dev);

        return rc;
}
static ssize_t namespace_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
        dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
                        rc, buf, buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(namespace);
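
/* sysfs attributes and attribute groups registered for each pfn device */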
static struct attribute *nd_pfn_attributes[] = {
        &dev_attr_mode.attr,
        &dev_attr_namespace.attr,
        &dev_attr_uuid.attr,
        NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
        .attrs = nd_pfn_attributes,
};

static const struct attribute_group *nd_pfn_attribute_groups[] = {
        &nd_pfn_attribute_group,
        &nd_device_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};
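
/*
 * Allocate and initialize a pfn device in @nd_region, optionally
 * attaching it to @ndns; the caller is responsible for registering
 * (or putting) the returned device.
 */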
static struct device *__nd_pfn_create(struct nd_region *nd_region,
                u8 *uuid, enum nd_pfn_mode mode,
                struct nd_namespace_common *ndns)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        /* we can only create pages for contiguous ranges of pmem */
        if (!is_nd_pmem(&nd_region->dev))
                return NULL;

        nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
        if (!nd_pfn)
                return NULL;

        nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
        if (nd_pfn->id < 0) {
                kfree(nd_pfn);
                return NULL;
        }

        nd_pfn->mode = mode;
        if (uuid)
                uuid = kmemdup(uuid, 16, GFP_KERNEL);
        nd_pfn->uuid = uuid;
        dev = &nd_pfn->dev;
        dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
        dev->parent = &nd_region->dev;
        dev->type = &nd_pfn_device_type;
        dev->groups = nd_pfn_attribute_groups;
        device_initialize(&nd_pfn->dev);
        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
                dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
                                __func__, dev_name(ndns->claim));
                put_device(dev);
                return NULL;
        }

        return dev;
}
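
/*
 * Create and register an unconfigured "seed" pfn device at region scan
 * time so that userspace can configure it through sysfs.
 */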
struct device *nd_pfn_create(struct nd_region *nd_region)
{
        struct device *dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE,
                        NULL);

        if (dev)
                __nd_device_register(dev);

        return dev;
}
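
/*
 * Read the pfn superblock from the backing namespace and verify its
 * signature, checksum, mode, uuid, and data offset before the device
 * is allowed to be enabled.
 */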
int nd_pfn_validate(struct nd_pfn *nd_pfn)
{
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_io *nsio;
        u64 checksum, offset;

        if (!pfn_sb || !ndns)
                return -ENODEV;

        if (!is_nd_pmem(nd_pfn->dev.parent))
                return -ENODEV;

        /* section alignment for simple hotplug */
        if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN)
                return -ENODEV;

        if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
                return -ENXIO;

        if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0)
                return -ENODEV;

        checksum = le64_to_cpu(pfn_sb->checksum);
        pfn_sb->checksum = 0;
        if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
                return -ENODEV;
        pfn_sb->checksum = cpu_to_le64(checksum);

        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
                break;
        case PFN_MODE_PMEM:
                /* TODO: allocate from PMEM support */
                return -ENOTTY;
        default:
                return -ENXIO;
        }

        if (!nd_pfn->uuid) {
                /* from probe we allocate */
                nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
                if (!nd_pfn->uuid)
                        return -ENOMEM;
        } else {
                /* from init we validate */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
                        return -EINVAL;
        }

        /*
         * These warnings are verbose because they can only trigger in
         * the case where the physical address alignment of the
         * namespace has changed since the pfn superblock was
         * established.
         */
        offset = le64_to_cpu(pfn_sb->dataoff);
        nsio = to_nd_namespace_io(&ndns->dev);
        if (nsio->res.start & ND_PFN_MASK) {
                dev_err(&nd_pfn->dev,
                                "init failed: %s not section aligned\n",
                                dev_name(&ndns->dev));
                return -EBUSY;
        } else if (offset >= resource_size(&nsio->res)) {
                dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
                                dev_name(&ndns->dev));
                return -EBUSY;
        }

        return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);
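
/*
 * Probe-time hook: create a pfn device claiming @ndns, validate any
 * pre-existing superblock, and register the device on success;
 * otherwise tear the claim back down.
 */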
int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
{
        int rc;
        struct device *dev;
        struct nd_pfn *nd_pfn;
        struct nd_pfn_sb *pfn_sb;
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

        if (ndns->force_raw)
                return -ENODEV;

        nvdimm_bus_lock(&ndns->dev);
        dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns);
        nvdimm_bus_unlock(&ndns->dev);
        if (!dev)
                return -ENOMEM;
        dev_set_drvdata(dev, drvdata);
        pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
        nd_pfn = to_nd_pfn(dev);
        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn);
        nd_pfn->pfn_sb = NULL;
        kfree(pfn_sb);
        dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
                        rc == 0 ? dev_name(dev) : "<none>");
        if (rc < 0) {
                __nd_detach_ndns(dev, &nd_pfn->ndns);
                put_device(dev);
        } else
                __nd_device_register(&nd_pfn->dev);

        return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);