/*
 * Copyright (c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"
static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "%s\n", __func__);
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}
static struct device_type nd_pfn_device_type = {
	.name = "nd_pfn",
	.release = nd_pfn_release,
};
bool is_nd_pfn(struct device *dev)
{
	return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);
struct nd_pfn *to_nd_pfn(struct device *dev)
{
	struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

	WARN_ON(!is_nd_pfn(dev));
	return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);
static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	switch (nd_pfn->mode) {
	case PFN_MODE_RAM:
		return sprintf(buf, "ram\n");
	case PFN_MODE_PMEM:
		return sprintf(buf, "pmem\n");
	default:
		return sprintf(buf, "none\n");
	}
}
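/* 'mode' selects the struct page backing store: "ram", "pmem", or "none" */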
static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		size_t n = len - 1;

		if (strncmp(buf, "pmem\n", n) == 0
				|| strncmp(buf, "pmem", n) == 0) {
			nd_pfn->mode = PFN_MODE_PMEM;
		} else if (strncmp(buf, "ram\n", n) == 0
				|| strncmp(buf, "ram", n) == 0)
			nd_pfn->mode = PFN_MODE_RAM;
		else if (strncmp(buf, "none\n", n) == 0
				|| strncmp(buf, "none", n) == 0)
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	return sprintf(buf, "%lx\n", nd_pfn->align);
}
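/* validate a new alignment: a power of two between PAGE_SIZE and 1G, device idle */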
static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
{
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	if (!is_power_of_2(val) || val < PAGE_SIZE || val > SZ_1G)
		return -EINVAL;

	if (nd_pfn->dev.driver)
		return -EBUSY;
	nd_pfn->align = val;

	return 0;
}
static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = __align_store(nd_pfn, buf);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	if (nd_pfn->uuid)
		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
	return sprintf(buf, "\n");
}
static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%s\n", nd_pfn->ndns
			? dev_name(&nd_pfn->ndns->dev) : "");
	nvdimm_bus_unlock(dev);

	return rc;
}
static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(namespace);
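/* physical start of the data area: namespace base + start_pad + superblock data offset */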
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
				+ start_pad + offset);
	} else {
		/* no address to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(resource);
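/* usable size: namespace size minus start_pad, end_trunc, and the data offset */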
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&nsio->res) - start_pad
				- end_trunc - offset);
	} else {
		/* no size to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(size);
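/* sysfs attributes exposed by every pfn device */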
static struct attribute *nd_pfn_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_namespace.attr,
	&dev_attr_uuid.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	NULL,
};
static struct attribute_group nd_pfn_attribute_group = {
	.attrs = nd_pfn_attributes,
};

static const struct attribute_group *nd_pfn_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
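/* allocate and initialize a pfn device; pfn instances are only valid on pmem regions */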
static struct device *__nd_pfn_create(struct nd_region *nd_region,
		struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	/* we can only create pages for contiguous ranges of pmem */
	if (!is_nd_pmem(&nd_region->dev))
		return NULL;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	nd_pfn->mode = PFN_MODE_NONE;
	nd_pfn->align = HPAGE_SIZE;
	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->parent = &nd_region->dev;
	dev->type = &nd_pfn_device_type;
	dev->groups = nd_pfn_attribute_groups;
	device_initialize(&nd_pfn->dev);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
				__func__, dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}

	return dev;
}
struct device *nd_pfn_create(struct nd_region *nd_region)
{
	struct device *dev = __nd_pfn_create(nd_region, NULL);

	if (dev)
		__nd_device_register(dev);
	return dev;
}
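/*
 * Read the pfn superblock from the namespace and verify its signature,
 * checksum, parent uuid, mode, and data offset before trusting it.
 */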
int nd_pfn_validate(struct nd_pfn *nd_pfn)
{
	u64 checksum, offset;
	struct nd_namespace_io *nsio;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);

	if (!pfn_sb || !ndns)
		return -ENODEV;

	if (!is_nd_pmem(nd_pfn->dev.parent))
		return -ENODEV;

	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
		return -ENXIO;

	if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0)
		return -ENODEV;

	checksum = le64_to_cpu(pfn_sb->checksum);
	pfn_sb->checksum = 0;
	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
		return -ENODEV;
	pfn_sb->checksum = cpu_to_le64(checksum);

	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
		return -ENODEV;

	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
		pfn_sb->start_pad = 0;
		pfn_sb->end_trunc = 0;
	}

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
	case PFN_MODE_PMEM:
		break;
	default:
		return -ENXIO;
	}

	if (!nd_pfn->uuid) {
		/* from probe we allocate */
		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
		if (!nd_pfn->uuid)
			return -ENOMEM;
	} else {
		/* from init we validate */
		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
			return -ENODEV;
	}

	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
				nd_pfn->align, nvdimm_namespace_capacity(ndns));
		return -EINVAL;
	}

	/*
	 * These warnings are verbose because they can only trigger in
	 * the case where the physical address alignment of the
	 * namespace has changed since the pfn superblock was
	 * established.
	 */
	offset = le64_to_cpu(pfn_sb->dataoff);
	nsio = to_nd_namespace_io(&ndns->dev);
	if (offset >= resource_size(&nsio->res)) {
		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
				dev_name(&ndns->dev));
		return -EBUSY;
	}

	nd_pfn->align = 1UL << ilog2(offset);
	if (!is_power_of_2(offset) || offset < PAGE_SIZE) {
		dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
				offset);
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);
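/*
 * Create a pfn device claiming @ndns and register it when a valid
 * superblock is found; otherwise detach and release the device.
 */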
int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
{
	int rc;
	struct device *dev;
	struct nd_pfn *nd_pfn;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	nvdimm_bus_lock(&ndns->dev);
	dev = __nd_pfn_create(nd_region, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!dev)
		return -ENOMEM;
	dev_set_drvdata(dev, drvdata);
	pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
			rc == 0 ? dev_name(dev) : "<none>");
	if (rc < 0) {
		__nd_detach_ndns(dev, &nd_pfn->ndns);
		put_device(dev);
	} else
		__nd_device_register(&nd_pfn->dev);

	return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);