/* drivers/dax/device.c */
/*
 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "dax.h"

static struct class *dax_class;
/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);
static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);
static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);
static struct attribute *dax_region_attributes[] = {
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};
static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);
static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}
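/*
 * alloc_dax_region - parent-driver entry point for setting up a dax_region.
 *
 * Validates that @res is aligned to @align, allocates and initializes
 * the region, publishes the dax_region sysfs attribute group on
 * @parent, and arranges devm-based teardown via dax_region_unregister().
 * Returns NULL on any failure.
 */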
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;
	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
static struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dev_dax->num_resources; i++)
		size += resource_size(&dev_dax->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};
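/*
 * check_vma - reject VMAs that device-dax cannot safely service.
 *
 * A mapping must be shared (no COW of device pages), must be aligned
 * to the region's page size, must be marked VM_DONTCOPY (via
 * MADV_DONTFORK) when the region lacks struct-page backing (PFN_DEV
 * without PFN_MAP), and must target a DAX-capable vma.
 */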
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}
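/*
 * dax_pgoff_to_phys - translate a linear page offset into a physical
 * address by walking the device's resource ranges.
 *
 * Illustrative example (hypothetical layout): with two 1GiB ranges at
 * 0x100000000 and 0x300000000, a pgoff that falls inside the first
 * range resolves against res[0]; larger offsets are rebased by each
 * range's size in pages until a range contains the target. Returns -1
 * if no range can hold @size bytes at the translated address.
 */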
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	/* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
	phys_addr_t uninitialized_var(phys);
	int i;

	for (i = 0; i < dev_dax->num_resources; i++) {
		res = &dev_dax->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dev_dax->num_resources) {
		res = &dev_dax->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}
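/*
 * The three __dev_dax_*_fault() helpers below share one structure:
 * validate the vma, reject fault sizes that do not match the region
 * alignment, translate the page offset to a physical address, then
 * insert a mapping of the corresponding size. PTE faults SIGBUS when
 * the region alignment exceeds PAGE_SIZE, because no single-page
 * mapping can honor the alignment contract.
 */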
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
				dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
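/*
 * dev_dax_huge_fault - dispatch a fault to the handler matching the
 * requested page-entry size, then associate the freshly mapped pages
 * with the file's address_space. A VM_FAULT_NOPAGE result can only
 * mean device capacity was mapped, so each constituent PAGE_SIZE page
 * can safely take on filp->f_mapping and a linear index.
 */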
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}
static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}
static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};
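/*
 * dax_mmap - validate the vma once up front (device liveness is
 * re-checked at fault time under dax_read_lock()), then opt the
 * mapping in to huge-page faults via VM_HUGEPAGE.
 */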
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
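/*
 * The helper below over-allocates the search length by one alignment
 * unit and then shifts the result so that (addr % align) matches
 * (off % align), allowing faults in the returned range to use huge
 * mappings.
 *
 * Illustrative example (hypothetical values): with align = 2MiB,
 * off = 0, and get_unmapped_area() returning 0x7f1234567000, the
 * result is bumped forward to the next 2MiB boundary, 0x7f1234600000.
 */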
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty = noop_set_page_dirty,
	.invalidatepage = noop_invalidatepage,
};
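/*
 * dax_open - redirect the chardev inode's mapping to the dax_device's
 * inode so that every open shares one address_space; this is what
 * lets unmap_mapping_range() in kill_dev_dax() tear down all user
 * mappings at once.
 */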
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};
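/*
 * Teardown: dev_dax_release() runs when the last device reference
 * drops, returning the ida-allocated instance id (when the core owns
 * it) and dropping the region and dax_device references taken at
 * creation time.
 */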
static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	put_dax(dax_dev);
	kfree(dev_dax);
}

static void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}
static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);
	struct cdev *cdev = inode->i_cdev;

	dev_dbg(dev, "trace\n");

	kill_dev_dax(dev_dax);
	cdev_device_del(cdev, dev);
	put_device(dev);
}
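/*
 * devm_create_dev_dax - instantiate a dax%d.%d character device over
 * @count resource ranges in @dax_region.
 *
 * Each range must honor the region alignment. A negative @id requests
 * an ida-allocated instance id owned by this core; otherwise the
 * region provider owns the id's lifetime. After device_initialize(),
 * error paths funnel through put_device() and dev_dax_release().
 */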
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
		int id, struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	struct cdev *cdev;
	int rc, i;

	if (!count)
		return ERR_PTR(-EINVAL);

	dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dev_dax->res[i].start = res[i].start;
		dev_dax->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	if (id < 0) {
		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
		dev_dax->id = id;
		if (id < 0) {
			rc = id;
			goto err_id;
		}
	} else {
		/* region provider owns @id lifetime */
		dev_dax->id = -1;
	}

	/*
	 * No 'host' or dax_operations since there is no access to this
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL);
	if (!dax_dev) {
		rc = -ENOMEM;
		goto err_dax;
	}

	/* from here on we're committed to teardown via dev_dax_release() */
	dev = &dev_dax->dev;
	device_initialize(dev);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;

	dev_dax->num_resources = count;
	dev_dax->dax_dev = dax_dev;
	dev_dax->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = inode->i_rdev;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dev_dax_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, id);

	rc = cdev_device_add(cdev, dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	return dev_dax;

 err_dax:
	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
 err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
static int __init dax_init(void)
{
	dax_class = class_create(THIS_MODULE, "dax");
	return PTR_ERR_OR_ZERO(dax_class);
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);