// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/intel-svm.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
struct idxd_cdev_context {
	const char *name;
	dev_t devt;
	struct ida minor_ida;
};

/*
 * ictx is an array based off of accelerator types. enum idxd_type
 * is used as index
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
};
struct idxd_user_context {
	struct idxd_wq *wq;
	struct task_struct *task;
	unsigned int flags;
};
enum idxd_cdev_cleanup {
	CDEV_NORMAL = 0,
	CDEV_FAILED,
};
static void idxd_cdev_dev_release(struct device *dev)
{
	dev_dbg(dev, "releasing cdev device\n");
	kfree(dev);
}
static struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};
static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct idxd_cdev, cdev);
}
static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
{
	return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
}
static inline struct idxd_wq *inode_wq(struct inode *inode)
{
	return idxd_cdev_wq(inode_idxd_cdev(inode));
}
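
/*
 * open(2) on the wq character device: a dedicated wq only admits a
 * single opener, and each opener gets a private user context that
 * pins the wq reference for the lifetime of the file.
 */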
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
	struct idxd_user_context *ctx;
	struct idxd_device *idxd;
	struct idxd_wq *wq;
	struct device *dev;

	wq = inode_wq(inode);
	idxd = wq->idxd;
	dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));

	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
		return -EBUSY;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->wq = wq;
	filp->private_data = ctx;
	idxd_wq_get(wq);
	return 0;
}
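
/*
 * release(2): free the user context and drop the wq reference taken
 * at open time.
 */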
static int idxd_cdev_release(struct inode *node, struct file *filep)
{
	struct idxd_user_context *ctx = filep->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called\n", __func__);
	filep->private_data = NULL;
	kfree(ctx);
	idxd_wq_put(wq);
	return 0;
}
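
/*
 * Reject mappings larger than one page: only the single wq portal
 * page is exposed to user space.
 */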
static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
		     const char *func)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info_ratelimited(dev,
				     "%s: %s: mapping too large: %lu\n",
				     current->comm, func,
				     vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	return 0;
}
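
/*
 * mmap(2): map the wq's limited portal page into the caller's address
 * space as uncached MMIO.
 */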
static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
	unsigned long pfn;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	rc = check_vma(wq, vma, __func__);
	if (rc < 0)
		return rc;

	vma->vm_flags |= VM_DONTCOPY;
	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_private_data = ctx;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
				  vma->vm_page_prot);
}
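
/*
 * poll(2): wake waiters when the device has latched a software error;
 * the error state is sampled under the device lock.
 */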
static __poll_t idxd_cdev_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	unsigned long flags;
	__poll_t out = 0;

	poll_wait(filp, &idxd_cdev->err_queue, wait);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (idxd->sw_err.valid)
		out = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return out;
}
static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.poll = idxd_cdev_poll,
};
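
/*
 * Illustrative user-space flow (a sketch, not part of the driver): a
 * client opens the wq node, maps the limited portal page, and polls
 * the fd for device software errors:
 *
 *	int fd = open("/dev/dsa/wq0.0", O_RDWR);
 *	void *portal = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * The "/dev/dsa/wq0.0" path assumes the default node name produced by
 * dev_set_name() below and a 4K page size.
 */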
int idxd_cdev_get_major(struct idxd_device *idxd)
{
	return MAJOR(ictx[idxd->type].devt);
}
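
/*
 * Allocate and register the struct device backing a wq's character
 * device node, drawing a minor number from the per-type ida.
 */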
static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	struct idxd_cdev_context *cdev_ctx;
	struct device *dev;
	int minor, rc;

	idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
	if (!idxd_cdev->dev)
		return -ENOMEM;

	dev = idxd_cdev->dev;
	dev->parent = &idxd->pdev->dev;
	dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
		     idxd->id, wq->id);
	dev->bus = idxd_get_bus_type(idxd);

	cdev_ctx = &ictx[wq->idxd->type];
	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		kfree(dev);
		goto ida_err;
	}

	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
	dev->type = &idxd_cdev_device_type;
	rc = device_register(dev);
	if (rc < 0) {
		dev_err(&idxd->pdev->dev, "device register failed\n");
		goto dev_reg_err;
	}
	idxd_cdev->minor = minor;

	return 0;

 dev_reg_err:
	ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
	put_device(dev);
 ida_err:
	idxd_cdev->dev = NULL;
	return rc;
}
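
/*
 * Tear down the cdev and its backing device. CDEV_FAILED skips
 * cdev_del() because cdev_add() never succeeded.
 */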
static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
				 enum idxd_cdev_cleanup cdev_state)
{
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	struct idxd_cdev_context *cdev_ctx;

	cdev_ctx = &ictx[wq->idxd->type];
	if (cdev_state == CDEV_NORMAL)
		cdev_del(&idxd_cdev->cdev);
	device_unregister(idxd_cdev->dev);
	/*
	 * The device_type->release() will be called on the device and free
	 * the allocated struct device. We can just forget it.
	 */
	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
	idxd_cdev->dev = NULL;
	idxd_cdev->minor = -1;
}
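
/*
 * Create the character device node for a wq: register the backing
 * device, then add the cdev under it.
 */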
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	struct cdev *cdev = &idxd_cdev->cdev;
	struct device *dev;
	int rc;

	rc = idxd_wq_cdev_dev_setup(wq);
	if (rc < 0)
		return rc;

	dev = idxd_cdev->dev;
	cdev_init(cdev, &idxd_cdev_fops);
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
		idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
		return rc;
	}

	init_waitqueue_head(&idxd_cdev->err_queue);
	return 0;
}
void idxd_wq_del_cdev(struct idxd_wq *wq)
{
	idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
}
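
/*
 * Reserve a char device region and init the minor ida for each
 * supported accelerator type at module load.
 */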
int idxd_cdev_register(void)
{
	int rc, i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		ida_init(&ictx[i].minor_ida);
		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
					 ictx[i].name);
		if (rc)
			return rc;
	}

	return 0;
}
void idxd_cdev_remove(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
		ida_destroy(&ictx[i].minor_ida);
	}
}