drivers/misc/mic/scif/scif_fd.c
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
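
/*
 * scif_fdopen() - open() handler for the SCIF character device: allocate a
 * new endpoint via scif_open() and stash it in file->private_data for the
 * remaining file operations.
 */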
static int scif_fdopen(struct inode *inode, struct file *f)
{
        struct scif_endpt *priv = scif_open();

        if (!priv)
                return -ENOMEM;
        f->private_data = priv;
        return 0;
}
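
/*
 * scif_fdclose() - release() handler: close the endpoint that was created
 * when the file was opened.
 */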
static int scif_fdclose(struct inode *inode, struct file *f)
{
        struct scif_endpt *priv = f->private_data;

        return scif_close(priv);
}
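
/*
 * scif_fdmmap() - mmap() handler: forward the VMA to scif_mmap() for the
 * endpoint stored in file->private_data.
 */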
static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
{
        struct scif_endpt *priv = f->private_data;

        return scif_mmap(vma, priv);
}
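
/*
 * scif_fdpoll() - poll() handler: delegate to __scif_pollfd() for the
 * endpoint stored in file->private_data.
 */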
static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
{
        struct scif_endpt *priv = f->private_data;

        return __scif_pollfd(f, wait, priv);
}
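
/*
 * scif_fdflush() - flush() handler: if the file being closed is the one a
 * listening endpoint stashed before blocking in accept, wake that endpoint
 * so the fd can actually be released (see the comment in the body below).
 */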
static int scif_fdflush(struct file *f, fl_owner_t id)
{
        struct scif_endpt *ep = f->private_data;

        spin_lock(&ep->lock);
        /*
         * The listening endpoint stashes the open file information before
         * waiting for incoming connections. If the application closes the
         * endpoint while a separate thread is waiting for incoming
         * connections, the release callback is never invoked because the
         * file descriptor reference count is bumped up in the accept IOCTL.
         * Call the flush routine if the id matches the endpoint's open file
         * information so that the listening endpoint can be woken up and the
         * fd released.
         */
        if (ep->files == id)
                __scif_flush(ep);
        spin_unlock(&ep->lock);
        return 0;
}
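
/* scif_err_debug() - log an error via dev_dbg(), skipping the common -ENOTCONN case. */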
static __always_inline void scif_err_debug(int err, const char *str)
{
        /*
         * ENOTCONN is a common, uninteresting error which would otherwise
         * flood the console with debug messages.
         */
        if (err < 0 && err != -ENOTCONN)
                dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
}
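
/*
 * scif_fdioctl() - dispatch the SCIF_* ioctls: copy the argument block in
 * from user space, call into the SCIF core, and copy any results back out.
 * O_NONBLOCK on the file only affects SCIF_CONNECT here.
 */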
static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        struct scif_endpt *priv = f->private_data;
        void __user *argp = (void __user *)arg;
        int err = 0;
        struct scifioctl_msg request;
        bool non_block = false;

        non_block = !!(f->f_flags & O_NONBLOCK);

        switch (cmd) {
        case SCIF_BIND:
        {
                int pn;

                if (copy_from_user(&pn, argp, sizeof(pn)))
                        return -EFAULT;

                pn = scif_bind(priv, pn);
                if (pn < 0)
                        return pn;

                if (copy_to_user(argp, &pn, sizeof(pn)))
                        return -EFAULT;

                return 0;
        }
        case SCIF_LISTEN:
                return scif_listen(priv, arg);
        case SCIF_CONNECT:
        {
                struct scifioctl_connect req;
                struct scif_endpt *ep = (struct scif_endpt *)priv;

                if (copy_from_user(&req, argp, sizeof(req)))
                        return -EFAULT;

                err = __scif_connect(priv, &req.peer, non_block);
                if (err < 0)
                        return err;

                req.self.node = ep->port.node;
                req.self.port = ep->port.port;

                if (copy_to_user(argp, &req, sizeof(req)))
                        return -EFAULT;

                return 0;
        }
        /*
         * Accept is done in two halves. The request ioctl does the basic
         * functionality of accepting the request and returning the information
         * about it including the internal ID of the end point. The register
         * is done with the internal ID on a new file descriptor opened by the
         * requesting process.
         */
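        /*
         * Roughly, the expected user space sequence is: issue SCIF_ACCEPTREQ
         * on the listening fd to obtain the internal endpoint pointer, open a
         * new fd on the SCIF device, then issue SCIF_ACCEPTREG on that new fd
         * with the returned pointer so it takes over the accepted endpoint.
         */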
        case SCIF_ACCEPTREQ:
        {
                struct scifioctl_accept request;
                scif_epd_t *ep = (scif_epd_t *)&request.endpt;

                if (copy_from_user(&request, argp, sizeof(request)))
                        return -EFAULT;

                err = scif_accept(priv, &request.peer, ep, request.flags);
                if (err < 0)
                        return err;

                if (copy_to_user(argp, &request, sizeof(request))) {
                        scif_close(*ep);
                        return -EFAULT;
                }
                /*
                 * Add to the list of user mode eps where the second half
                 * of the accept is not yet completed.
                 */
                mutex_lock(&scif_info.eplock);
                list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
                list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
                (*ep)->listenep = priv;
                priv->acceptcnt++;
                mutex_unlock(&scif_info.eplock);

                return 0;
        }
        case SCIF_ACCEPTREG:
        {
                struct scif_endpt *priv = f->private_data;
                struct scif_endpt *newep;
                struct scif_endpt *lisep;
                struct scif_endpt *fep = NULL;
                struct scif_endpt *tmpep;
                struct list_head *pos, *tmpq;

                /* Finally replace the pointer to the accepted endpoint */
                if (copy_from_user(&newep, argp, sizeof(void *)))
                        return -EFAULT;

                /* Remove from the user accept queue */
                mutex_lock(&scif_info.eplock);
                list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
                        tmpep = list_entry(pos,
                                           struct scif_endpt, miacceptlist);
                        if (tmpep == newep) {
                                list_del(pos);
                                fep = tmpep;
                                break;
                        }
                }

                if (!fep) {
                        mutex_unlock(&scif_info.eplock);
                        return -ENOENT;
                }

                lisep = newep->listenep;
                list_for_each_safe(pos, tmpq, &lisep->li_accept) {
                        tmpep = list_entry(pos,
                                           struct scif_endpt, liacceptlist);
                        if (tmpep == newep) {
                                list_del(pos);
                                lisep->acceptcnt--;
                                break;
                        }
                }

                mutex_unlock(&scif_info.eplock);

                /* Free the resources automatically created from the open. */
                scif_anon_inode_fput(priv);
                scif_teardown_ep(priv);
                scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
                f->private_data = newep;
                return 0;
        }
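        /*
         * SCIF_SEND and SCIF_RECV take a struct scifioctl_msg describing the
         * user buffer; the non-negative return value of scif_user_send() /
         * scif_user_recv() is reported back through out_len and the ioctl
         * itself returns 0 on success.
         */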
        case SCIF_SEND:
        {
                struct scif_endpt *priv = f->private_data;

                if (copy_from_user(&request, argp,
                                   sizeof(struct scifioctl_msg))) {
                        err = -EFAULT;
                        goto send_err;
                }
                err = scif_user_send(priv, (void __user *)request.msg,
                                     request.len, request.flags);
                if (err < 0)
                        goto send_err;
                if (copy_to_user(&((struct scifioctl_msg __user *)argp)->out_len,
                                 &err, sizeof(err))) {
                        err = -EFAULT;
                        goto send_err;
                }
                err = 0;
send_err:
                scif_err_debug(err, "scif_send");
                return err;
        }
        case SCIF_RECV:
        {
                struct scif_endpt *priv = f->private_data;

                if (copy_from_user(&request, argp,
                                   sizeof(struct scifioctl_msg))) {
                        err = -EFAULT;
                        goto recv_err;
                }

                err = scif_user_recv(priv, (void __user *)request.msg,
                                     request.len, request.flags);
                if (err < 0)
                        goto recv_err;

                if (copy_to_user(&((struct scifioctl_msg __user *)argp)->out_len,
                                 &err, sizeof(err))) {
                        err = -EFAULT;
                        goto recv_err;
                }
                err = 0;
recv_err:
                scif_err_debug(err, "scif_recv");
                return err;
        }
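        /*
         * SCIF_GET_NODEIDS fills the user-supplied array (node_ids.nodes)
         * with up to node_ids.len node IDs, updates node_ids.len with the
         * count reported by scif_get_node_ids() and writes the self node ID
         * through node_ids.self.
         */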
        case SCIF_GET_NODEIDS:
        {
                struct scifioctl_node_ids node_ids;
                int entries;
                u16 *nodes;
                void __user *unodes, *uself;
                u16 self;

                if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
                        err = -EFAULT;
                        goto getnodes_err2;
                }

                entries = min_t(int, scif_info.maxid, node_ids.len);
                nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
                if (entries && !nodes) {
                        err = -ENOMEM;
                        goto getnodes_err2;
                }

                node_ids.len = scif_get_node_ids(nodes, entries, &self);

                unodes = (void __user *)node_ids.nodes;
                if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
                        err = -EFAULT;
                        goto getnodes_err1;
                }

                uself = (void __user *)node_ids.self;
                if (copy_to_user(uself, &self, sizeof(u16))) {
                        err = -EFAULT;
                        goto getnodes_err1;
                }

                if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
                        err = -EFAULT;
                        goto getnodes_err1;
                }
getnodes_err1:
                kfree(nodes);
getnodes_err2:
                return err;
        }
        case SCIF_REG:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_reg reg;
                off_t ret;

                if (copy_from_user(&reg, argp, sizeof(reg))) {
                        err = -EFAULT;
                        goto reg_err;
                }
                if (reg.flags & SCIF_MAP_KERNEL) {
                        err = -EINVAL;
                        goto reg_err;
                }
                ret = scif_register(priv, (void *)reg.addr, reg.len,
                                    reg.offset, reg.prot, reg.flags);
                if (ret < 0) {
                        err = (int)ret;
                        goto reg_err;
                }

                if (copy_to_user(&((struct scifioctl_reg __user *)argp)
                                 ->out_offset, &ret, sizeof(reg.out_offset))) {
                        err = -EFAULT;
                        goto reg_err;
                }
                err = 0;
reg_err:
                scif_err_debug(err, "scif_register");
                return err;
        }
        case SCIF_UNREG:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_unreg unreg;

                if (copy_from_user(&unreg, argp, sizeof(unreg))) {
                        err = -EFAULT;
                        goto unreg_err;
                }
                err = scif_unregister(priv, unreg.offset, unreg.len);
unreg_err:
                scif_err_debug(err, "scif_unregister");
                return err;
        }
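        /*
         * The four RMA copy ioctls all take a struct scifioctl_copy:
         * SCIF_READFROM/SCIF_WRITETO operate on a registered local offset
         * (copy.loffset), while the SCIF_VREADFROM/SCIF_VWRITETO variants
         * take a local virtual address (copy.addr) directly.
         */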
        case SCIF_READFROM:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_copy copy;

                if (copy_from_user(&copy, argp, sizeof(copy))) {
                        err = -EFAULT;
                        goto readfrom_err;
                }
                err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
                                    copy.flags);
readfrom_err:
                scif_err_debug(err, "scif_readfrom");
                return err;
        }
        case SCIF_WRITETO:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_copy copy;

                if (copy_from_user(&copy, argp, sizeof(copy))) {
                        err = -EFAULT;
                        goto writeto_err;
                }
                err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
                                   copy.flags);
writeto_err:
                scif_err_debug(err, "scif_writeto");
                return err;
        }
        case SCIF_VREADFROM:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_copy copy;

                if (copy_from_user(&copy, argp, sizeof(copy))) {
                        err = -EFAULT;
                        goto vreadfrom_err;
                }
                err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
                                     copy.roffset, copy.flags);
vreadfrom_err:
                scif_err_debug(err, "scif_vreadfrom");
                return err;
        }
        case SCIF_VWRITETO:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_copy copy;

                if (copy_from_user(&copy, argp, sizeof(copy))) {
                        err = -EFAULT;
                        goto vwriteto_err;
                }
                err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
                                    copy.roffset, copy.flags);
vwriteto_err:
                scif_err_debug(err, "scif_vwriteto");
                return err;
        }
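        /*
         * Fence ioctls: SCIF_FENCE_MARK returns a mark handle through the
         * user pointer mark.mark, SCIF_FENCE_WAIT passes arg straight to
         * scif_fence_wait(), and SCIF_FENCE_SIGNAL describes local/remote
         * offset-value pairs in a struct scifioctl_fence_signal.
         */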
        case SCIF_FENCE_MARK:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_fence_mark mark;
                int tmp_mark = 0;

                if (copy_from_user(&mark, argp, sizeof(mark))) {
                        err = -EFAULT;
                        goto fence_mark_err;
                }
                err = scif_fence_mark(priv, mark.flags, &tmp_mark);
                if (err)
                        goto fence_mark_err;
                if (copy_to_user((void __user *)mark.mark, &tmp_mark,
                                 sizeof(tmp_mark))) {
                        err = -EFAULT;
                        goto fence_mark_err;
                }
fence_mark_err:
                scif_err_debug(err, "scif_fence_mark");
                return err;
        }
        case SCIF_FENCE_WAIT:
        {
                struct scif_endpt *priv = f->private_data;

                err = scif_fence_wait(priv, arg);
                scif_err_debug(err, "scif_fence_wait");
                return err;
        }
        case SCIF_FENCE_SIGNAL:
        {
                struct scif_endpt *priv = f->private_data;
                struct scifioctl_fence_signal signal;

                if (copy_from_user(&signal, argp, sizeof(signal))) {
                        err = -EFAULT;
                        goto fence_signal_err;
                }
                err = scif_fence_signal(priv, signal.loff, signal.lval,
                                        signal.roff, signal.rval, signal.flags);
fence_signal_err:
                scif_err_debug(err, "scif_fence_signal");
                return err;
        }
        }
        return -EINVAL;
}
const struct file_operations scif_fops = {
        .open = scif_fdopen,
        .release = scif_fdclose,
        .unlocked_ioctl = scif_fdioctl,
        .mmap = scif_fdmmap,
        .poll = scif_fdpoll,
        .flush = scif_fdflush,
        .owner = THIS_MODULE,
};