/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
static int scif_fdopen(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = scif_open();

	/* scif_open() allocates a new endpoint; NULL means out of memory */
	if (!priv)
		return -ENOMEM;
	f->private_data = priv;
	return 0;
}
static int scif_fdclose(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = f->private_data;

	return scif_close(priv);
}
static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
{
	struct scif_endpt *priv = f->private_data;

	return scif_mmap(vma, priv);
}
static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
{
	struct scif_endpt *priv = f->private_data;

	return __scif_pollfd(f, wait, priv);
}
static int scif_fdflush(struct file *f, fl_owner_t id)
{
	struct scif_endpt *ep = f->private_data;

	spin_lock(&ep->lock);
	/*
	 * The listening endpoint stashes the open file information before
	 * waiting for incoming connections. The release callback would never
	 * be called if the application closed the endpoint while waiting for
	 * incoming connections from a separate thread, since the file
	 * descriptor reference count is bumped up in the accept IOCTL. Call
	 * the flush routine if the id matches the endpoint open file
	 * information so that the listening endpoint can be woken up and the
	 * fd released.
	 */
	if (ep->files == id)
		__scif_flush(ep);
	spin_unlock(&ep->lock);
	return 0;
}
static __always_inline void scif_err_debug(int err, const char *str)
{
	/*
	 * ENOTCONN is a common, uninteresting error which would otherwise
	 * flood the console with debug messages unnecessarily.
	 */
	if (err < 0 && err != -ENOTCONN)
		dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
}
static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct scif_endpt *priv = f->private_data;
	void __user *argp = (void __user *)arg;
	int err = 0;
	struct scifioctl_msg request;
	bool non_block = false;

	non_block = !!(f->f_flags & O_NONBLOCK);

	switch (cmd) {
	case SCIF_BIND:
	{
		int pn;

		if (copy_from_user(&pn, argp, sizeof(pn)))
			return -EFAULT;

		pn = scif_bind(priv, pn);
		if (pn < 0)
			return pn;

		/* Return the bound port number back to user space */
		if (copy_to_user(argp, &pn, sizeof(pn)))
			return -EFAULT;

		return 0;
	}
	case SCIF_LISTEN:
		return scif_listen(priv, arg);
	case SCIF_CONNECT:
	{
		struct scifioctl_connect req;
		struct scif_endpt *ep = (struct scif_endpt *)priv;

		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;

		err = __scif_connect(priv, &req.peer, non_block);
		if (err < 0)
			return err;

		/* Fill in the local port the endpoint ended up bound to */
		req.self.node = ep->port.node;
		req.self.port = ep->port.port;

		if (copy_to_user(argp, &req, sizeof(req)))
			return -EFAULT;

		return 0;
	}
	/*
	 * Accept is done in two halves. The request ioctl does the basic
	 * functionality of accepting the request and returning the information
	 * about it, including the internal ID of the endpoint. The register
	 * half is done with the internal ID on a new file descriptor opened by
	 * the requesting process.
	 */
	case SCIF_ACCEPTREQ:
	{
		struct scifioctl_accept request;
		scif_epd_t *ep = (scif_epd_t *)&request.endpt;

		if (copy_from_user(&request, argp, sizeof(request)))
			return -EFAULT;

		err = scif_accept(priv, &request.peer, ep, request.flags);
		if (err < 0)
			return err;

		if (copy_to_user(argp, &request, sizeof(request))) {
			scif_close(*ep);
			return -EFAULT;
		}
		/*
		 * Add to the list of user mode eps where the second half
		 * of the accept is not yet completed.
		 */
		mutex_lock(&scif_info.eplock);
		list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
		list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
		(*ep)->listenep = priv;
		priv->acceptcnt++;
		mutex_unlock(&scif_info.eplock);

		return 0;
	}
	case SCIF_ACCEPTREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scif_endpt *newep;
		struct scif_endpt *lisep;
		struct scif_endpt *fep = NULL;
		struct scif_endpt *tmpep;
		struct list_head *pos, *tmpq;

		/* Finally replace the pointer to the accepted endpoint */
		if (copy_from_user(&newep, argp, sizeof(void *)))
			return -EFAULT;

		/* Remove from the user accept queue */
		mutex_lock(&scif_info.eplock);
		list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, miacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				fep = tmpep;
				break;
			}
		}

		if (!fep) {
			mutex_unlock(&scif_info.eplock);
			return -ENOENT;
		}

		lisep = newep->listenep;
		list_for_each_safe(pos, tmpq, &lisep->li_accept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, liacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				lisep->acceptcnt--;
				break;
			}
		}

		mutex_unlock(&scif_info.eplock);

		/* Free the resources automatically created from the open. */
		scif_anon_inode_fput(priv);
		scif_teardown_ep(priv);
		scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
		f->private_data = newep;
		return 0;
	}
	case SCIF_SEND:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto send_err;
		}
		err = scif_user_send(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto send_err;
		/* Report the number of bytes actually sent */
		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto send_err;
		}
		err = 0;
send_err:
		scif_err_debug(err, "scif_send");
		return err;
	}
	case SCIF_RECV:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto recv_err;
		}
		err = scif_user_recv(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto recv_err;
		/* Report the number of bytes actually received */
		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto recv_err;
		}
		err = 0;
recv_err:
		scif_err_debug(err, "scif_recv");
		return err;
	}
	case SCIF_GET_NODEIDS:
	{
		struct scifioctl_node_ids node_ids;
		int entries;
		u16 *nodes;
		void __user *unodes, *uself;
		u16 self;

		if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err2;
		}

		entries = min_t(int, scif_info.maxid, node_ids.len);
		nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
		if (entries && !nodes) {
			err = -ENOMEM;
			goto getnodes_err2;
		}
		node_ids.len = scif_get_node_ids(nodes, entries, &self);

		unodes = (void __user *)node_ids.nodes;
		if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		uself = (void __user *)node_ids.self;
		if (copy_to_user(uself, &self, sizeof(u16))) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err1;
		}
getnodes_err1:
		kfree(nodes);
getnodes_err2:
		return err;
	}
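	/*
	 * A user-space sketch of the node-ID query above, assuming the
	 * struct scifioctl_node_ids layout from the uapi scif_ioctl.h;
	 * "fd" and MAX_NODES are illustrative:
	 *
	 *	uint16_t nodes[MAX_NODES], self;
	 *	struct scifioctl_node_ids ids = {
	 *		.nodes = (uintptr_t)nodes,
	 *		.self = (uintptr_t)&self,
	 *		.len = MAX_NODES,
	 *	};
	 *
	 *	if (ioctl(fd, SCIF_GET_NODEIDS, &ids) == 0)
	 *		printf("%d nodes online, self=%u\n", ids.len, self);
	 */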
	case SCIF_REG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_reg reg;
		off_t ret;

		if (copy_from_user(&reg, argp, sizeof(reg))) {
			err = -EFAULT;
			goto reg_err;
		}
		/* SCIF_MAP_KERNEL makes no sense for a user mode registration */
		if (reg.flags & SCIF_MAP_KERNEL) {
			err = -EINVAL;
			goto reg_err;
		}
		ret = scif_register(priv, (void *)reg.addr, reg.len,
				    reg.offset, reg.prot, reg.flags);
		if (ret < 0) {
			err = (int)ret;
			goto reg_err;
		}

		if (copy_to_user(&((struct scifioctl_reg __user *)argp)
				 ->out_offset, &ret, sizeof(reg.out_offset))) {
			err = -EFAULT;
			goto reg_err;
		}
		err = 0;
reg_err:
		scif_err_debug(err, "scif_register");
		return err;
	}
	case SCIF_UNREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_unreg unreg;

		if (copy_from_user(&unreg, argp, sizeof(unreg))) {
			err = -EFAULT;
			goto unreg_err;
		}
		err = scif_unregister(priv, unreg.offset, unreg.len);
unreg_err:
		scif_err_debug(err, "scif_unregister");
		return err;
	}
	case SCIF_READFROM:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto readfrom_err;
		}
		err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
				    copy.flags);
readfrom_err:
		scif_err_debug(err, "scif_readfrom");
		return err;
	}
	case SCIF_WRITETO:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto writeto_err;
		}
		err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
				   copy.flags);
writeto_err:
		scif_err_debug(err, "scif_writeto");
		return err;
	}
	case SCIF_VREADFROM:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto vreadfrom_err;
		}
		err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
				     copy.roffset, copy.flags);
vreadfrom_err:
		scif_err_debug(err, "scif_vreadfrom");
		return err;
	}
	case SCIF_VWRITETO:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto vwriteto_err;
		}
		err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
				    copy.roffset, copy.flags);
vwriteto_err:
		scif_err_debug(err, "scif_vwriteto");
		return err;
	}
	case SCIF_FENCE_MARK:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_fence_mark mark;
		int tmp_mark = 0;

		if (copy_from_user(&mark, argp, sizeof(mark))) {
			err = -EFAULT;
			goto fence_mark_err;
		}
		err = scif_fence_mark(priv, mark.flags, &tmp_mark);
		if (err)
			goto fence_mark_err;
		if (copy_to_user((void __user *)mark.mark, &tmp_mark,
				 sizeof(tmp_mark))) {
			err = -EFAULT;
			goto fence_mark_err;
		}
fence_mark_err:
		scif_err_debug(err, "scif_fence_mark");
		return err;
	}
	case SCIF_FENCE_WAIT:
	{
		struct scif_endpt *priv = f->private_data;

		err = scif_fence_wait(priv, arg);
		scif_err_debug(err, "scif_fence_wait");
		return err;
	}
	case SCIF_FENCE_SIGNAL:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_fence_signal signal;

		if (copy_from_user(&signal, argp, sizeof(signal))) {
			err = -EFAULT;
			goto fence_signal_err;
		}

		err = scif_fence_signal(priv, signal.loff, signal.lval,
					signal.roff, signal.rval, signal.flags);
fence_signal_err:
		scif_err_debug(err, "scif_fence_signal");
		return err;
	}
	}
	return -EINVAL;
}
const struct file_operations scif_fops = {
	.open = scif_fdopen,
	.release = scif_fdclose,
	.unlocked_ioctl = scif_fdioctl,
	.mmap = scif_fdmmap,
	.poll = scif_fdpoll,
	.flush = scif_fdflush,
	.owner = THIS_MODULE,
};
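
/*
 * End-to-end sketch of driving this interface from user space, assuming the
 * misc device registered by the driver appears as "/dev/scif" and using the
 * ioctl numbers from the uapi scif_ioctl.h; "fd" and the port/backlog values
 * are illustrative:
 *
 *	int fd = open("/dev/scif", O_RDWR);	// scif_fdopen() runs here
 *	int pn = 0;				// 0 asks for any free port
 *
 *	ioctl(fd, SCIF_BIND, &pn);		// pn now holds the bound port
 *	ioctl(fd, SCIF_LISTEN, 16);		// backlog passed as the arg
 *	close(fd);				// scif_fdclose() -> scif_close()
 */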