/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
18 #include "scif_main.h"
20 static int scif_fdopen(struct inode
*inode
, struct file
*f
)
22 struct scif_endpt
*priv
= scif_open();
26 f
->private_data
= priv
;
30 static int scif_fdclose(struct inode
*inode
, struct file
*f
)
32 struct scif_endpt
*priv
= f
->private_data
;
34 return scif_close(priv
);
37 static int scif_fdflush(struct file
*f
, fl_owner_t id
)
39 struct scif_endpt
*ep
= f
->private_data
;
43 * The listening endpoint stashes the open file information before
44 * waiting for incoming connections. The release callback would never be
45 * called if the application closed the endpoint, while waiting for
46 * incoming connections from a separate thread since the file descriptor
47 * reference count is bumped up in the accept IOCTL. Call the flush
48 * routine if the id matches the endpoint open file information so that
49 * the listening endpoint can be woken up and the fd released.
53 spin_unlock(&ep
->lock
);
57 static __always_inline
void scif_err_debug(int err
, const char *str
)
60 * ENOTCONN is a common uninteresting error which is
61 * flooding debug messages to the console unnecessarily.
63 if (err
< 0 && err
!= -ENOTCONN
)
64 dev_dbg(scif_info
.mdev
.this_device
, "%s err %d\n", str
, err
);
67 static long scif_fdioctl(struct file
*f
, unsigned int cmd
, unsigned long arg
)
69 struct scif_endpt
*priv
= f
->private_data
;
70 void __user
*argp
= (void __user
*)arg
;
72 struct scifioctl_msg request
;
73 bool non_block
= false;
75 non_block
= !!(f
->f_flags
& O_NONBLOCK
);
82 if (copy_from_user(&pn
, argp
, sizeof(pn
)))
85 pn
= scif_bind(priv
, pn
);
89 if (copy_to_user(argp
, &pn
, sizeof(pn
)))
95 return scif_listen(priv
, arg
);
98 struct scifioctl_connect req
;
99 struct scif_endpt
*ep
= (struct scif_endpt
*)priv
;
101 if (copy_from_user(&req
, argp
, sizeof(req
)))
104 err
= __scif_connect(priv
, &req
.peer
, non_block
);
108 req
.self
.node
= ep
->port
.node
;
109 req
.self
.port
= ep
->port
.port
;
111 if (copy_to_user(argp
, &req
, sizeof(req
)))
117 * Accept is done in two halves. The request ioctl does the basic
118 * functionality of accepting the request and returning the information
119 * about it including the internal ID of the end point. The register
120 * is done with the internal ID on a new file descriptor opened by the
121 * requesting process.
125 struct scifioctl_accept request
;
126 scif_epd_t
*ep
= (scif_epd_t
*)&request
.endpt
;
128 if (copy_from_user(&request
, argp
, sizeof(request
)))
131 err
= scif_accept(priv
, &request
.peer
, ep
, request
.flags
);
135 if (copy_to_user(argp
, &request
, sizeof(request
))) {
140 * Add to the list of user mode eps where the second half
141 * of the accept is not yet completed.
143 spin_lock(&scif_info
.eplock
);
144 list_add_tail(&((*ep
)->miacceptlist
), &scif_info
.uaccept
);
145 list_add_tail(&((*ep
)->liacceptlist
), &priv
->li_accept
);
146 (*ep
)->listenep
= priv
;
148 spin_unlock(&scif_info
.eplock
);
154 struct scif_endpt
*priv
= f
->private_data
;
155 struct scif_endpt
*newep
;
156 struct scif_endpt
*lisep
;
157 struct scif_endpt
*fep
= NULL
;
158 struct scif_endpt
*tmpep
;
159 struct list_head
*pos
, *tmpq
;
161 /* Finally replace the pointer to the accepted endpoint */
162 if (copy_from_user(&newep
, argp
, sizeof(void *)))
165 /* Remove form the user accept queue */
166 spin_lock(&scif_info
.eplock
);
167 list_for_each_safe(pos
, tmpq
, &scif_info
.uaccept
) {
168 tmpep
= list_entry(pos
,
169 struct scif_endpt
, miacceptlist
);
170 if (tmpep
== newep
) {
178 spin_unlock(&scif_info
.eplock
);
182 lisep
= newep
->listenep
;
183 list_for_each_safe(pos
, tmpq
, &lisep
->li_accept
) {
184 tmpep
= list_entry(pos
,
185 struct scif_endpt
, liacceptlist
);
186 if (tmpep
== newep
) {
193 spin_unlock(&scif_info
.eplock
);
195 /* Free the resources automatically created from the open. */
196 scif_teardown_ep(priv
);
197 scif_add_epd_to_zombie_list(priv
, !SCIF_EPLOCK_HELD
);
198 f
->private_data
= newep
;
203 struct scif_endpt
*priv
= f
->private_data
;
205 if (copy_from_user(&request
, argp
,
206 sizeof(struct scifioctl_msg
))) {
210 err
= scif_user_send(priv
, (void __user
*)request
.msg
,
211 request
.len
, request
.flags
);
215 ((struct scifioctl_msg __user
*)argp
)->out_len
,
216 &err
, sizeof(err
))) {
222 scif_err_debug(err
, "scif_send");
227 struct scif_endpt
*priv
= f
->private_data
;
229 if (copy_from_user(&request
, argp
,
230 sizeof(struct scifioctl_msg
))) {
235 err
= scif_user_recv(priv
, (void __user
*)request
.msg
,
236 request
.len
, request
.flags
);
241 ((struct scifioctl_msg __user
*)argp
)->out_len
,
242 &err
, sizeof(err
))) {
248 scif_err_debug(err
, "scif_recv");
251 case SCIF_GET_NODEIDS
:
253 struct scifioctl_node_ids node_ids
;
256 void __user
*unodes
, *uself
;
259 if (copy_from_user(&node_ids
, argp
, sizeof(node_ids
))) {
264 entries
= min_t(int, scif_info
.maxid
, node_ids
.len
);
265 nodes
= kmalloc_array(entries
, sizeof(u16
), GFP_KERNEL
);
266 if (entries
&& !nodes
) {
270 node_ids
.len
= scif_get_node_ids(nodes
, entries
, &self
);
272 unodes
= (void __user
*)node_ids
.nodes
;
273 if (copy_to_user(unodes
, nodes
, sizeof(u16
) * entries
)) {
278 uself
= (void __user
*)node_ids
.self
;
279 if (copy_to_user(uself
, &self
, sizeof(u16
))) {
284 if (copy_to_user(argp
, &node_ids
, sizeof(node_ids
))) {
297 const struct file_operations scif_fops
= {
299 .release
= scif_fdclose
,
300 .unlocked_ioctl
= scif_fdioctl
,
301 .flush
= scif_fdflush
,
302 .owner
= THIS_MODULE
,