/* drivers/usb/usbip/vudc_tx.c — USB/IP virtual UDC, TX (reply) path */
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>

#include "usbip_common.h"
#include "vudc.h"
27 static inline void setup_base_pdu(struct usbip_header_basic *base,
28 __u32 command, __u32 seqnum)
30 base->command = command;
31 base->seqnum = seqnum;
32 base->devid = 0;
33 base->ep = 0;
34 base->direction = 0;
/*
 * Build the header of a RET_SUBMIT reply for a completed urb: fill the
 * basic fields, then pack the submit-specific ones (pack direction 1 =
 * device-to-host reply).
 */
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
	usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}
/*
 * Build the header of a RET_UNLINK reply: basic fields plus the unlink
 * completion status taken from the queued v_unlink record.
 */
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct v_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}
50 static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
52 struct msghdr msg;
53 struct kvec iov[1];
54 size_t txsize;
56 int ret;
57 struct usbip_header pdu_header;
59 txsize = 0;
60 memset(&pdu_header, 0, sizeof(pdu_header));
61 memset(&msg, 0, sizeof(msg));
62 memset(&iov, 0, sizeof(iov));
64 /* 1. setup usbip_header */
65 setup_ret_unlink_pdu(&pdu_header, unlink);
66 usbip_header_correct_endian(&pdu_header, 1);
68 iov[0].iov_base = &pdu_header;
69 iov[0].iov_len = sizeof(pdu_header);
70 txsize += sizeof(pdu_header);
72 ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
73 1, txsize);
74 if (ret != txsize) {
75 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
76 if (ret >= 0)
77 return -EPIPE;
78 return ret;
80 kfree(unlink);
82 return txsize;
/*
 * Send a RET_SUBMIT reply for a completed urb: header, optional IN-data
 * payload, and (for isochronous transfers) the iso packet descriptors,
 * all pushed through one kernel_sendmsg() call.
 *
 * Consumes @urb_p: it is freed (together with its urb) on every exit
 * path via free_urbp_and_urb().
 *
 * Returns the total number of bytes sent, or a negative value on error
 * (note the bare -1 on the inconsistent-urb check below).
 */
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct urb *urb = urb_p->urb;
	struct usbip_header pdu_header;
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct kvec *iov = NULL;
	int iovnum = 0;
	int ret = 0;
	size_t txsize;
	struct msghdr msg;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));

	/* Sanity: data claimed to be transferred but no buffer to read it from. */
	if (urb->actual_length > 0 && !urb->transfer_buffer) {
		dev_err(&udc->gadget.dev,
			"urb: actual_length %d transfer_buffer null\n",
			urb->actual_length);
		return -1;
	}

	/*
	 * Worst-case iovec count: header + payload (or header + one entry
	 * per iso packet) + iso descriptor block.
	 */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
		iovnum = 2 + urb->number_of_packets;
	else
		iovnum = 2;

	iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		ret = -ENOMEM;
		goto out;
	}
	/* From here on, iovnum counts the entries actually filled in. */
	iovnum = 0;

	/* 1. setup usbip_header */
	setup_ret_submit_pdu(&pdu_header, urb_p);
	usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
			  pdu_header.base.seqnum);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[iovnum].iov_base = &pdu_header;
	iov[iovnum].iov_len = sizeof(pdu_header);
	iovnum++;
	txsize += sizeof(pdu_header);

	/* 2. setup transfer buffer */
	if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
	    usb_pipein(urb->pipe) && urb->actual_length > 0) {
		/* Non-iso IN transfer: one contiguous payload chunk. */
		iov[iovnum].iov_base = urb->transfer_buffer;
		iov[iovnum].iov_len = urb->actual_length;
		iovnum++;
		txsize += urb->actual_length;
	} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
		usb_pipein(urb->pipe)) {
		/* FIXME - copypasted from stub_tx, refactor */
		int i;

		/* Iso IN: one iovec entry per frame, at its own offset. */
		for (i = 0; i < urb->number_of_packets; i++) {
			iov[iovnum].iov_base = urb->transfer_buffer +
				urb->iso_frame_desc[i].offset;
			iov[iovnum].iov_len =
				urb->iso_frame_desc[i].actual_length;
			iovnum++;
			txsize += urb->iso_frame_desc[i].actual_length;
		}

		/*
		 * Per-frame lengths must add up to urb->actual_length;
		 * a mismatch means the urb state is inconsistent.
		 */
		if (txsize != sizeof(pdu_header) + urb->actual_length) {
			usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
			ret = -EPIPE;
			goto out;
		}
	}
	/* else - no buffer to send */

	/* 3. setup iso_packet_descriptor */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
		ssize_t len = 0;

		iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
		if (!iso_buffer) {
			usbip_event_add(&udc->ud,
					VUDC_EVENT_ERROR_MALLOC);
			ret = -ENOMEM;
			goto out;
		}
		iov[iovnum].iov_base = iso_buffer;
		iov[iovnum].iov_len = len;
		txsize += len;
		iovnum++;
	}

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
			     iov, iovnum, txsize);
	if (ret != txsize) {
		/* Short or failed send: the TCP link is considered broken. */
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			ret = -EPIPE;
		goto out;
	}

out:
	kfree(iov);
	kfree(iso_buffer);
	free_urbp_and_urb(urb_p);
	if (ret < 0)
		return ret;
	return txsize;
}
/*
 * Drain the tx queue, sending one reply per queued tx_item.
 *
 * lock_tx is held only while manipulating the list; it is dropped
 * around the (sleeping) send calls and re-taken before the next
 * list_empty() check.  The send helpers consume txi->s / txi->u,
 * so only the tx_item itself is freed here.
 *
 * Returns the total number of bytes sent, or the first negative
 * error from a send helper (the queue is left as-is in that case).
 * NOTE(review): total_size is size_t but returned as int — could
 * overflow for very large totals; matches the callers' expectations
 * as written.
 */
static int v_send_ret(struct vudc *udc)
{
	unsigned long flags;
	struct tx_item *txi;
	size_t total_size = 0;
	int ret = 0;

	spin_lock_irqsave(&udc->lock_tx, flags);
	while (!list_empty(&udc->tx_queue)) {
		txi = list_first_entry(&udc->tx_queue, struct tx_item,
				       tx_entry);
		list_del(&txi->tx_entry);
		/* drop the lock: the send path sleeps (GFP_KERNEL, sendmsg) */
		spin_unlock_irqrestore(&udc->lock_tx, flags);

		switch (txi->type) {
		case TX_SUBMIT:
			ret = v_send_ret_submit(udc, txi->s);
			break;
		case TX_UNLINK:
			ret = v_send_ret_unlink(udc, txi->u);
			break;
		}
		kfree(txi);

		if (ret < 0)
			return ret;

		total_size += ret;

		spin_lock_irqsave(&udc->lock_tx, flags);
	}

	spin_unlock_irqrestore(&udc->lock_tx, flags);
	return total_size;
}
233 int v_tx_loop(void *data)
235 struct usbip_device *ud = (struct usbip_device *) data;
236 struct vudc *udc = container_of(ud, struct vudc, ud);
237 int ret;
239 while (!kthread_should_stop()) {
240 if (usbip_event_happened(&udc->ud))
241 break;
242 ret = v_send_ret(udc);
243 if (ret < 0) {
244 pr_warn("v_tx exit with error %d", ret);
245 break;
247 wait_event_interruptible(udc->tx_waitq,
248 (!list_empty(&udc->tx_queue) ||
249 kthread_should_stop()));
252 return 0;
255 /* called with spinlocks held */
256 void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
258 struct tx_item *txi;
259 struct v_unlink *unlink;
261 txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
262 if (!txi) {
263 usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
264 return;
266 unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
267 if (!unlink) {
268 kfree(txi);
269 usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
270 return;
273 unlink->seqnum = seqnum;
274 unlink->status = status;
275 txi->type = TX_UNLINK;
276 txi->u = unlink;
278 list_add_tail(&txi->tx_entry, &udc->tx_queue);
281 /* called with spinlocks held */
282 void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
284 struct tx_item *txi;
286 txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
287 if (!txi) {
288 usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
289 return;
292 txi->type = TX_SUBMIT;
293 txi->s = urb_p;
295 list_add_tail(&txi->tx_entry, &udc->tx_queue);