/* minix.git: servers/lwip/raw_ip.c */
#include <stdlib.h>
#include <assert.h>	/* assert() is used in raw_ip_get_opt() */

#include <sys/ioc_net.h>
#include <net/gen/in.h>
#include <net/gen/ip_io.h>

#include <lwip/raw.h>
#include <lwip/ip_addr.h>

#include <minix/netsock.h>
#include "proto.h"

#define RAW_IP_BUF_SIZE		(32 << 10)

#define sock_alloc_buf(s)	debug_malloc(s)
#define sock_free_buf(x)	debug_free(x)
struct raw_ip_recv_data {
	ip_addr_t	ip;
	struct pbuf	*pbuf;
};

#define raw_ip_recv_alloc()	debug_malloc(sizeof(struct raw_ip_recv_data))

static void raw_ip_recv_free(void * data)
{
	if (((struct raw_ip_recv_data *)data)->pbuf)
		pbuf_free(((struct raw_ip_recv_data *)data)->pbuf);
	debug_free(data);
}
static int raw_ip_op_open(struct socket * sock, __unused message * m)
{
	debug_print("socket num %ld", get_sock_num(sock));

	if (!(sock->buf = sock_alloc_buf(RAW_IP_BUF_SIZE))) {
		return ENOMEM;
	}
	sock->buf_size = RAW_IP_BUF_SIZE;

	return OK;
}
static void raw_ip_close(struct socket * sock)
{
	/* dequeue and free all enqueued data before closing */
	sock_dequeue_data_all(sock, raw_ip_recv_free);

	if (sock->pcb)
		raw_remove(sock->pcb);
	if (sock->buf)
		sock_free_buf(sock->buf);

	/* mark the socket as unused */
	sock->ops = NULL;
}
static void raw_ip_op_close(struct socket * sock, __unused message * m)
{
	debug_print("socket num %ld", get_sock_num(sock));

	raw_ip_close(sock);

	sock_reply_close(sock, OK);
}
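/*
 * Copy the contents of a pbuf chain into the caller's grant, at most
 * m->COUNT bytes.  Returns the number of bytes copied, or the
 * copy_to_user() error code on failure.
 */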
static int raw_ip_do_receive(message * m,
			struct pbuf *pbuf)
{
	struct pbuf * p;
	unsigned rem_len = m->COUNT;
	unsigned written = 0, hdr_sz = 0;
	int err;

	debug_print("user buffer size : %d\n", rem_len);

	for (p = pbuf; p && rem_len; p = p->next) {
		size_t cp_len;

		cp_len = (rem_len < p->len) ? rem_len : p->len;
		err = copy_to_user(m->m_source, p->payload, cp_len,
				(cp_grant_id_t) m->IO_GRANT,
				hdr_sz + written);

		if (err != OK)
			return err;

		written += cp_len;
		rem_len -= cp_len;
	}

	debug_print("copied %d bytes\n", written + hdr_sz);
	return written + hdr_sz;
}
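/*
 * lwIP raw receive callback.  Per the lwIP raw API, a nonzero return value
 * tells lwIP that the packet was consumed here (this callback then owns the
 * pbuf and must free it); returning 0 leaves the packet to lwIP.  A pending
 * read is resumed directly; otherwise the packet is queued, either by taking
 * ownership of the pbuf (NWIO_EXCL) or by storing a copy of it.
 */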
static u8_t raw_ip_op_receive(void *arg,
			__unused struct raw_pcb *pcb,
			struct pbuf *pbuf,
			ip_addr_t *addr)
{
	struct socket * sock = (struct socket *) arg;
	struct raw_ip_recv_data * data;
	int ret;

	debug_print("socket num : %ld addr : %x\n",
			get_sock_num(sock), (unsigned int) addr->addr);

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		/* we are resuming a suspended operation */
		ret = raw_ip_do_receive(&sock->mess, pbuf);

		if (ret > 0) {
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			if (sock->usr_flags & NWIO_EXCL) {
				pbuf_free(pbuf);
				return 1;
			} else
				return 0;
		} else {
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
		}
	}

	/* Do not enqueue more data than allowed */
	if (sock->recv_data_size > RAW_IP_BUF_SIZE)
		return 0;

	/*
	 * Nobody is waiting for the data or an error occurred above; we
	 * enqueue the packet.
	 */
	if (!(data = raw_ip_recv_alloc())) {
		return 0;
	}

	data->ip = *addr;
	if (sock->usr_flags & NWIO_EXCL) {
		data->pbuf = pbuf;
		ret = 1;
	} else {
		/* we store a copy of this packet */
		data->pbuf = pbuf_alloc(PBUF_RAW, pbuf->tot_len, PBUF_RAM);
		if (data->pbuf == NULL) {
			debug_print("LWIP : cannot allocate new pbuf\n");
			raw_ip_recv_free(data);
			return 0;
		}

		if (pbuf_copy(data->pbuf, pbuf) != ERR_OK) {
			debug_print("LWIP : cannot copy pbuf\n");
			raw_ip_recv_free(data);
			return 0;
		}

		ret = 0;
	}

	/*
	 * If we did not manage to enqueue the packet, we report it as not
	 * consumed.
	 */
	if (sock_enqueue_data(sock, data, data->pbuf->tot_len) != OK) {
		raw_ip_recv_free(data);
		ret = 0;
	}

	return ret;
}
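/*
 * Read request: deliver queued data immediately if available.  With no data,
 * non-blocking reads get EAGAIN; blocking reads are suspended by saving the
 * request message and setting SOCK_FLG_OP_PENDING, to be resumed from the
 * receive callback above.
 */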
static void raw_ip_op_read(struct socket * sock, message * m, int blk)
{
	debug_print("socket num %ld", get_sock_num(sock));

	if (sock->pcb == NULL) {
		sock_reply(sock, EIO);
		return;
	}

	if (sock->recv_head) {
		/* data available, receive immediately */
		struct raw_ip_recv_data * data;
		int ret;

		data = (struct raw_ip_recv_data *) sock->recv_head->data;

		ret = raw_ip_do_receive(m, data->pbuf);

		if (ret > 0) {
			sock_dequeue_data(sock);
			sock->recv_data_size -= data->pbuf->tot_len;
			raw_ip_recv_free(data);
		}
		sock_reply(sock, ret);
	} else if (!blk)
		sock_reply(sock, EAGAIN);
	else {
		/* store the message so we know how to reply */
		sock->mess = *m;
		/* the operation is being processed */
		sock->flags |= SOCK_FLG_OP_PENDING;

		debug_print("no data to read, suspending");
	}
}
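/*
 * Write request: the caller provides a complete IP packet.  The user-supplied
 * IP header is stripped (pbuf_header() with -IP_HLEN) because raw_sendto()
 * generates its own header; only the destination address is taken from the
 * caller's header.
 */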
static void raw_ip_op_write(struct socket * sock, message * m, __unused int blk)
{
	int ret;
	struct pbuf * pbuf;
	struct ip_hdr * ip_hdr;

	debug_print("socket num %ld data size %d",
			get_sock_num(sock), m->COUNT);

	if (sock->pcb == NULL) {
		ret = EIO;
		goto write_err;
	}

	if ((size_t) m->COUNT > sock->buf_size) {
		ret = ENOMEM;
		goto write_err;
	}

	pbuf = pbuf_alloc(PBUF_LINK, m->COUNT, PBUF_RAM);
	if (!pbuf) {
		ret = ENOMEM;
		goto write_err;
	}

	if ((ret = copy_from_user(m->m_source, pbuf->payload, m->COUNT,
				(cp_grant_id_t) m->IO_GRANT, 0)) != OK) {
		pbuf_free(pbuf);
		goto write_err;
	}

	ip_hdr = (struct ip_hdr *) pbuf->payload;
	if (pbuf_header(pbuf, -IP_HLEN)) {
		pbuf_free(pbuf);
		ret = EIO;
		goto write_err;
	}

	if ((ret = raw_sendto((struct raw_pcb *)sock->pcb, pbuf,
				(ip_addr_t *) &ip_hdr->dest)) != OK) {
		debug_print("raw_sendto failed %d", ret);
		ret = EIO;
	} else
		ret = m->COUNT;

	pbuf_free(pbuf);

write_err:
	sock_reply(sock, ret);
}
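/*
 * NWIOSIPOPT: create the raw pcb for the requested protocol (if not created
 * yet), remember the user flags and register raw_ip_op_receive() as the
 * receive hook for this socket.
 */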
static void raw_ip_set_opt(struct socket * sock, message * m)
{
	int err;
	nwio_ipopt_t ipopt;
	struct raw_pcb * pcb;

	err = copy_from_user(m->m_source, &ipopt, sizeof(ipopt),
				(cp_grant_id_t) m->IO_GRANT, 0);

	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	debug_print("ipopt.nwio_flags = 0x%lx", ipopt.nwio_flags);
	debug_print("ipopt.nwio_proto = 0x%x", ipopt.nwio_proto);
	debug_print("ipopt.nwio_rem = 0x%x",
			(unsigned int) ipopt.nwio_rem);

	if (sock->pcb == NULL) {
		if (!(pcb = raw_new(ipopt.nwio_proto))) {
			raw_ip_close(sock);
			sock_reply(sock, ENOMEM);
			return;
		}

		sock->pcb = pcb;
	} else
		pcb = (struct raw_pcb *) sock->pcb;

	if (pcb->protocol != ipopt.nwio_proto) {
		debug_print("conflicting ip socket protocols\n");
		sock_reply(sock, EBADIOCTL);
		return;
	}

	sock->usr_flags = ipopt.nwio_flags;

#if 0
	if (raw_bind(pcb, (ip_addr_t *)&ipopt.nwio_rem) == ERR_USE) {
		raw_ip_close(sock);
		sock_reply(sock, EADDRINUSE);
		return;
	}
#endif

	/* register a receive hook */
	raw_recv((struct raw_pcb *) sock->pcb, raw_ip_op_receive, sock);

	sock_reply(sock, OK);
}
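/*
 * NWIOGIPOPT: return the current remote address and user flags to the caller.
 */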
static void raw_ip_get_opt(struct socket * sock, message * m)
{
	int err;
	nwio_ipopt_t ipopt;
	struct raw_pcb * pcb = (struct raw_pcb *) sock->pcb;

	assert(pcb);

	ipopt.nwio_rem = pcb->remote_ip.addr;
	ipopt.nwio_flags = sock->usr_flags;

	if ((unsigned) m->COUNT < sizeof(ipopt)) {
		sock_reply(sock, EINVAL);
		return;
	}

	err = copy_to_user(m->m_source, &ipopt, sizeof(ipopt),
				(cp_grant_id_t) m->IO_GRANT, 0);

	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	sock_reply(sock, OK);
}
static void raw_ip_op_ioctl(struct socket * sock, message * m, __unused int blk)
{
	debug_print("socket num %ld req %c %d %d",
			get_sock_num(sock),
			(m->REQUEST >> 8) & 0xff,
			m->REQUEST & 0xff,
			(m->REQUEST >> 16) & _IOCPARM_MASK);

	switch (m->REQUEST) {
	case NWIOSIPOPT:
		raw_ip_set_opt(sock, m);
		break;
	case NWIOGIPOPT:
		raw_ip_get_opt(sock, m);
		break;
	default:
		/*
		 * /dev/ip can also be accessed as a default device to be
		 * configured
		 */
		nic_default_ioctl(m);
		return;
	}
}
struct sock_ops sock_raw_ip_ops = {
	.open		= raw_ip_op_open,
	.close		= raw_ip_op_close,
	.read		= raw_ip_op_read,
	.write		= raw_ip_op_write,
	.ioctl		= raw_ip_op_ioctl,
	.select		= generic_op_select,
	.select_reply	= generic_op_select_reply
};