gro: Allow tunnel stacking in the case of FOU/GUE
[linux/fpc-iii.git] / fs / ncpfs / sock.c
blob 471bc3d1139ebf678b2dec63c65a71ba470ad4ff
1 /*
2 * linux/fs/ncpfs/sock.c
4 * Copyright (C) 1992, 1993 Rick Sladkey
6 * Modified 1995, 1996 by Volker Lendecke to be usable for ncp
7 * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
9 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/time.h>
14 #include <linux/errno.h>
15 #include <linux/socket.h>
16 #include <linux/fcntl.h>
17 #include <linux/stat.h>
18 #include <linux/string.h>
19 #include <asm/uaccess.h>
20 #include <linux/in.h>
21 #include <linux/net.h>
22 #include <linux/mm.h>
23 #include <linux/netdevice.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <net/scm.h>
27 #include <net/sock.h>
28 #include <linux/ipx.h>
29 #include <linux/poll.h>
30 #include <linux/file.h>
32 #include "ncp_fs.h"
34 #include "ncpsign_kernel.h"
36 static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
38 struct msghdr msg = {NULL, };
39 struct kvec iov = {buf, size};
40 return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
43 static inline int do_send(struct socket *sock, struct kvec *vec, int count,
44 int len, unsigned flags)
46 struct msghdr msg = { .msg_flags = flags };
47 return kernel_sendmsg(sock, &msg, vec, count, len);
50 static int _send(struct socket *sock, const void *buff, int len)
52 struct kvec vec;
53 vec.iov_base = (void *) buff;
54 vec.iov_len = len;
55 return do_send(sock, &vec, 1, len, 0);
/* One in-flight NCP request plus the bookkeeping to receive its reply. */
struct ncp_request_reply {
	struct list_head req;		/* link in server->tx.requests queue */
	wait_queue_head_t wq;		/* caller sleeps here until RQ_DONE */
	atomic_t refs;			/* refcount; freed by ncp_req_put() */
	unsigned char* reply_buf;	/* caller's buffer for the reply data */
	size_t datalen;			/* reply_buf capacity / received length */
	int result;			/* final status handed back to caller */
	/* RQ_ABANDONED: caller gave up while the request was in flight */
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct kvec* tx_ciov;		/* current transmit position in tx_iov[] */
	size_t tx_totallen;		/* bytes still to transmit */
	size_t tx_iovlen;		/* live iovec entries starting at tx_ciov */
	struct kvec tx_iov[3];		/* [0] TCP framing, [1] packet, [2] UDP signature */
	u_int16_t tx_type;		/* request type (first word of the packet) */
	u_int32_t sign[6];		/* TCP framing header and/or packet signature */
};
74 static inline struct ncp_request_reply* ncp_alloc_req(void)
76 struct ncp_request_reply *req;
78 req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
79 if (!req)
80 return NULL;
82 init_waitqueue_head(&req->wq);
83 atomic_set(&req->refs, (1));
84 req->status = RQ_IDLE;
86 return req;
/* Take an extra reference on @req. */
static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}
/* Drop a reference on @req; frees it when the last one goes away. */
static void ncp_req_put(struct ncp_request_reply *req)
{
	if (atomic_dec_and_test(&req->refs))
		kfree(req);
}
/*
 * sk->sk_data_ready hook: chain to the socket's original callback, then
 * schedule the receive worker to drain the incoming data.
 */
void ncp_tcp_data_ready(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk);
	schedule_work(&server->rcv.tq);
}
/*
 * sk->sk_error_report hook: chain to the original callback and let the
 * receive worker notice and handle the error.
 */
void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}
/*
 * sk->sk_write_space hook: chain to the original callback and, if a
 * transmit is still pending, reschedule the send worker.
 */
void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}
/*
 * Retransmit-timer callback (old timer_list API: @v is the ncp_server
 * pointer).  Defers the real work to process context via timeout_tq.
 */
void ncpdgram_timeout_call(unsigned long v)
{
	struct ncp_server *server = (void*)v;

	schedule_work(&server->timeout_tq);
}
/*
 * Complete @req with @result: copy the received data into the caller's
 * buffer unless the caller already abandoned the request, then wake all
 * waiters and drop the queue's reference.
 */
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;	/* set after the copy; waiters test this */
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
/*
 * Tear the connection down: invalidate it, stop the retransmit timer,
 * and fail every queued request as well as the in-flight receive and
 * transmit requests with -EIO.  Callers hold rcv.creq_mutex.
 */
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
170 static inline int get_conn_number(struct ncp_reply_header *rp)
172 return rp->conn_low | (rp->conn_high << 8);
/* Abort @req with @err.  Callers hold rcv.creq_mutex. */
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
		case RQ_IDLE:
		case RQ_DONE:
			break;
		case RQ_QUEUED:
			/* not yet on the wire: just unlink and fail it */
			list_del_init(&req->req);
			ncp_finish_request(server, req, err);
			break;
		case RQ_INPROGRESS:
			/* on the wire: mark abandoned so the reply is discarded */
			req->status = RQ_ABANDONED;
			break;
		case RQ_ABANDONED:
			break;
	}
}
/* Locked wrapper around __ncp_abort_request(). */
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}
/* TCP-side fatal error: tear down the whole connection. */
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
206 static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
208 struct kvec vec[3];
209 /* sock_sendmsg updates iov pointers for us :-( */
210 memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
211 return do_send(sock, vec, req->tx_iovlen,
212 req->tx_totallen, MSG_DONTWAIT);
215 static void __ncptcp_try_send(struct ncp_server *server)
217 struct ncp_request_reply *rq;
218 struct kvec *iov;
219 struct kvec iovc[3];
220 int result;
222 rq = server->tx.creq;
223 if (!rq)
224 return;
226 /* sock_sendmsg updates iov pointers for us :-( */
227 memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
228 result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
229 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
231 if (result == -EAGAIN)
232 return;
234 if (result < 0) {
235 pr_err("tcp: Send failed: %d\n", result);
236 __ncp_abort_request(server, rq, result);
237 return;
239 if (result >= rq->tx_totallen) {
240 server->rcv.creq = rq;
241 server->tx.creq = NULL;
242 return;
244 rq->tx_totallen -= result;
245 iov = rq->tx_ciov;
246 while (iov->iov_len <= result) {
247 result -= iov->iov_len;
248 iov++;
249 rq->tx_iovlen--;
251 iov->iov_base += result;
252 iov->iov_len -= result;
253 rq->tx_ciov = iov;
/* Fill in the per-request header fields and mark the request in flight. */
static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}
/*
 * Start transmitting @req over the datagram socket: sign the packet
 * (when signing is active the signature travels as an extra iovec),
 * record it as the request awaiting a reply, send it, and arm the
 * retransmit timer.  Callers hold rcv.creq_mutex.
 */
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	req->tx_ciov = req->tx_iov + 1;	/* datagrams carry no framing header */

	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	/* signature covers the packet from the last header byte onwards */
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(req->tx_totallen), req->sign);
	if (signlen) {
		/* append the signature as a trailing iovec */
		req->tx_ciov[1].iov_base = req->sign;
		req->tx_ciov[1].iov_len = signlen;
		req->tx_iovlen += 1;
		req->tx_totallen += signlen;
	}
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
/* Framing magics for NCP-over-TCP records (xmit = "DmdT", rcvd = "tNcP"). */
#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)
/*
 * Start transmitting @req over TCP: build the record header in
 * req->sign[0..3] (magic, total record length, version, max reply
 * length), optionally followed by the packet signature, prepend it as
 * tx_iov[0] and kick the sender.  Callers hold rcv.creq_mutex.
 */
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	req->tx_ciov = req->tx_iov;	/* framing header goes first */
	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	/* +16: the four framing words always precede the signature */
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(req->tx_totallen + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	req->tx_iovlen += 1;
	req->tx_totallen += signlen;

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
/*
 * Stage @req for transmission, dispatching to the TCP or datagram path
 * by socket type.  Callers hold rcv.creq_mutex.
 */
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/* we copy the data so that we do not depend on the caller
	   staying alive */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type == SOCK_STREAM)
		ncptcp_start_request(server, req);
	else
		ncpdgram_start_request(server, req);
}
/*
 * Queue @req for transmission.  If the link is idle the request starts
 * immediately; otherwise it is appended to tx.requests and picked up
 * later by __ncp_next_request().  Takes a reference on @req that
 * ncp_finish_request() drops.  Returns 0, or -EIO if the connection died.
 */
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		pr_err("tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		/* another request is on the wire; wait our turn */
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
/*
 * The current request is finished: clear rcv.creq and start the next
 * queued request, if any.  Callers hold rcv.creq_mutex.
 */
static void __ncp_next_request(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	server->rcv.creq = NULL;
	if (list_empty(&server->tx.requests)) {
		return;
	}
	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
	list_del_init(&req->req);
	__ncp_start_request(server, req);
}
364 static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
366 if (server->info_sock) {
367 struct kvec iov[2];
368 __be32 hdr[2];
370 hdr[0] = cpu_to_be32(len + 8);
371 hdr[1] = cpu_to_be32(id);
373 iov[0].iov_base = hdr;
374 iov[0].iov_len = 8;
375 iov[1].iov_base = (void *) data;
376 iov[1].iov_len = len;
378 do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
/*
 * Workqueue handler draining the datagram socket.  Each packet header
 * is peeked first: server watchdogs are answered, unknown packet types
 * are forwarded to the info socket, and NCP replies / positive ACKs are
 * matched against the request currently in flight.
 */
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		/* peek so unwanted packets can be dropped without copying */
		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					ncp_dbg(1, "recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					ncp_dbg(1, "too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				/* answer the keepalive: flip '?' to 'Y' and echo */
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				/* not for us: hand the raw packet to userspace */
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			/* ALLOC_SLOT replies cannot be matched by seq/conn yet */
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					/* server is still working: widen the timeout */
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							result -= 8;	/* trailing signature is not payload */
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								pr_info("Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		/* consume the packet we only peeked at above */
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
/*
 * Retransmit path for datagram mounts: resend the in-flight request
 * with exponential backoff capped at NCP_MAX_RPC_TIMEOUT.  Soft mounts
 * give up with -ETIMEDOUT once the retry budget is exhausted.
 * Callers hold rcv.creq_mutex.
 */
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply* req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			timeout = server->timeout_last << 1;	/* exponential backoff */
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}
/* Process-context half of the retransmit timer. */
void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
510 static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
512 int result;
514 if (buffer) {
515 result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
516 } else {
517 static unsigned char dummy[1024];
519 if (len > sizeof(dummy)) {
520 len = sizeof(dummy);
522 result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
524 if (result < 0) {
525 return result;
527 if (result > len) {
528 pr_err("tcp: bug in recvmsg (%u > %Zu)\n", result, len);
529 return -EIO;
531 return result;
/*
 * TCP receive state machine.  rcv.ptr/rcv.len describe the read in
 * progress (ptr == NULL means "discard"); rcv.state selects what to do
 * once that read completes:
 *   0 - record header arrived: validate magic/length and route
 *   1 - reply body arrived: verify and complete the current request
 *   2 - payload being skipped (unwanted/unknown packet)
 *   3 - payload being skipped because it overflowed the reply buffer;
 *       the request is then failed with -EIO
 *   4 - (signing only) second half of the enlarged header arrived
 *   5 - non-reply packet body arrived: forward to the info socket
 * Returns 0 when the socket would block, negative on fatal errors.
 * Callers hold rcv.creq_mutex.
 */
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				/* error or EOF: fail the current request,
				   or the whole connection if none */
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					pr_err("tcp: error in recvmsg: %d\n", result);
				} else {
					ncp_dbg(1, "tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
		case 0:
			if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
				pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
				__ncptcp_abort(server);
				return -EIO;
			}
			datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
			if (datalen < 10) {
				pr_err("tcp: Unexpected reply len %d\n", datalen);
				__ncptcp_abort(server);
				return -EIO;
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active) {
				if (datalen < 18) {
					pr_err("tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
				/* signed records carry 8 extra header bytes;
				   fetch them before deciding (state 4) */
				server->rcv.buf.len = datalen - 8;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
				server->rcv.len = 8;
				server->rcv.state = 4;
				break;
			}
#endif
			type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
			if (type != NCP_REPLY) {
				/* forward unexpected packets to userspace
				   when they fit; otherwise just skip them */
				if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
					*(__u16*)(server->unexpected_packet.data) = htons(type);
					server->unexpected_packet.len = datalen - 8;

					server->rcv.state = 5;
					server->rcv.ptr = server->unexpected_packet.data + 2;
					server->rcv.len = datalen - 10;
					break;
				}
				ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
				server->rcv.state = 2;
skipdata:;
				server->rcv.ptr = NULL;	/* NULL ptr = discard */
				server->rcv.len = datalen - 10;
				break;
			}
			req = server->rcv.creq;
			if (!req) {
				ncp_dbg(1, "Reply without appropriate request\n");
				goto skipdata2;
			}
			if (datalen > req->datalen + 8) {
				pr_err("tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8);
				server->rcv.state = 3;
				goto skipdata;
			}
			req->datalen = datalen - 8;
			((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
			server->rcv.ptr = server->rxbuf + 2;
			server->rcv.len = datalen - 10;
			server->rcv.state = 1;
			break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
		case 4:
			/* enlarged header complete: resume header handling */
			datalen = server->rcv.buf.len;
			type = ntohs(server->rcv.buf.type2);
			goto cont;
#endif
		case 1:
			req = server->rcv.creq;
			/* ALLOC_SLOT replies cannot be checked by seq/conn yet */
			if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
				if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
					pr_err("tcp: Bad sequence number\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
				if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
					pr_err("tcp: Connection number mismatch\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
				if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
					pr_err("tcp: Signature violation\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#endif
			ncp_finish_request(server, req, req->datalen);
nextreq:;
			__ncp_next_request(server);
			/* fall through - arm the next header read */
		case 2:
next:;
			server->rcv.ptr = (unsigned char*)&server->rcv.buf;
			server->rcv.len = 10;
			server->rcv.state = 0;
			break;
		case 3:
			/* oversized reply fully skipped: fail the request */
			ncp_finish_request(server, server->rcv.creq, -EIO);
			goto nextreq;
		case 5:
			info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
			goto next;
		}
	}
}
/* Workqueue entry: drain the TCP socket under the request mutex. */
void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
/* Workqueue entry: resume a partially-sent TCP request (write space). */
void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
/*
 * Submit one RPC built in server->packet (@size bytes) and sleep until
 * the reply (at most @max_reply_size bytes) lands in @reply_buf.
 * Interruptible: a signal aborts the request with -EINTR.
 * Returns the reply length or a negative errno.
 */
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_iovlen = 1;
	req->tx_totallen = size;
	req->tx_type = *(u_int16_t*)server->packet;	/* first word = request type */

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}

	result = req->result;

out:
	ncp_req_put(req);

	return result;
}
/*
 * We need the server to be locked here, so check!
 *
 * Issues the RPC with all signals except SIGKILL (plus SIGINT/SIGQUIT
 * for interruptible mounts with default handlers) blocked for the
 * duration of the call; the original signal mask is restored afterwards.
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		pr_err("Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		/* an exiting task must not be interruptible at all */
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all. So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves? What about
			   alarms? Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		/* restore the caller's signal mask */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);

	return result;
}
/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		/* first payload word carries the subfunction data length */
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		ncp_dbg(1, "ncp_request_error: %d\n", result);
		goto out;
	}
	/* cache reply metadata on the server for the caller to inspect */
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		ncp_vdbg("completion code=%x\n", result);
out:
	return result;
}
831 int ncp_connect(struct ncp_server *server)
833 struct ncp_request_header *h;
834 int result;
836 server->connection = 0xFFFF;
837 server->sequence = 255;
839 h = (struct ncp_request_header *) (server->packet);
840 h->type = NCP_ALLOC_SLOT_REQUEST;
841 h->task = 2; /* see above */
842 h->function = 0;
844 result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
845 if (result < 0)
846 goto out;
847 server->connection = h->conn_low + (h->conn_high * 256);
848 result = 0;
849 out:
850 return result;
853 int ncp_disconnect(struct ncp_server *server)
855 struct ncp_request_header *h;
857 h = (struct ncp_request_header *) (server->packet);
858 h->type = NCP_DEALLOC_SLOT_REQUEST;
859 h->task = 2; /* see above */
860 h->function = 0;
862 return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
/* Acquire the per-server mutex and flag it as locked for sanity checks. */
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		pr_warn("%s: was locked!\n", __func__);
	server->lock = 1;
}
/* Release the per-server mutex; warns and bails if it was not locked. */
void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		pr_warn("%s: was not locked!\n", __func__);
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}