/*
 *  linux/fs/ncpfs/sock.c
 *
 *  Copyright (C) 1992, 1993  Rick Sladkey
 *
 *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <net/scm.h>
#include <net/sock.h>
#include <linux/ipx.h>
#include <linux/poll.h>
#include <linux/file.h>

#include "ncp_fs.h"

#include "ncpsign_kernel.h"

static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
	struct msghdr msg = {NULL, };
	struct kvec iov = {buf, size};
	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
}

static inline int do_send(struct socket *sock, struct kvec *vec, int count,
			  int len, unsigned flags)
{
	struct msghdr msg = { .msg_flags = flags };
	return kernel_sendmsg(sock, &msg, vec, count, len);
}

static int _send(struct socket *sock, const void *buff, int len)
{
	struct kvec vec;
	vec.iov_base = (void *) buff;
	vec.iov_len = len;
	return do_send(sock, &vec, 1, len, 0);
}
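
/*
 * Bookkeeping for one in-flight NCP call.  The structure is refcounted
 * (ncp_req_get/ncp_req_put) because both the submitter and the receive
 * path hold it; status moves RQ_IDLE -> RQ_QUEUED -> RQ_INPROGRESS ->
 * RQ_DONE, and RQ_ABANDONED marks a request whose caller gave up before
 * the answer arrived, so reply_buf must not be written to any more.
 */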
struct ncp_request_reply {
	struct list_head req;
	wait_queue_head_t wq;
	atomic_t refs;
	unsigned char* reply_buf;
	size_t datalen;
	int result;
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct kvec* tx_ciov;
	size_t tx_totallen;
	size_t tx_iovlen;
	struct kvec tx_iov[3];
	u_int16_t tx_type;
	u_int32_t sign[6];
};

static inline struct ncp_request_reply* ncp_alloc_req(void)
{
	struct ncp_request_reply *req;

	req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
	if (!req)
		return NULL;

	init_waitqueue_head(&req->wq);
	atomic_set(&req->refs, (1));
	req->status = RQ_IDLE;

	return req;
}

static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}

static void ncp_req_put(struct ncp_request_reply *req)
{
	if (atomic_dec_and_test(&req->refs))
		kfree(req);
}
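
/*
 * Socket callbacks: sk_user_data points back at the ncp_server.  Each
 * callback chains to the one stashed in the server (data_ready,
 * error_report, write_space) and then kicks the matching work item so
 * the real processing runs from the workqueue rather than in softirq
 * context.
 */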
void ncp_tcp_data_ready(struct sock *sk, int len)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk, len);
	schedule_work(&server->rcv.tq);
}

void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}

void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}

void ncpdgram_timeout_call(unsigned long v)
{
	struct ncp_server *server = (void*)v;

	schedule_work(&server->timeout_tq);
}

static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
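
/*
 * Tear the whole connection down: invalidate it, stop the retransmit
 * timer and fail every queued or in-flight request with -EIO.
 */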
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}

static inline int get_conn_number(struct ncp_reply_header *rp)
{
	return rp->conn_low | (rp->conn_high << 8);
}

static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
		case RQ_IDLE:
		case RQ_DONE:
			break;
		case RQ_QUEUED:
			list_del_init(&req->req);
			ncp_finish_request(server, req, err);
			break;
		case RQ_INPROGRESS:
			req->status = RQ_ABANDONED;
			break;
		case RQ_ABANDONED:
			break;
	}
}

static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}

static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
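
/*
 * Transmit helpers.  The lower-level sendmsg updates the iov pointers,
 * so both senders work on a local copy of req->tx_ciov.  The TCP path
 * must also cope with short sends: __ncptcp_try_send() advances
 * tx_ciov/tx_totallen and is re-run from the write_space callback until
 * the whole request has been pushed out.
 */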
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
{
	struct kvec vec[3];
	/* sock_sendmsg updates iov pointers for us :-( */
	memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
	return do_send(sock, vec, req->tx_iovlen,
		       req->tx_totallen, MSG_DONTWAIT);
}

static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct kvec *iov;
	struct kvec iovc[3];
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	/* sock_sendmsg updates iov pointers for us :-( */
	memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
	result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
			 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);

	if (result == -EAGAIN)
		return;

	if (result < 0) {
		printk(KERN_ERR "ncpfs: tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (result >= rq->tx_totallen) {
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	rq->tx_totallen -= result;
	iov = rq->tx_ciov;
	while (iov->iov_len <= result) {
		result -= iov->iov_len;
		iov++;
		rq->tx_iovlen--;
	}
	iov->iov_base += result;
	iov->iov_len -= result;
	rq->tx_ciov = iov;
}

static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}
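
/*
 * Start a request on a datagram (UDP or IPX) mount: optionally append
 * the packet signature as an extra iovec, make the request current for
 * the receive side, send it and arm the retransmit timer.
 */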
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	req->tx_ciov = req->tx_iov + 1;

	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(req->tx_totallen), req->sign);
	if (signlen) {
		req->tx_ciov[1].iov_base = req->sign;
		req->tx_ciov[1].iov_len = signlen;
		req->tx_iovlen += 1;
		req->tx_totallen += signlen;
	}
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}

#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)
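
/*
 * On TCP every request carries a small framing prefix built in
 * req->sign[]: transmit magic, total length on the wire, protocol
 * version and the size of the reply buffer (datalen + 8).  With packet
 * signing enabled the signature occupies the rest of the same prefix.
 */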
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	req->tx_ciov = req->tx_iov;
	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(req->tx_totallen + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	req->tx_iovlen += 1;
	req->tx_totallen += signlen;

	server->tx.creq = req;
	__ncptcp_try_send(server);
}

static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/* we copy the data so that we do not depend on the caller
	   staying alive */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type == SOCK_STREAM)
		ncptcp_start_request(server, req);
	else
		ncpdgram_start_request(server, req);
}

static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		printk(KERN_ERR "ncpfs: tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}

static void __ncp_next_request(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	server->rcv.creq = NULL;
	if (list_empty(&server->tx.requests)) {
		return;
	}
	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
	list_del_init(&req->req);
	__ncp_start_request(server, req);
}

static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
{
	if (server->info_sock) {
		struct kvec iov[2];
		__be32 hdr[2];

		hdr[0] = cpu_to_be32(len + 8);
		hdr[1] = cpu_to_be32(id);

		iov[0].iov_base = hdr;
		iov[0].iov_len = 8;
		iov[1].iov_base = (void *) data;
		iov[1].iov_len = len;

		do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
	}
}
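
/*
 * Datagram receive worker.  Peek at the reply header first: answer
 * watchdog packets in place, hand anything that is neither a positive
 * ack nor a reply to the userspace info socket, and match real replies
 * against the current request (a positive ack merely restarts the
 * timeout).
 */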
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					DPRINTK("recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					DPRINTK("too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					DPRINTK("bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								printk(KERN_INFO "ncpfs: Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
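
/*
 * Datagram timeout handling: retransmit the current request with an
 * exponential backoff capped at NCP_MAX_RPC_TIMEOUT; on a soft mount
 * give up with -ETIMEDOUT once the retry count is exhausted.
 */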
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply* req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}

void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);
	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}

static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
	int result;

	if (buffer) {
		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
	} else {
		static unsigned char dummy[1024];

		if (len > sizeof(dummy)) {
			len = sizeof(dummy);
		}
		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
	}
	if (result < 0) {
		return result;
	}
	if (result > len) {
		printk(KERN_ERR "ncpfs: tcp: bug in recvmsg (%u > %Zu)\n", result, len);
		return -EIO;
	}
	return result;
}
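
/*
 * TCP receive state machine (server->rcv.state):
 *   0 - reading the 10 byte frame header
 *   1 - reply payload complete; verify and finish the request
 *   2 - skipping the payload of an uninteresting packet
 *   3 - skipping an oversized reply, then failing the request
 *   4 - reading the extra signing header (CONFIG_NCPFS_PACKET_SIGNING)
 *   5 - unexpected packet captured; pass it to the info socket
 */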
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					printk(KERN_ERR "ncpfs: tcp: error in recvmsg: %d\n", result);
				} else {
					DPRINTK(KERN_ERR "ncpfs: tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
			case 0:
				if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
					__ncptcp_abort(server);
					return -EIO;
				}
				datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
				if (datalen < 10) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active) {
					if (datalen < 18) {
						printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
						__ncptcp_abort(server);
						return -EIO;
					}
					server->rcv.buf.len = datalen - 8;
					server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
					server->rcv.len = 8;
					server->rcv.state = 4;
					break;
				}
#endif
				type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
				if (type != NCP_REPLY) {
					if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
						*(__u16*)(server->unexpected_packet.data) = htons(type);
						server->unexpected_packet.len = datalen - 8;

						server->rcv.state = 5;
						server->rcv.ptr = server->unexpected_packet.data + 2;
						server->rcv.len = datalen - 10;
						break;
					}
					DPRINTK("ncpfs: tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
					server->rcv.state = 2;
skipdata:;
					server->rcv.ptr = NULL;
					server->rcv.len = datalen - 10;
					break;
				}
				req = server->rcv.creq;
				if (!req) {
					DPRINTK(KERN_ERR "ncpfs: Reply without appropriate request\n");
					goto skipdata2;
				}
				if (datalen > req->datalen + 8) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8);
					server->rcv.state = 3;
					goto skipdata;
				}
				req->datalen = datalen - 8;
				((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
				server->rcv.ptr = server->rxbuf + 2;
				server->rcv.len = datalen - 10;
				server->rcv.state = 1;
				break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			case 4:
				datalen = server->rcv.buf.len;
				type = ntohs(server->rcv.buf.type2);
				goto cont;
#endif
			case 1:
				req = server->rcv.creq;
				if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
					if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
						printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
					if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
						printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
					if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
						printk(KERN_ERR "ncpfs: tcp: Signature violation\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#endif
				ncp_finish_request(server, req, req->datalen);
			nextreq:;
				__ncp_next_request(server);
			case 2:
			next:;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf;
				server->rcv.len = 10;
				server->rcv.state = 0;
				break;
			case 3:
				ncp_finish_request(server, server->rcv.creq, -EIO);
				goto nextreq;
			case 5:
				info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
				goto next;
		}
	}
}

void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}

void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
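
/*
 * Blocking RPC wrapper: allocate a request, point it at the caller's
 * reply buffer, queue it and sleep interruptibly until it reaches
 * RQ_DONE.  A signal aborts the request (the reply may still arrive
 * later; see RQ_ABANDONED) and returns -EINTR.
 */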
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_iovlen = 1;
	req->tx_totallen = size;
	req->tx_type = *(u_int16_t*)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}
	result = req->result;

out:
	ncp_req_put(req);

	return result;
}

/*
 * We need the server to be locked here, so check!
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		printk(KERN_ERR "ncpfs: Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~  */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	DDPRINTK("do_ncp_rpc_call returned %d\n", result);

	return result;
}

/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		DPRINTK("ncp_request_error: %d\n", result);
		goto out;
	}
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		PPRINTK("ncp_request: completion code=%x\n", result);
out:
	return result;
}
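
/*
 * Connection management: ncp_connect() asks the server for a connection
 * slot (NCP_ALLOC_SLOT_REQUEST) and records the connection number from
 * the reply header; ncp_disconnect() releases the slot again with
 * NCP_DEALLOC_SLOT_REQUEST.
 */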
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;
	server->sequence = 255;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	server->connection = h->conn_low + (h->conn_high * 256);
	result = 0;
out:
	return result;
}

int ncp_disconnect(struct ncp_server *server)
{
	struct ncp_request_header *h;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_DEALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
}
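
/*
 * ncp_lock_server()/ncp_unlock_server() wrap server->mutex and maintain
 * the lock flag that ncp_do_request() insists on; requests are built in
 * the shared server->packet buffer, so a caller holds this lock while
 * building and issuing a request.
 */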
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		printk(KERN_WARNING "ncp_lock_server: was locked!\n");
	server->lock = 1;
}

void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		printk(KERN_WARNING "ncp_unlock_server: was not locked!\n");
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}