/*
 *  linux/fs/ncpfs/sock.c
 *
 *  Copyright (C) 1992, 1993  Rick Sladkey
 *
 *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/time.h>
14 #include <linux/errno.h>
15 #include <linux/socket.h>
16 #include <linux/fcntl.h>
17 #include <linux/stat.h>
18 #include <linux/string.h>
19 #include <asm/uaccess.h>
21 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
28 #include <linux/ipx.h>
29 #include <linux/poll.h>
30 #include <linux/file.h>
34 #include "ncpsign_kernel.h"
36 static int _recv(struct socket
*sock
, void *buf
, int size
, unsigned flags
)
38 struct msghdr msg
= {NULL
, };
39 struct kvec iov
= {buf
, size
};
40 return kernel_recvmsg(sock
, &msg
, &iov
, 1, size
, flags
);
43 static inline int do_send(struct socket
*sock
, struct kvec
*vec
, int count
,
44 int len
, unsigned flags
)
46 struct msghdr msg
= { .msg_flags
= flags
};
47 return kernel_sendmsg(sock
, &msg
, vec
, count
, len
);
50 static int _send(struct socket
*sock
, const void *buff
, int len
)
53 vec
.iov_base
= (void *) buff
;
55 return do_send(sock
, &vec
, 1, len
, 0);
58 struct ncp_request_reply
{
62 unsigned char* reply_buf
;
65 enum { RQ_DONE
, RQ_INPROGRESS
, RQ_QUEUED
, RQ_IDLE
, RQ_ABANDONED
} status
;
69 struct kvec tx_iov
[3];
74 static inline struct ncp_request_reply
* ncp_alloc_req(void)
76 struct ncp_request_reply
*req
;
78 req
= kmalloc(sizeof(struct ncp_request_reply
), GFP_KERNEL
);
82 init_waitqueue_head(&req
->wq
);
83 atomic_set(&req
->refs
, (1));
84 req
->status
= RQ_IDLE
;
89 static void ncp_req_get(struct ncp_request_reply
*req
)
91 atomic_inc(&req
->refs
);
94 static void ncp_req_put(struct ncp_request_reply
*req
)
96 if (atomic_dec_and_test(&req
->refs
))
100 void ncp_tcp_data_ready(struct sock
*sk
)
102 struct ncp_server
*server
= sk
->sk_user_data
;
104 server
->data_ready(sk
);
105 schedule_work(&server
->rcv
.tq
);
108 void ncp_tcp_error_report(struct sock
*sk
)
110 struct ncp_server
*server
= sk
->sk_user_data
;
112 server
->error_report(sk
);
113 schedule_work(&server
->rcv
.tq
);
116 void ncp_tcp_write_space(struct sock
*sk
)
118 struct ncp_server
*server
= sk
->sk_user_data
;
120 /* We do not need any locking: we first set tx.creq, and then we do sendmsg,
122 server
->write_space(sk
);
124 schedule_work(&server
->tx
.tq
);
127 void ncpdgram_timeout_call(unsigned long v
)
129 struct ncp_server
*server
= (void*)v
;
131 schedule_work(&server
->timeout_tq
);
134 static inline void ncp_finish_request(struct ncp_server
*server
, struct ncp_request_reply
*req
, int result
)
136 req
->result
= result
;
137 if (req
->status
!= RQ_ABANDONED
)
138 memcpy(req
->reply_buf
, server
->rxbuf
, req
->datalen
);
139 req
->status
= RQ_DONE
;
140 wake_up_all(&req
->wq
);
144 static void __abort_ncp_connection(struct ncp_server
*server
)
146 struct ncp_request_reply
*req
;
148 ncp_invalidate_conn(server
);
149 del_timer(&server
->timeout_tm
);
150 while (!list_empty(&server
->tx
.requests
)) {
151 req
= list_entry(server
->tx
.requests
.next
, struct ncp_request_reply
, req
);
153 list_del_init(&req
->req
);
154 ncp_finish_request(server
, req
, -EIO
);
156 req
= server
->rcv
.creq
;
158 server
->rcv
.creq
= NULL
;
159 ncp_finish_request(server
, req
, -EIO
);
160 server
->rcv
.ptr
= NULL
;
161 server
->rcv
.state
= 0;
163 req
= server
->tx
.creq
;
165 server
->tx
.creq
= NULL
;
166 ncp_finish_request(server
, req
, -EIO
);
170 static inline int get_conn_number(struct ncp_reply_header
*rp
)
172 return rp
->conn_low
| (rp
->conn_high
<< 8);
175 static inline void __ncp_abort_request(struct ncp_server
*server
, struct ncp_request_reply
*req
, int err
)
177 /* If req is done, we got signal, but we also received answer... */
178 switch (req
->status
) {
183 list_del_init(&req
->req
);
184 ncp_finish_request(server
, req
, err
);
187 req
->status
= RQ_ABANDONED
;
194 static inline void ncp_abort_request(struct ncp_server
*server
, struct ncp_request_reply
*req
, int err
)
196 mutex_lock(&server
->rcv
.creq_mutex
);
197 __ncp_abort_request(server
, req
, err
);
198 mutex_unlock(&server
->rcv
.creq_mutex
);
/* TCP-transport abort: the whole connection is unusable, kill it. */
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
206 static int ncpdgram_send(struct socket
*sock
, struct ncp_request_reply
*req
)
209 /* sock_sendmsg updates iov pointers for us :-( */
210 memcpy(vec
, req
->tx_ciov
, req
->tx_iovlen
* sizeof(vec
[0]));
211 return do_send(sock
, vec
, req
->tx_iovlen
,
212 req
->tx_totallen
, MSG_DONTWAIT
);
215 static void __ncptcp_try_send(struct ncp_server
*server
)
217 struct ncp_request_reply
*rq
;
222 rq
= server
->tx
.creq
;
226 /* sock_sendmsg updates iov pointers for us :-( */
227 memcpy(iovc
, rq
->tx_ciov
, rq
->tx_iovlen
* sizeof(iov
[0]));
228 result
= do_send(server
->ncp_sock
, iovc
, rq
->tx_iovlen
,
229 rq
->tx_totallen
, MSG_NOSIGNAL
| MSG_DONTWAIT
);
231 if (result
== -EAGAIN
)
235 pr_err("tcp: Send failed: %d\n", result
);
236 __ncp_abort_request(server
, rq
, result
);
239 if (result
>= rq
->tx_totallen
) {
240 server
->rcv
.creq
= rq
;
241 server
->tx
.creq
= NULL
;
244 rq
->tx_totallen
-= result
;
246 while (iov
->iov_len
<= result
) {
247 result
-= iov
->iov_len
;
251 iov
->iov_base
+= result
;
252 iov
->iov_len
-= result
;
256 static inline void ncp_init_header(struct ncp_server
*server
, struct ncp_request_reply
*req
, struct ncp_request_header
*h
)
258 req
->status
= RQ_INPROGRESS
;
259 h
->conn_low
= server
->connection
;
260 h
->conn_high
= server
->connection
>> 8;
261 h
->sequence
= ++server
->sequence
;
264 static void ncpdgram_start_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
267 struct ncp_request_header
* h
;
269 req
->tx_ciov
= req
->tx_iov
+ 1;
271 h
= req
->tx_iov
[1].iov_base
;
272 ncp_init_header(server
, req
, h
);
273 signlen
= sign_packet(server
, req
->tx_iov
[1].iov_base
+ sizeof(struct ncp_request_header
) - 1,
274 req
->tx_iov
[1].iov_len
- sizeof(struct ncp_request_header
) + 1,
275 cpu_to_le32(req
->tx_totallen
), req
->sign
);
277 req
->tx_ciov
[1].iov_base
= req
->sign
;
278 req
->tx_ciov
[1].iov_len
= signlen
;
280 req
->tx_totallen
+= signlen
;
282 server
->rcv
.creq
= req
;
283 server
->timeout_last
= server
->m
.time_out
;
284 server
->timeout_retries
= server
->m
.retry_count
;
285 ncpdgram_send(server
->ncp_sock
, req
);
286 mod_timer(&server
->timeout_tm
, jiffies
+ server
->m
.time_out
);
289 #define NCP_TCP_XMIT_MAGIC (0x446D6454)
290 #define NCP_TCP_XMIT_VERSION (1)
291 #define NCP_TCP_RCVD_MAGIC (0x744E6350)
293 static void ncptcp_start_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
296 struct ncp_request_header
* h
;
298 req
->tx_ciov
= req
->tx_iov
;
299 h
= req
->tx_iov
[1].iov_base
;
300 ncp_init_header(server
, req
, h
);
301 signlen
= sign_packet(server
, req
->tx_iov
[1].iov_base
+ sizeof(struct ncp_request_header
) - 1,
302 req
->tx_iov
[1].iov_len
- sizeof(struct ncp_request_header
) + 1,
303 cpu_to_be32(req
->tx_totallen
+ 24), req
->sign
+ 4) + 16;
305 req
->sign
[0] = htonl(NCP_TCP_XMIT_MAGIC
);
306 req
->sign
[1] = htonl(req
->tx_totallen
+ signlen
);
307 req
->sign
[2] = htonl(NCP_TCP_XMIT_VERSION
);
308 req
->sign
[3] = htonl(req
->datalen
+ 8);
309 req
->tx_iov
[0].iov_base
= req
->sign
;
310 req
->tx_iov
[0].iov_len
= signlen
;
312 req
->tx_totallen
+= signlen
;
314 server
->tx
.creq
= req
;
315 __ncptcp_try_send(server
);
318 static inline void __ncp_start_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
320 /* we copy the data so that we do not depend on the caller
322 memcpy(server
->txbuf
, req
->tx_iov
[1].iov_base
, req
->tx_iov
[1].iov_len
);
323 req
->tx_iov
[1].iov_base
= server
->txbuf
;
325 if (server
->ncp_sock
->type
== SOCK_STREAM
)
326 ncptcp_start_request(server
, req
);
328 ncpdgram_start_request(server
, req
);
331 static int ncp_add_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
333 mutex_lock(&server
->rcv
.creq_mutex
);
334 if (!ncp_conn_valid(server
)) {
335 mutex_unlock(&server
->rcv
.creq_mutex
);
336 pr_err("tcp: Server died\n");
340 if (server
->tx
.creq
|| server
->rcv
.creq
) {
341 req
->status
= RQ_QUEUED
;
342 list_add_tail(&req
->req
, &server
->tx
.requests
);
343 mutex_unlock(&server
->rcv
.creq_mutex
);
346 __ncp_start_request(server
, req
);
347 mutex_unlock(&server
->rcv
.creq_mutex
);
351 static void __ncp_next_request(struct ncp_server
*server
)
353 struct ncp_request_reply
*req
;
355 server
->rcv
.creq
= NULL
;
356 if (list_empty(&server
->tx
.requests
)) {
359 req
= list_entry(server
->tx
.requests
.next
, struct ncp_request_reply
, req
);
360 list_del_init(&req
->req
);
361 __ncp_start_request(server
, req
);
364 static void info_server(struct ncp_server
*server
, unsigned int id
, const void * data
, size_t len
)
366 if (server
->info_sock
) {
370 hdr
[0] = cpu_to_be32(len
+ 8);
371 hdr
[1] = cpu_to_be32(id
);
373 iov
[0].iov_base
= hdr
;
375 iov
[1].iov_base
= (void *) data
;
376 iov
[1].iov_len
= len
;
378 do_send(server
->info_sock
, iov
, 2, len
+ 8, MSG_NOSIGNAL
);
382 void ncpdgram_rcv_proc(struct work_struct
*work
)
384 struct ncp_server
*server
=
385 container_of(work
, struct ncp_server
, rcv
.tq
);
388 sock
= server
->ncp_sock
;
391 struct ncp_reply_header reply
;
394 result
= _recv(sock
, &reply
, sizeof(reply
), MSG_PEEK
| MSG_DONTWAIT
);
398 if (result
>= sizeof(reply
)) {
399 struct ncp_request_reply
*req
;
401 if (reply
.type
== NCP_WATCHDOG
) {
402 unsigned char buf
[10];
404 if (server
->connection
!= get_conn_number(&reply
)) {
407 result
= _recv(sock
, buf
, sizeof(buf
), MSG_DONTWAIT
);
409 ncp_dbg(1, "recv failed with %d\n", result
);
413 ncp_dbg(1, "too short (%u) watchdog packet\n", result
);
417 ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf
[9]);
421 _send(sock
, buf
, sizeof(buf
));
424 if (reply
.type
!= NCP_POSITIVE_ACK
&& reply
.type
!= NCP_REPLY
) {
425 result
= _recv(sock
, server
->unexpected_packet
.data
, sizeof(server
->unexpected_packet
.data
), MSG_DONTWAIT
);
429 info_server(server
, 0, server
->unexpected_packet
.data
, result
);
432 mutex_lock(&server
->rcv
.creq_mutex
);
433 req
= server
->rcv
.creq
;
434 if (req
&& (req
->tx_type
== NCP_ALLOC_SLOT_REQUEST
|| (server
->sequence
== reply
.sequence
&&
435 server
->connection
== get_conn_number(&reply
)))) {
436 if (reply
.type
== NCP_POSITIVE_ACK
) {
437 server
->timeout_retries
= server
->m
.retry_count
;
438 server
->timeout_last
= NCP_MAX_RPC_TIMEOUT
;
439 mod_timer(&server
->timeout_tm
, jiffies
+ NCP_MAX_RPC_TIMEOUT
);
440 } else if (reply
.type
== NCP_REPLY
) {
441 result
= _recv(sock
, server
->rxbuf
, req
->datalen
, MSG_DONTWAIT
);
442 #ifdef CONFIG_NCPFS_PACKET_SIGNING
443 if (result
>= 0 && server
->sign_active
&& req
->tx_type
!= NCP_DEALLOC_SLOT_REQUEST
) {
444 if (result
< 8 + 8) {
450 hdrl
= sock
->sk
->sk_family
== AF_INET
? 8 : 6;
451 if (sign_verify_reply(server
, server
->rxbuf
+ hdrl
, result
- hdrl
, cpu_to_le32(result
), server
->rxbuf
+ result
)) {
452 pr_info("Signature violation\n");
458 del_timer(&server
->timeout_tm
);
459 server
->rcv
.creq
= NULL
;
460 ncp_finish_request(server
, req
, result
);
461 __ncp_next_request(server
);
462 mutex_unlock(&server
->rcv
.creq_mutex
);
466 mutex_unlock(&server
->rcv
.creq_mutex
);
469 _recv(sock
, &reply
, sizeof(reply
), MSG_DONTWAIT
);
473 static void __ncpdgram_timeout_proc(struct ncp_server
*server
)
475 /* If timer is pending, we are processing another request... */
476 if (!timer_pending(&server
->timeout_tm
)) {
477 struct ncp_request_reply
* req
;
479 req
= server
->rcv
.creq
;
483 if (server
->m
.flags
& NCP_MOUNT_SOFT
) {
484 if (server
->timeout_retries
-- == 0) {
485 __ncp_abort_request(server
, req
, -ETIMEDOUT
);
490 ncpdgram_send(server
->ncp_sock
, req
);
491 timeout
= server
->timeout_last
<< 1;
492 if (timeout
> NCP_MAX_RPC_TIMEOUT
) {
493 timeout
= NCP_MAX_RPC_TIMEOUT
;
495 server
->timeout_last
= timeout
;
496 mod_timer(&server
->timeout_tm
, jiffies
+ timeout
);
501 void ncpdgram_timeout_proc(struct work_struct
*work
)
503 struct ncp_server
*server
=
504 container_of(work
, struct ncp_server
, timeout_tq
);
505 mutex_lock(&server
->rcv
.creq_mutex
);
506 __ncpdgram_timeout_proc(server
);
507 mutex_unlock(&server
->rcv
.creq_mutex
);
510 static int do_tcp_rcv(struct ncp_server
*server
, void *buffer
, size_t len
)
515 result
= _recv(server
->ncp_sock
, buffer
, len
, MSG_DONTWAIT
);
517 static unsigned char dummy
[1024];
519 if (len
> sizeof(dummy
)) {
522 result
= _recv(server
->ncp_sock
, dummy
, len
, MSG_DONTWAIT
);
528 pr_err("tcp: bug in recvmsg (%u > %Zu)\n", result
, len
);
534 static int __ncptcp_rcv_proc(struct ncp_server
*server
)
536 /* We have to check the result, so store the complete header */
539 struct ncp_request_reply
*req
;
543 while (server
->rcv
.len
) {
544 result
= do_tcp_rcv(server
, server
->rcv
.ptr
, server
->rcv
.len
);
545 if (result
== -EAGAIN
) {
549 req
= server
->rcv
.creq
;
551 __ncp_abort_request(server
, req
, -EIO
);
553 __ncptcp_abort(server
);
556 pr_err("tcp: error in recvmsg: %d\n", result
);
558 ncp_dbg(1, "tcp: EOF\n");
562 if (server
->rcv
.ptr
) {
563 server
->rcv
.ptr
+= result
;
565 server
->rcv
.len
-= result
;
567 switch (server
->rcv
.state
) {
569 if (server
->rcv
.buf
.magic
!= htonl(NCP_TCP_RCVD_MAGIC
)) {
570 pr_err("tcp: Unexpected reply type %08X\n", ntohl(server
->rcv
.buf
.magic
));
571 __ncptcp_abort(server
);
574 datalen
= ntohl(server
->rcv
.buf
.len
) & 0x0FFFFFFF;
576 pr_err("tcp: Unexpected reply len %d\n", datalen
);
577 __ncptcp_abort(server
);
580 #ifdef CONFIG_NCPFS_PACKET_SIGNING
581 if (server
->sign_active
) {
583 pr_err("tcp: Unexpected reply len %d\n", datalen
);
584 __ncptcp_abort(server
);
587 server
->rcv
.buf
.len
= datalen
- 8;
588 server
->rcv
.ptr
= (unsigned char*)&server
->rcv
.buf
.p1
;
590 server
->rcv
.state
= 4;
594 type
= ntohs(server
->rcv
.buf
.type
);
595 #ifdef CONFIG_NCPFS_PACKET_SIGNING
598 if (type
!= NCP_REPLY
) {
599 if (datalen
- 8 <= sizeof(server
->unexpected_packet
.data
)) {
600 *(__u16
*)(server
->unexpected_packet
.data
) = htons(type
);
601 server
->unexpected_packet
.len
= datalen
- 8;
603 server
->rcv
.state
= 5;
604 server
->rcv
.ptr
= server
->unexpected_packet
.data
+ 2;
605 server
->rcv
.len
= datalen
- 10;
608 ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type
);
610 server
->rcv
.state
= 2;
612 server
->rcv
.ptr
= NULL
;
613 server
->rcv
.len
= datalen
- 10;
616 req
= server
->rcv
.creq
;
618 ncp_dbg(1, "Reply without appropriate request\n");
621 if (datalen
> req
->datalen
+ 8) {
622 pr_err("tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen
, req
->datalen
+ 8);
623 server
->rcv
.state
= 3;
626 req
->datalen
= datalen
- 8;
627 ((struct ncp_reply_header
*)server
->rxbuf
)->type
= NCP_REPLY
;
628 server
->rcv
.ptr
= server
->rxbuf
+ 2;
629 server
->rcv
.len
= datalen
- 10;
630 server
->rcv
.state
= 1;
632 #ifdef CONFIG_NCPFS_PACKET_SIGNING
634 datalen
= server
->rcv
.buf
.len
;
635 type
= ntohs(server
->rcv
.buf
.type2
);
639 req
= server
->rcv
.creq
;
640 if (req
->tx_type
!= NCP_ALLOC_SLOT_REQUEST
) {
641 if (((struct ncp_reply_header
*)server
->rxbuf
)->sequence
!= server
->sequence
) {
642 pr_err("tcp: Bad sequence number\n");
643 __ncp_abort_request(server
, req
, -EIO
);
646 if ((((struct ncp_reply_header
*)server
->rxbuf
)->conn_low
| (((struct ncp_reply_header
*)server
->rxbuf
)->conn_high
<< 8)) != server
->connection
) {
647 pr_err("tcp: Connection number mismatch\n");
648 __ncp_abort_request(server
, req
, -EIO
);
652 #ifdef CONFIG_NCPFS_PACKET_SIGNING
653 if (server
->sign_active
&& req
->tx_type
!= NCP_DEALLOC_SLOT_REQUEST
) {
654 if (sign_verify_reply(server
, server
->rxbuf
+ 6, req
->datalen
- 6, cpu_to_be32(req
->datalen
+ 16), &server
->rcv
.buf
.type
)) {
655 pr_err("tcp: Signature violation\n");
656 __ncp_abort_request(server
, req
, -EIO
);
661 ncp_finish_request(server
, req
, req
->datalen
);
663 __ncp_next_request(server
);
666 server
->rcv
.ptr
= (unsigned char*)&server
->rcv
.buf
;
667 server
->rcv
.len
= 10;
668 server
->rcv
.state
= 0;
671 ncp_finish_request(server
, server
->rcv
.creq
, -EIO
);
674 info_server(server
, 0, server
->unexpected_packet
.data
, server
->unexpected_packet
.len
);
680 void ncp_tcp_rcv_proc(struct work_struct
*work
)
682 struct ncp_server
*server
=
683 container_of(work
, struct ncp_server
, rcv
.tq
);
685 mutex_lock(&server
->rcv
.creq_mutex
);
686 __ncptcp_rcv_proc(server
);
687 mutex_unlock(&server
->rcv
.creq_mutex
);
690 void ncp_tcp_tx_proc(struct work_struct
*work
)
692 struct ncp_server
*server
=
693 container_of(work
, struct ncp_server
, tx
.tq
);
695 mutex_lock(&server
->rcv
.creq_mutex
);
696 __ncptcp_try_send(server
);
697 mutex_unlock(&server
->rcv
.creq_mutex
);
700 static int do_ncp_rpc_call(struct ncp_server
*server
, int size
,
701 unsigned char* reply_buf
, int max_reply_size
)
704 struct ncp_request_reply
*req
;
706 req
= ncp_alloc_req();
710 req
->reply_buf
= reply_buf
;
711 req
->datalen
= max_reply_size
;
712 req
->tx_iov
[1].iov_base
= server
->packet
;
713 req
->tx_iov
[1].iov_len
= size
;
715 req
->tx_totallen
= size
;
716 req
->tx_type
= *(u_int16_t
*)server
->packet
;
718 result
= ncp_add_request(server
, req
);
722 if (wait_event_interruptible(req
->wq
, req
->status
== RQ_DONE
)) {
723 ncp_abort_request(server
, req
, -EINTR
);
728 result
= req
->result
;
737 * We need the server to be locked here, so check!
740 static int ncp_do_request(struct ncp_server
*server
, int size
,
741 void* reply
, int max_reply_size
)
745 if (server
->lock
== 0) {
746 pr_err("Server not locked!\n");
749 if (!ncp_conn_valid(server
)) {
754 unsigned long mask
, flags
;
756 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
757 old_set
= current
->blocked
;
758 if (current
->flags
& PF_EXITING
)
761 mask
= sigmask(SIGKILL
);
762 if (server
->m
.flags
& NCP_MOUNT_INTR
) {
763 /* FIXME: This doesn't seem right at all. So, like,
764 we can't handle SIGINT and get whatever to stop?
765 What if we've blocked it ourselves? What about
766 alarms? Why, in fact, are we mucking with the
767 sigmask at all? -- r~ */
768 if (current
->sighand
->action
[SIGINT
- 1].sa
.sa_handler
== SIG_DFL
)
769 mask
|= sigmask(SIGINT
);
770 if (current
->sighand
->action
[SIGQUIT
- 1].sa
.sa_handler
== SIG_DFL
)
771 mask
|= sigmask(SIGQUIT
);
773 siginitsetinv(¤t
->blocked
, mask
);
775 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
777 result
= do_ncp_rpc_call(server
, size
, reply
, max_reply_size
);
779 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
780 current
->blocked
= old_set
;
782 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
785 ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result
);
790 /* ncp_do_request assures that at least a complete reply header is
791 * received. It assumes that server->current_size contains the ncp
794 int ncp_request2(struct ncp_server
*server
, int function
,
797 struct ncp_request_header
*h
;
798 struct ncp_reply_header
* reply
= rpl
;
801 h
= (struct ncp_request_header
*) (server
->packet
);
802 if (server
->has_subfunction
!= 0) {
803 *(__u16
*) & (h
->data
[0]) = htons(server
->current_size
- sizeof(*h
) - 2);
805 h
->type
= NCP_REQUEST
;
807 * The server shouldn't know or care what task is making a
808 * request, so we always use the same task number.
810 h
->task
= 2; /* (current->pid) & 0xff; */
811 h
->function
= function
;
813 result
= ncp_do_request(server
, server
->current_size
, reply
, size
);
815 ncp_dbg(1, "ncp_request_error: %d\n", result
);
818 server
->completion
= reply
->completion_code
;
819 server
->conn_status
= reply
->connection_state
;
820 server
->reply_size
= result
;
821 server
->ncp_reply_size
= result
- sizeof(struct ncp_reply_header
);
823 result
= reply
->completion_code
;
826 ncp_vdbg("completion code=%x\n", result
);
831 int ncp_connect(struct ncp_server
*server
)
833 struct ncp_request_header
*h
;
836 server
->connection
= 0xFFFF;
837 server
->sequence
= 255;
839 h
= (struct ncp_request_header
*) (server
->packet
);
840 h
->type
= NCP_ALLOC_SLOT_REQUEST
;
841 h
->task
= 2; /* see above */
844 result
= ncp_do_request(server
, sizeof(*h
), server
->packet
, server
->packet_size
);
847 server
->connection
= h
->conn_low
+ (h
->conn_high
* 256);
853 int ncp_disconnect(struct ncp_server
*server
)
855 struct ncp_request_header
*h
;
857 h
= (struct ncp_request_header
*) (server
->packet
);
858 h
->type
= NCP_DEALLOC_SLOT_REQUEST
;
859 h
->task
= 2; /* see above */
862 return ncp_do_request(server
, sizeof(*h
), server
->packet
, server
->packet_size
);
865 void ncp_lock_server(struct ncp_server
*server
)
867 mutex_lock(&server
->mutex
);
869 pr_warn("%s: was locked!\n", __func__
);
873 void ncp_unlock_server(struct ncp_server
*server
)
876 pr_warn("%s: was not locked!\n", __func__
);
880 mutex_unlock(&server
->mutex
);