1 // SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ncpfs/sock.c
 *
 *  Copyright (C) 1992, 1993  Rick Sladkey
 *
 *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <net/scm.h>
#include <net/sock.h>
#include <linux/ipx.h>
#include <linux/poll.h>
#include <linux/file.h>

#include "ncp_fs.h"

#include "ncpsign_kernel.h"
38 static int _recv(struct socket
*sock
, void *buf
, int size
, unsigned flags
)
40 struct msghdr msg
= {NULL
, };
41 struct kvec iov
= {buf
, size
};
42 iov_iter_kvec(&msg
.msg_iter
, READ
| ITER_KVEC
, &iov
, 1, size
);
43 return sock_recvmsg(sock
, &msg
, flags
);
46 static int _send(struct socket
*sock
, const void *buff
, int len
)
48 struct msghdr msg
= { .msg_flags
= 0 };
49 struct kvec vec
= {.iov_base
= (void *)buff
, .iov_len
= len
};
50 iov_iter_kvec(&msg
.msg_iter
, WRITE
| ITER_KVEC
, &vec
, 1, len
);
51 return sock_sendmsg(sock
, &msg
);
54 struct ncp_request_reply
{
58 unsigned char* reply_buf
;
61 enum { RQ_DONE
, RQ_INPROGRESS
, RQ_QUEUED
, RQ_IDLE
, RQ_ABANDONED
} status
;
63 struct kvec tx_iov
[3];
68 static inline struct ncp_request_reply
* ncp_alloc_req(void)
70 struct ncp_request_reply
*req
;
72 req
= kmalloc(sizeof(struct ncp_request_reply
), GFP_KERNEL
);
76 init_waitqueue_head(&req
->wq
);
77 atomic_set(&req
->refs
, (1));
78 req
->status
= RQ_IDLE
;
83 static void ncp_req_get(struct ncp_request_reply
*req
)
85 atomic_inc(&req
->refs
);
88 static void ncp_req_put(struct ncp_request_reply
*req
)
90 if (atomic_dec_and_test(&req
->refs
))
94 void ncp_tcp_data_ready(struct sock
*sk
)
96 struct ncp_server
*server
= sk
->sk_user_data
;
98 server
->data_ready(sk
);
99 schedule_work(&server
->rcv
.tq
);
102 void ncp_tcp_error_report(struct sock
*sk
)
104 struct ncp_server
*server
= sk
->sk_user_data
;
106 server
->error_report(sk
);
107 schedule_work(&server
->rcv
.tq
);
110 void ncp_tcp_write_space(struct sock
*sk
)
112 struct ncp_server
*server
= sk
->sk_user_data
;
114 /* We do not need any locking: we first set tx.creq, and then we do sendmsg,
116 server
->write_space(sk
);
118 schedule_work(&server
->tx
.tq
);
121 void ncpdgram_timeout_call(struct timer_list
*t
)
123 struct ncp_server
*server
= from_timer(server
, t
, timeout_tm
);
125 schedule_work(&server
->timeout_tq
);
128 static inline void ncp_finish_request(struct ncp_server
*server
, struct ncp_request_reply
*req
, int result
)
130 req
->result
= result
;
131 if (req
->status
!= RQ_ABANDONED
)
132 memcpy(req
->reply_buf
, server
->rxbuf
, req
->datalen
);
133 req
->status
= RQ_DONE
;
134 wake_up_all(&req
->wq
);
138 static void __abort_ncp_connection(struct ncp_server
*server
)
140 struct ncp_request_reply
*req
;
142 ncp_invalidate_conn(server
);
143 del_timer(&server
->timeout_tm
);
144 while (!list_empty(&server
->tx
.requests
)) {
145 req
= list_entry(server
->tx
.requests
.next
, struct ncp_request_reply
, req
);
147 list_del_init(&req
->req
);
148 ncp_finish_request(server
, req
, -EIO
);
150 req
= server
->rcv
.creq
;
152 server
->rcv
.creq
= NULL
;
153 ncp_finish_request(server
, req
, -EIO
);
154 server
->rcv
.ptr
= NULL
;
155 server
->rcv
.state
= 0;
157 req
= server
->tx
.creq
;
159 server
->tx
.creq
= NULL
;
160 ncp_finish_request(server
, req
, -EIO
);
164 static inline int get_conn_number(struct ncp_reply_header
*rp
)
166 return rp
->conn_low
| (rp
->conn_high
<< 8);
169 static inline void __ncp_abort_request(struct ncp_server
*server
, struct ncp_request_reply
*req
, int err
)
171 /* If req is done, we got signal, but we also received answer... */
172 switch (req
->status
) {
177 list_del_init(&req
->req
);
178 ncp_finish_request(server
, req
, err
);
181 req
->status
= RQ_ABANDONED
;
188 static inline void ncp_abort_request(struct ncp_server
*server
, struct ncp_request_reply
*req
, int err
)
190 mutex_lock(&server
->rcv
.creq_mutex
);
191 __ncp_abort_request(server
, req
, err
);
192 mutex_unlock(&server
->rcv
.creq_mutex
);
195 static inline void __ncptcp_abort(struct ncp_server
*server
)
197 __abort_ncp_connection(server
);
200 static int ncpdgram_send(struct socket
*sock
, struct ncp_request_reply
*req
)
202 struct msghdr msg
= { .msg_iter
= req
->from
, .msg_flags
= MSG_DONTWAIT
};
203 return sock_sendmsg(sock
, &msg
);
206 static void __ncptcp_try_send(struct ncp_server
*server
)
208 struct ncp_request_reply
*rq
;
209 struct msghdr msg
= { .msg_flags
= MSG_NOSIGNAL
| MSG_DONTWAIT
};
212 rq
= server
->tx
.creq
;
216 msg
.msg_iter
= rq
->from
;
217 result
= sock_sendmsg(server
->ncp_sock
, &msg
);
219 if (result
== -EAGAIN
)
223 pr_err("tcp: Send failed: %d\n", result
);
224 __ncp_abort_request(server
, rq
, result
);
227 if (!msg_data_left(&msg
)) {
228 server
->rcv
.creq
= rq
;
229 server
->tx
.creq
= NULL
;
232 rq
->from
= msg
.msg_iter
;
235 static inline void ncp_init_header(struct ncp_server
*server
, struct ncp_request_reply
*req
, struct ncp_request_header
*h
)
237 req
->status
= RQ_INPROGRESS
;
238 h
->conn_low
= server
->connection
;
239 h
->conn_high
= server
->connection
>> 8;
240 h
->sequence
= ++server
->sequence
;
243 static void ncpdgram_start_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
245 size_t signlen
, len
= req
->tx_iov
[1].iov_len
;
246 struct ncp_request_header
*h
= req
->tx_iov
[1].iov_base
;
248 ncp_init_header(server
, req
, h
);
249 signlen
= sign_packet(server
,
250 req
->tx_iov
[1].iov_base
+ sizeof(struct ncp_request_header
) - 1,
251 len
- sizeof(struct ncp_request_header
) + 1,
252 cpu_to_le32(len
), req
->sign
);
254 /* NCP over UDP appends signature */
255 req
->tx_iov
[2].iov_base
= req
->sign
;
256 req
->tx_iov
[2].iov_len
= signlen
;
258 iov_iter_kvec(&req
->from
, WRITE
| ITER_KVEC
,
259 req
->tx_iov
+ 1, signlen
? 2 : 1, len
+ signlen
);
260 server
->rcv
.creq
= req
;
261 server
->timeout_last
= server
->m
.time_out
;
262 server
->timeout_retries
= server
->m
.retry_count
;
263 ncpdgram_send(server
->ncp_sock
, req
);
264 mod_timer(&server
->timeout_tm
, jiffies
+ server
->m
.time_out
);
267 #define NCP_TCP_XMIT_MAGIC (0x446D6454)
268 #define NCP_TCP_XMIT_VERSION (1)
269 #define NCP_TCP_RCVD_MAGIC (0x744E6350)
271 static void ncptcp_start_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
273 size_t signlen
, len
= req
->tx_iov
[1].iov_len
;
274 struct ncp_request_header
*h
= req
->tx_iov
[1].iov_base
;
276 ncp_init_header(server
, req
, h
);
277 signlen
= sign_packet(server
, req
->tx_iov
[1].iov_base
+ sizeof(struct ncp_request_header
) - 1,
278 len
- sizeof(struct ncp_request_header
) + 1,
279 cpu_to_be32(len
+ 24), req
->sign
+ 4) + 16;
281 req
->sign
[0] = htonl(NCP_TCP_XMIT_MAGIC
);
282 req
->sign
[1] = htonl(len
+ signlen
);
283 req
->sign
[2] = htonl(NCP_TCP_XMIT_VERSION
);
284 req
->sign
[3] = htonl(req
->datalen
+ 8);
285 /* NCP over TCP prepends signature */
286 req
->tx_iov
[0].iov_base
= req
->sign
;
287 req
->tx_iov
[0].iov_len
= signlen
;
288 iov_iter_kvec(&req
->from
, WRITE
| ITER_KVEC
,
289 req
->tx_iov
, 2, len
+ signlen
);
291 server
->tx
.creq
= req
;
292 __ncptcp_try_send(server
);
295 static inline void __ncp_start_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
297 /* we copy the data so that we do not depend on the caller
299 memcpy(server
->txbuf
, req
->tx_iov
[1].iov_base
, req
->tx_iov
[1].iov_len
);
300 req
->tx_iov
[1].iov_base
= server
->txbuf
;
302 if (server
->ncp_sock
->type
== SOCK_STREAM
)
303 ncptcp_start_request(server
, req
);
305 ncpdgram_start_request(server
, req
);
308 static int ncp_add_request(struct ncp_server
*server
, struct ncp_request_reply
*req
)
310 mutex_lock(&server
->rcv
.creq_mutex
);
311 if (!ncp_conn_valid(server
)) {
312 mutex_unlock(&server
->rcv
.creq_mutex
);
313 pr_err("tcp: Server died\n");
317 if (server
->tx
.creq
|| server
->rcv
.creq
) {
318 req
->status
= RQ_QUEUED
;
319 list_add_tail(&req
->req
, &server
->tx
.requests
);
320 mutex_unlock(&server
->rcv
.creq_mutex
);
323 __ncp_start_request(server
, req
);
324 mutex_unlock(&server
->rcv
.creq_mutex
);
328 static void __ncp_next_request(struct ncp_server
*server
)
330 struct ncp_request_reply
*req
;
332 server
->rcv
.creq
= NULL
;
333 if (list_empty(&server
->tx
.requests
)) {
336 req
= list_entry(server
->tx
.requests
.next
, struct ncp_request_reply
, req
);
337 list_del_init(&req
->req
);
338 __ncp_start_request(server
, req
);
341 static void info_server(struct ncp_server
*server
, unsigned int id
, const void * data
, size_t len
)
343 if (server
->info_sock
) {
344 struct msghdr msg
= { .msg_flags
= MSG_NOSIGNAL
};
345 __be32 hdr
[2] = {cpu_to_be32(len
+ 8), cpu_to_be32(id
)};
346 struct kvec iov
[2] = {
347 {.iov_base
= hdr
, .iov_len
= 8},
348 {.iov_base
= (void *)data
, .iov_len
= len
},
351 iov_iter_kvec(&msg
.msg_iter
, ITER_KVEC
| WRITE
,
354 sock_sendmsg(server
->info_sock
, &msg
);
358 void ncpdgram_rcv_proc(struct work_struct
*work
)
360 struct ncp_server
*server
=
361 container_of(work
, struct ncp_server
, rcv
.tq
);
364 sock
= server
->ncp_sock
;
367 struct ncp_reply_header reply
;
370 result
= _recv(sock
, &reply
, sizeof(reply
), MSG_PEEK
| MSG_DONTWAIT
);
374 if (result
>= sizeof(reply
)) {
375 struct ncp_request_reply
*req
;
377 if (reply
.type
== NCP_WATCHDOG
) {
378 unsigned char buf
[10];
380 if (server
->connection
!= get_conn_number(&reply
)) {
383 result
= _recv(sock
, buf
, sizeof(buf
), MSG_DONTWAIT
);
385 ncp_dbg(1, "recv failed with %d\n", result
);
389 ncp_dbg(1, "too short (%u) watchdog packet\n", result
);
393 ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf
[9]);
397 _send(sock
, buf
, sizeof(buf
));
400 if (reply
.type
!= NCP_POSITIVE_ACK
&& reply
.type
!= NCP_REPLY
) {
401 result
= _recv(sock
, server
->unexpected_packet
.data
, sizeof(server
->unexpected_packet
.data
), MSG_DONTWAIT
);
405 info_server(server
, 0, server
->unexpected_packet
.data
, result
);
408 mutex_lock(&server
->rcv
.creq_mutex
);
409 req
= server
->rcv
.creq
;
410 if (req
&& (req
->tx_type
== NCP_ALLOC_SLOT_REQUEST
|| (server
->sequence
== reply
.sequence
&&
411 server
->connection
== get_conn_number(&reply
)))) {
412 if (reply
.type
== NCP_POSITIVE_ACK
) {
413 server
->timeout_retries
= server
->m
.retry_count
;
414 server
->timeout_last
= NCP_MAX_RPC_TIMEOUT
;
415 mod_timer(&server
->timeout_tm
, jiffies
+ NCP_MAX_RPC_TIMEOUT
);
416 } else if (reply
.type
== NCP_REPLY
) {
417 result
= _recv(sock
, server
->rxbuf
, req
->datalen
, MSG_DONTWAIT
);
418 #ifdef CONFIG_NCPFS_PACKET_SIGNING
419 if (result
>= 0 && server
->sign_active
&& req
->tx_type
!= NCP_DEALLOC_SLOT_REQUEST
) {
420 if (result
< 8 + 8) {
426 hdrl
= sock
->sk
->sk_family
== AF_INET
? 8 : 6;
427 if (sign_verify_reply(server
, server
->rxbuf
+ hdrl
, result
- hdrl
, cpu_to_le32(result
), server
->rxbuf
+ result
)) {
428 pr_info("Signature violation\n");
434 del_timer(&server
->timeout_tm
);
435 server
->rcv
.creq
= NULL
;
436 ncp_finish_request(server
, req
, result
);
437 __ncp_next_request(server
);
438 mutex_unlock(&server
->rcv
.creq_mutex
);
442 mutex_unlock(&server
->rcv
.creq_mutex
);
445 _recv(sock
, &reply
, sizeof(reply
), MSG_DONTWAIT
);
449 static void __ncpdgram_timeout_proc(struct ncp_server
*server
)
451 /* If timer is pending, we are processing another request... */
452 if (!timer_pending(&server
->timeout_tm
)) {
453 struct ncp_request_reply
* req
;
455 req
= server
->rcv
.creq
;
459 if (server
->m
.flags
& NCP_MOUNT_SOFT
) {
460 if (server
->timeout_retries
-- == 0) {
461 __ncp_abort_request(server
, req
, -ETIMEDOUT
);
466 ncpdgram_send(server
->ncp_sock
, req
);
467 timeout
= server
->timeout_last
<< 1;
468 if (timeout
> NCP_MAX_RPC_TIMEOUT
) {
469 timeout
= NCP_MAX_RPC_TIMEOUT
;
471 server
->timeout_last
= timeout
;
472 mod_timer(&server
->timeout_tm
, jiffies
+ timeout
);
477 void ncpdgram_timeout_proc(struct work_struct
*work
)
479 struct ncp_server
*server
=
480 container_of(work
, struct ncp_server
, timeout_tq
);
481 mutex_lock(&server
->rcv
.creq_mutex
);
482 __ncpdgram_timeout_proc(server
);
483 mutex_unlock(&server
->rcv
.creq_mutex
);
486 static int do_tcp_rcv(struct ncp_server
*server
, void *buffer
, size_t len
)
491 result
= _recv(server
->ncp_sock
, buffer
, len
, MSG_DONTWAIT
);
493 static unsigned char dummy
[1024];
495 if (len
> sizeof(dummy
)) {
498 result
= _recv(server
->ncp_sock
, dummy
, len
, MSG_DONTWAIT
);
504 pr_err("tcp: bug in recvmsg (%u > %zu)\n", result
, len
);
510 static int __ncptcp_rcv_proc(struct ncp_server
*server
)
512 /* We have to check the result, so store the complete header */
515 struct ncp_request_reply
*req
;
519 while (server
->rcv
.len
) {
520 result
= do_tcp_rcv(server
, server
->rcv
.ptr
, server
->rcv
.len
);
521 if (result
== -EAGAIN
) {
525 req
= server
->rcv
.creq
;
527 __ncp_abort_request(server
, req
, -EIO
);
529 __ncptcp_abort(server
);
532 pr_err("tcp: error in recvmsg: %d\n", result
);
534 ncp_dbg(1, "tcp: EOF\n");
538 if (server
->rcv
.ptr
) {
539 server
->rcv
.ptr
+= result
;
541 server
->rcv
.len
-= result
;
543 switch (server
->rcv
.state
) {
545 if (server
->rcv
.buf
.magic
!= htonl(NCP_TCP_RCVD_MAGIC
)) {
546 pr_err("tcp: Unexpected reply type %08X\n", ntohl(server
->rcv
.buf
.magic
));
547 __ncptcp_abort(server
);
550 datalen
= ntohl(server
->rcv
.buf
.len
) & 0x0FFFFFFF;
552 pr_err("tcp: Unexpected reply len %d\n", datalen
);
553 __ncptcp_abort(server
);
556 #ifdef CONFIG_NCPFS_PACKET_SIGNING
557 if (server
->sign_active
) {
559 pr_err("tcp: Unexpected reply len %d\n", datalen
);
560 __ncptcp_abort(server
);
563 server
->rcv
.buf
.len
= datalen
- 8;
564 server
->rcv
.ptr
= (unsigned char*)&server
->rcv
.buf
.p1
;
566 server
->rcv
.state
= 4;
570 type
= ntohs(server
->rcv
.buf
.type
);
571 #ifdef CONFIG_NCPFS_PACKET_SIGNING
574 if (type
!= NCP_REPLY
) {
575 if (datalen
- 8 <= sizeof(server
->unexpected_packet
.data
)) {
576 *(__u16
*)(server
->unexpected_packet
.data
) = htons(type
);
577 server
->unexpected_packet
.len
= datalen
- 8;
579 server
->rcv
.state
= 5;
580 server
->rcv
.ptr
= server
->unexpected_packet
.data
+ 2;
581 server
->rcv
.len
= datalen
- 10;
584 ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type
);
586 server
->rcv
.state
= 2;
588 server
->rcv
.ptr
= NULL
;
589 server
->rcv
.len
= datalen
- 10;
592 req
= server
->rcv
.creq
;
594 ncp_dbg(1, "Reply without appropriate request\n");
597 if (datalen
> req
->datalen
+ 8) {
598 pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen
, req
->datalen
+ 8);
599 server
->rcv
.state
= 3;
602 req
->datalen
= datalen
- 8;
603 ((struct ncp_reply_header
*)server
->rxbuf
)->type
= NCP_REPLY
;
604 server
->rcv
.ptr
= server
->rxbuf
+ 2;
605 server
->rcv
.len
= datalen
- 10;
606 server
->rcv
.state
= 1;
608 #ifdef CONFIG_NCPFS_PACKET_SIGNING
610 datalen
= server
->rcv
.buf
.len
;
611 type
= ntohs(server
->rcv
.buf
.type2
);
615 req
= server
->rcv
.creq
;
616 if (req
->tx_type
!= NCP_ALLOC_SLOT_REQUEST
) {
617 if (((struct ncp_reply_header
*)server
->rxbuf
)->sequence
!= server
->sequence
) {
618 pr_err("tcp: Bad sequence number\n");
619 __ncp_abort_request(server
, req
, -EIO
);
622 if ((((struct ncp_reply_header
*)server
->rxbuf
)->conn_low
| (((struct ncp_reply_header
*)server
->rxbuf
)->conn_high
<< 8)) != server
->connection
) {
623 pr_err("tcp: Connection number mismatch\n");
624 __ncp_abort_request(server
, req
, -EIO
);
628 #ifdef CONFIG_NCPFS_PACKET_SIGNING
629 if (server
->sign_active
&& req
->tx_type
!= NCP_DEALLOC_SLOT_REQUEST
) {
630 if (sign_verify_reply(server
, server
->rxbuf
+ 6, req
->datalen
- 6, cpu_to_be32(req
->datalen
+ 16), &server
->rcv
.buf
.type
)) {
631 pr_err("tcp: Signature violation\n");
632 __ncp_abort_request(server
, req
, -EIO
);
637 ncp_finish_request(server
, req
, req
->datalen
);
639 __ncp_next_request(server
);
642 server
->rcv
.ptr
= (unsigned char*)&server
->rcv
.buf
;
643 server
->rcv
.len
= 10;
644 server
->rcv
.state
= 0;
647 ncp_finish_request(server
, server
->rcv
.creq
, -EIO
);
650 info_server(server
, 0, server
->unexpected_packet
.data
, server
->unexpected_packet
.len
);
656 void ncp_tcp_rcv_proc(struct work_struct
*work
)
658 struct ncp_server
*server
=
659 container_of(work
, struct ncp_server
, rcv
.tq
);
661 mutex_lock(&server
->rcv
.creq_mutex
);
662 __ncptcp_rcv_proc(server
);
663 mutex_unlock(&server
->rcv
.creq_mutex
);
666 void ncp_tcp_tx_proc(struct work_struct
*work
)
668 struct ncp_server
*server
=
669 container_of(work
, struct ncp_server
, tx
.tq
);
671 mutex_lock(&server
->rcv
.creq_mutex
);
672 __ncptcp_try_send(server
);
673 mutex_unlock(&server
->rcv
.creq_mutex
);
676 static int do_ncp_rpc_call(struct ncp_server
*server
, int size
,
677 unsigned char* reply_buf
, int max_reply_size
)
680 struct ncp_request_reply
*req
;
682 req
= ncp_alloc_req();
686 req
->reply_buf
= reply_buf
;
687 req
->datalen
= max_reply_size
;
688 req
->tx_iov
[1].iov_base
= server
->packet
;
689 req
->tx_iov
[1].iov_len
= size
;
690 req
->tx_type
= *(u_int16_t
*)server
->packet
;
692 result
= ncp_add_request(server
, req
);
696 if (wait_event_interruptible(req
->wq
, req
->status
== RQ_DONE
)) {
697 ncp_abort_request(server
, req
, -EINTR
);
702 result
= req
->result
;
711 * We need the server to be locked here, so check!
714 static int ncp_do_request(struct ncp_server
*server
, int size
,
715 void* reply
, int max_reply_size
)
719 if (server
->lock
== 0) {
720 pr_err("Server not locked!\n");
723 if (!ncp_conn_valid(server
)) {
728 unsigned long mask
, flags
;
730 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
731 old_set
= current
->blocked
;
732 if (current
->flags
& PF_EXITING
)
735 mask
= sigmask(SIGKILL
);
736 if (server
->m
.flags
& NCP_MOUNT_INTR
) {
737 /* FIXME: This doesn't seem right at all. So, like,
738 we can't handle SIGINT and get whatever to stop?
739 What if we've blocked it ourselves? What about
740 alarms? Why, in fact, are we mucking with the
741 sigmask at all? -- r~ */
742 if (current
->sighand
->action
[SIGINT
- 1].sa
.sa_handler
== SIG_DFL
)
743 mask
|= sigmask(SIGINT
);
744 if (current
->sighand
->action
[SIGQUIT
- 1].sa
.sa_handler
== SIG_DFL
)
745 mask
|= sigmask(SIGQUIT
);
747 siginitsetinv(¤t
->blocked
, mask
);
749 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
751 result
= do_ncp_rpc_call(server
, size
, reply
, max_reply_size
);
753 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
754 current
->blocked
= old_set
;
756 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
759 ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result
);
764 /* ncp_do_request assures that at least a complete reply header is
765 * received. It assumes that server->current_size contains the ncp
768 int ncp_request2(struct ncp_server
*server
, int function
,
771 struct ncp_request_header
*h
;
772 struct ncp_reply_header
* reply
= rpl
;
775 h
= (struct ncp_request_header
*) (server
->packet
);
776 if (server
->has_subfunction
!= 0) {
777 *(__u16
*) & (h
->data
[0]) = htons(server
->current_size
- sizeof(*h
) - 2);
779 h
->type
= NCP_REQUEST
;
781 * The server shouldn't know or care what task is making a
782 * request, so we always use the same task number.
784 h
->task
= 2; /* (current->pid) & 0xff; */
785 h
->function
= function
;
787 result
= ncp_do_request(server
, server
->current_size
, reply
, size
);
789 ncp_dbg(1, "ncp_request_error: %d\n", result
);
792 server
->completion
= reply
->completion_code
;
793 server
->conn_status
= reply
->connection_state
;
794 server
->reply_size
= result
;
795 server
->ncp_reply_size
= result
- sizeof(struct ncp_reply_header
);
797 result
= reply
->completion_code
;
800 ncp_vdbg("completion code=%x\n", result
);
805 int ncp_connect(struct ncp_server
*server
)
807 struct ncp_request_header
*h
;
810 server
->connection
= 0xFFFF;
811 server
->sequence
= 255;
813 h
= (struct ncp_request_header
*) (server
->packet
);
814 h
->type
= NCP_ALLOC_SLOT_REQUEST
;
815 h
->task
= 2; /* see above */
818 result
= ncp_do_request(server
, sizeof(*h
), server
->packet
, server
->packet_size
);
821 server
->connection
= h
->conn_low
+ (h
->conn_high
* 256);
827 int ncp_disconnect(struct ncp_server
*server
)
829 struct ncp_request_header
*h
;
831 h
= (struct ncp_request_header
*) (server
->packet
);
832 h
->type
= NCP_DEALLOC_SLOT_REQUEST
;
833 h
->task
= 2; /* see above */
836 return ncp_do_request(server
, sizeof(*h
), server
->packet
, server
->packet_size
);
839 void ncp_lock_server(struct ncp_server
*server
)
841 mutex_lock(&server
->mutex
);
843 pr_warn("%s: was locked!\n", __func__
);
847 void ncp_unlock_server(struct ncp_server
*server
)
850 pr_warn("%s: was not locked!\n", __func__
);
854 mutex_unlock(&server
->mutex
);