// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ncpfs/sock.c
 *
 *  Copyright (C) 1992, 1993  Rick Sladkey
 *
 *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <net/scm.h>
#include <net/sock.h>
#include <linux/ipx.h>
#include <linux/poll.h>
#include <linux/file.h>

#include "ncp_fs.h"

#include "ncpsign_kernel.h"
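
/*
 * Small wrappers around kernel_recvmsg()/sock_sendmsg() that take a plain
 * buffer and build the single-element kvec themselves.  All socket I/O in
 * this file goes through these two helpers.
 */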
static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
	struct msghdr msg = {NULL, };
	struct kvec iov = {buf, size};
	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
}
static int _send(struct socket *sock, const void *buff, int len)
{
	struct msghdr msg = { .msg_flags = 0 };
	struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
	return sock_sendmsg(sock, &msg);
}
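
/*
 * One in-flight NCP request.  The entry is reference counted: one reference
 * belongs to the caller sleeping in do_ncp_rpc_call() and one to the
 * transmit/receive machinery (taken in ncp_add_request()), so whichever side
 * finishes last frees it via ncp_req_put().  'status' tracks where the
 * request currently lives: queued on tx.requests, being transmitted, waiting
 * for the reply, done, or abandoned by an interrupted caller.
 */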
struct ncp_request_reply {
	struct list_head req;
	wait_queue_head_t wq;
	atomic_t refs;
	unsigned char* reply_buf;
	size_t datalen;
	int result;
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct iov_iter from;
	struct kvec tx_iov[3];
	u_int16_t tx_type;
	u_int32_t sign[6];
};
static inline struct ncp_request_reply* ncp_alloc_req(void)
{
	struct ncp_request_reply *req;

	req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
	if (!req)
		return NULL;

	init_waitqueue_head(&req->wq);
	atomic_set(&req->refs, (1));
	req->status = RQ_IDLE;

	return req;
}
static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}

static void ncp_req_put(struct ncp_request_reply *req)
{
	if (atomic_dec_and_test(&req->refs))
		kfree(req);
}
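
/*
 * Socket callbacks.  They are invoked by the networking core, so they only
 * chain the saved original callback and kick the matching work item; the
 * real processing happens in process context from the workqueue, with
 * rcv.creq_mutex held.
 */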
void ncp_tcp_data_ready(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk);
	schedule_work(&server->rcv.tq);
}
void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}
void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}
void ncpdgram_timeout_call(struct timer_list *t)
{
	struct ncp_server *server = from_timer(server, t, timeout_tm);

	schedule_work(&server->timeout_tq);
}
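
/*
 * Complete a request: copy the reply into the caller's buffer (unless the
 * caller already gave up and the request was abandoned), mark it RQ_DONE,
 * wake the waiter and drop the connection-side reference.
 */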
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
static inline int get_conn_number(struct ncp_reply_header *rp)
{
	return rp->conn_low | (rp->conn_high << 8);
}
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
	case RQ_IDLE:
	case RQ_DONE:
		break;
	case RQ_QUEUED:
		list_del_init(&req->req);
		ncp_finish_request(server, req, err);
		break;
	case RQ_INPROGRESS:
		req->status = RQ_ABANDONED;
		break;
	case RQ_ABANDONED:
		break;
	}
}
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
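
/*
 * Transmit paths.  A UDP request goes out in one datagram; on TCP the stream
 * may accept only part of the request, so __ncptcp_try_send() keeps the
 * remaining iov_iter in rq->from and is re-run from ncp_tcp_write_space()
 * until everything has been pushed out, at which point the request moves
 * from tx.creq to rcv.creq.
 */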
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
{
	struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
	return sock_sendmsg(sock, &msg);
}
static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	msg.msg_iter = rq->from;
	result = sock_sendmsg(server->ncp_sock, &msg);

	if (result == -EAGAIN)
		return;

	if (result < 0) {
		pr_err("tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (!msg_data_left(&msg)) {
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	rq->from = msg.msg_iter;
}
static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}
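
/*
 * Start a request on a datagram socket: fill in the connection/sequence
 * header, optionally append the packet signature, remember the request as
 * rcv.creq and arm the retransmit timer.
 */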
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	signlen = sign_packet(server,
			req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(len), req->sign);
	if (signlen) {
		/* NCP over UDP appends signature */
		req->tx_iov[2].iov_base = req->sign;
		req->tx_iov[2].iov_len = signlen;
	}
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)
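
/*
 * Over TCP each request is prefixed with a small framing header built in
 * req->sign[]: the transmit magic, the total frame length, the protocol
 * version and a field derived from the expected reply size (see
 * ncptcp_start_request() below).  Incoming frames must start with
 * NCP_TCP_RCVD_MAGIC.
 */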
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(len + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(len + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	/* NCP over TCP prepends signature */
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov, 2, len + signlen);

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/* we copy the data so that we do not depend on the caller
	   buffer */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type == SOCK_STREAM)
		ncptcp_start_request(server, req);
	else
		ncpdgram_start_request(server, req);
}
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		pr_err("tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
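
/*
 * Called with rcv.creq_mutex held once the current request has finished:
 * pick the next queued request, if any, and start transmitting it.
 */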
static void __ncp_next_request(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	server->rcv.creq = NULL;
	if (list_empty(&server->tx.requests)) {
		return;
	}
	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
	list_del_init(&req->req);
	__ncp_start_request(server, req);
}
static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
{
	if (server->info_sock) {
		struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
		__be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
		struct kvec iov[2] = {
			{.iov_base = hdr, .iov_len = 8},
			{.iov_base = (void *)data, .iov_len = len},
		};

		iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
				iov, 2, len + 8);

		sock_sendmsg(server->info_sock, &msg);
	}
}
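
/*
 * Receive worker for datagram sockets.  Each incoming packet is peeked at
 * first: watchdog packets are answered directly, unexpected packet types are
 * forwarded to the userspace info socket, and a reply matching the current
 * request is (optionally) signature-checked and completes that request.
 */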
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket *sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					ncp_dbg(1, "recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					ncp_dbg(1, "too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								pr_info("Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply *req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}
void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
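
/*
 * Pull at most 'len' bytes from the TCP socket.  With a NULL buffer the data
 * is read into a static scratch buffer and discarded, which is how oversized
 * or unwanted frames are skipped.
 */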
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
	int result;

	if (buffer) {
		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
	} else {
		static unsigned char dummy[1024];

		if (len > sizeof(dummy)) {
			len = sizeof(dummy);
		}
		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
	}
	if (result < 0) {
		return result;
	}
	if (result > len) {
		pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len);
		return -EIO;
	}
	return result;
}
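
/*
 * TCP receive state machine (server->rcv.state):
 *   0 - reading the 10-byte frame header into rcv.buf
 *   1 - reading a reply body into server->rxbuf
 *   2 - skipping the body of an unwanted frame
 *   3 - skipping the body of an oversized reply (the request fails with -EIO)
 *   4 - reading the extra header bytes used when packet signing is active
 *   5 - reading an unexpected packet destined for the info socket
 */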
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					pr_err("tcp: error in recvmsg: %d\n", result);
				} else {
					ncp_dbg(1, "tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
		case 0:
			if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
				pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
				__ncptcp_abort(server);
				return -EIO;
			}
			datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
			if (datalen < 10) {
				pr_err("tcp: Unexpected reply len %d\n", datalen);
				__ncptcp_abort(server);
				return -EIO;
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active) {
				if (datalen < 18) {
					pr_err("tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
				server->rcv.buf.len = datalen - 8;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
				server->rcv.len = 8;
				server->rcv.state = 4;
				break;
			}
#endif
			type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
			if (type != NCP_REPLY) {
				if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
					*(__u16*)(server->unexpected_packet.data) = htons(type);
					server->unexpected_packet.len = datalen - 8;

					server->rcv.state = 5;
					server->rcv.ptr = server->unexpected_packet.data + 2;
					server->rcv.len = datalen - 10;
					break;
				}
				ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
				server->rcv.state = 2;
skipdata:;
				server->rcv.ptr = NULL;
				server->rcv.len = datalen - 10;
				break;
			}
			req = server->rcv.creq;
			if (!req) {
				ncp_dbg(1, "Reply without appropriate request\n");
				goto skipdata2;
			}
			if (datalen > req->datalen + 8) {
				pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
				server->rcv.state = 3;
				goto skipdata;
			}
			req->datalen = datalen - 8;
			((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
			server->rcv.ptr = server->rxbuf + 2;
			server->rcv.len = datalen - 10;
			server->rcv.state = 1;
			break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
		case 4:
			datalen = server->rcv.buf.len;
			type = ntohs(server->rcv.buf.type2);
			goto cont;
#endif
		case 1:
			req = server->rcv.creq;
			if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
				if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
					pr_err("tcp: Bad sequence number\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
				if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
					pr_err("tcp: Connection number mismatch\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
				if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
					pr_err("tcp: Signature violation\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#endif
			ncp_finish_request(server, req, req->datalen);
nextreq:;
			__ncp_next_request(server);
		case 2:
next:;
			server->rcv.ptr = (unsigned char*)&server->rcv.buf;
			server->rcv.len = 10;
			server->rcv.state = 0;
			break;
		case 3:
			ncp_finish_request(server, server->rcv.creq, -EIO);
			goto nextreq;
		case 5:
			info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
			goto next;
		}
	}
}
void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
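
/*
 * Synchronous RPC: allocate a request, queue it, and sleep interruptibly
 * until ncp_finish_request() marks it RQ_DONE.  If the sleep is interrupted
 * the request is aborted (or abandoned if already on the wire) and -EINTR
 * is returned.
 */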
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_type = *(u_int16_t *)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}

	result = req->result;

out:
	ncp_req_put(req);

	return result;
}
/*
 * We need the server to be locked here, so check!
 */

static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		pr_err("Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);

	return result;
}
/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		ncp_dbg(1, "ncp_request_error: %d\n", result);
		goto out;
	}
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;
	if (result != 0)
		ncp_vdbg("completion code=%x\n", result);
out:
	return result;
}
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;
	server->sequence = 255;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	server->connection = h->conn_low + (h->conn_high * 256);
	result = 0;
out:
	return result;
}
int ncp_disconnect(struct ncp_server *server)
{
	struct ncp_request_header *h;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_DEALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
}
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		pr_warn("%s: was locked!\n", __func__);
	server->lock = 1;
}
void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		pr_warn("%s: was not locked!\n", __func__);
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}