/*
 * linux/fs/ncpfs/sock.c
 *
 * Copyright (C) 1992, 1993 Rick Sladkey
 *
 * Modified 1995, 1996 by Volker Lendecke to be usable for ncp
 * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/ipx.h>
#include <linux/poll.h>
#include <linux/file.h>

#include "ncp_fs.h"

#include "ncpsign_kernel.h"
static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
	struct msghdr msg = {NULL, };
	struct kvec iov = {buf, size};
	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
}
static int _send(struct socket *sock, const void *buff, int len)
{
	struct msghdr msg = { .msg_flags = 0 };
	struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
	return sock_sendmsg(sock, &msg);
}
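/*
 * One outstanding NCP request/reply exchange.  Requests are reference
 * counted (see ncp_req_get/ncp_req_put below) because both the caller
 * in do_ncp_rpc_call() and the receive path may hold the request; the
 * status field tracks the request through queueing, transmission and
 * completion (or abandonment after a signal).
 */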
struct ncp_request_reply {
	struct list_head req;
	wait_queue_head_t wq;
	atomic_t refs;
	unsigned char* reply_buf;
	size_t datalen;
	int result;
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct iov_iter from;
	struct kvec tx_iov[3];
	u_int16_t tx_type;
	u_int32_t sign[6];
};
static inline struct ncp_request_reply* ncp_alloc_req(void)
{
	struct ncp_request_reply *req;

	req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
	if (!req)
		return NULL;

	init_waitqueue_head(&req->wq);
	atomic_set(&req->refs, (1));
	req->status = RQ_IDLE;

	return req;
}
static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}
static void ncp_req_put(struct ncp_request_reply *req)
{
	if (atomic_dec_and_test(&req->refs))
		kfree(req);
}
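/*
 * Socket callbacks.  The ncp_server is attached to the socket via
 * sk->sk_user_data; each callback chains to the saved original
 * callback and then defers the real work to a workqueue item, since
 * these may be invoked in atomic (softirq) context.
 */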
void ncp_tcp_data_ready(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk);
	schedule_work(&server->rcv.tq);
}
void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}
void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}
void ncpdgram_timeout_call(unsigned long v)
{
	struct ncp_server *server = (void*)v;

	schedule_work(&server->timeout_tq);
}
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
static inline int get_conn_number(struct ncp_reply_header *rp)
{
	return rp->conn_low | (rp->conn_high << 8);
}
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
		case RQ_IDLE:
		case RQ_DONE:
			break;
		case RQ_QUEUED:
			list_del_init(&req->req);
			ncp_finish_request(server, req, err);
			break;
		case RQ_INPROGRESS:
			req->status = RQ_ABANDONED;
			break;
		case RQ_ABANDONED:
			break;
	}
}
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
{
	struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
	return sock_sendmsg(sock, &msg);
}
static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	msg.msg_iter = rq->from;
	result = sock_sendmsg(server->ncp_sock, &msg);

	if (result == -EAGAIN)
		return;

	if (result < 0) {
		pr_err("tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (!msg_data_left(&msg)) {
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	rq->from = msg.msg_iter;
}
static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}
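/*
 * Transmit-side framing: over UDP the (optional) packet signature is
 * appended as an extra iovec, while over TCP a 16- or 24-byte header
 * (magic, length, version, reply-buffer size, plus the signature when
 * signing is enabled) built in req->sign is prepended to the request
 * proper.
 */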
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	signlen = sign_packet(server,
			req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(len), req->sign);
	if (signlen) {
		/* NCP over UDP appends signature */
		req->tx_iov[2].iov_base = req->sign;
		req->tx_iov[2].iov_len = signlen;
	}
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(len + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(len + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	/* NCP over TCP prepends signature */
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov, 2, len + signlen);

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/* we copy the data so that we do not depend on the caller
	   buffer */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type == SOCK_STREAM)
		ncptcp_start_request(server, req);
	else
		ncpdgram_start_request(server, req);
}
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		pr_err("tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
static void __ncp_next_request(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	server->rcv.creq = NULL;
	if (list_empty(&server->tx.requests)) {
		return;
	}
	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
	list_del_init(&req->req);
	__ncp_start_request(server, req);
}
static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
{
	if (server->info_sock) {
		struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
		__be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
		struct kvec iov[2] = {
			{.iov_base = hdr, .iov_len = 8},
			{.iov_base = (void *)data, .iov_len = len},
		};

		iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
				iov, 2, len + 8);

		sock_sendmsg(server->info_sock, &msg);
	}
}
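/*
 * UDP receive worker: peeks at each incoming datagram, answers
 * watchdog packets, hands unexpected packets to the info socket and
 * matches positive ACKs / replies against the current request.
 */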
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					ncp_dbg(1, "recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					ncp_dbg(1, "too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								pr_info("Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
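/*
 * UDP retransmit path: when the timeout timer fires, the current
 * request is resent and the timeout is doubled up to
 * NCP_MAX_RPC_TIMEOUT; "soft" mounts give up after the configured
 * number of retries.
 */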
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply* req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}
void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);
	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
	int result;

	if (buffer) {
		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
	} else {
		static unsigned char dummy[1024];

		if (len > sizeof(dummy)) {
			len = sizeof(dummy);
		}
		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
	}
	if (result < 0) {
		return result;
	}
	if (result > len) {
		pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len);
		return -EIO;
	}
	return result;
}
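/*
 * TCP receive state machine.  rcv.state: 0 = reading the 10-byte
 * record header, 1 = reply body received into rxbuf, 2/3 = skipping
 * data (3 also fails the current request), 4 = reading the rest of
 * the header when packet signing is active, 5 = unexpected packet
 * destined for the info socket.
 */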
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					pr_err("tcp: error in recvmsg: %d\n", result);
				} else {
					ncp_dbg(1, "tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
			case 0:
				if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
					pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
					__ncptcp_abort(server);
					return -EIO;
				}
				datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
				if (datalen < 10) {
					pr_err("tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active) {
					if (datalen < 18) {
						pr_err("tcp: Unexpected reply len %d\n", datalen);
						__ncptcp_abort(server);
						return -EIO;
					}
					server->rcv.buf.len = datalen - 8;
					server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
					server->rcv.len = 8;
					server->rcv.state = 4;
					break;
				}
#endif
				type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
				if (type != NCP_REPLY) {
					if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
						*(__u16*)(server->unexpected_packet.data) = htons(type);
						server->unexpected_packet.len = datalen - 8;

						server->rcv.state = 5;
						server->rcv.ptr = server->unexpected_packet.data + 2;
						server->rcv.len = datalen - 10;
						break;
					}
					ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
					server->rcv.state = 2;
skipdata:;
					server->rcv.ptr = NULL;
					server->rcv.len = datalen - 10;
					break;
				}
				req = server->rcv.creq;
				if (!req) {
					ncp_dbg(1, "Reply without appropriate request\n");
					goto skipdata2;
				}
				if (datalen > req->datalen + 8) {
					pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
					server->rcv.state = 3;
					goto skipdata;
				}
				req->datalen = datalen - 8;
				((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
				server->rcv.ptr = server->rxbuf + 2;
				server->rcv.len = datalen - 10;
				server->rcv.state = 1;
				break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			case 4:
				datalen = server->rcv.buf.len;
				type = ntohs(server->rcv.buf.type2);
				goto cont;
#endif
			case 1:
				req = server->rcv.creq;
				if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
					if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
						pr_err("tcp: Bad sequence number\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
					if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
						pr_err("tcp: Connection number mismatch\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
					if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
						pr_err("tcp: Signature violation\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#endif
				ncp_finish_request(server, req, req->datalen);
			nextreq:;
				__ncp_next_request(server);
			case 2:
			next:;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf;
				server->rcv.len = 10;
				server->rcv.state = 0;
				break;
			case 3:
				ncp_finish_request(server, server->rcv.creq, -EIO);
				goto nextreq;
			case 5:
				info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
				goto next;
		}
	}
}
void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
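/*
 * Issue one NCP RPC: allocate a request, queue it (or start it
 * immediately if the line is idle) and sleep interruptibly until the
 * receive path marks it RQ_DONE.
 */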
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_type = *(u_int16_t*)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}

	result = req->result;

out:
	ncp_req_put(req);

	return result;
}
/*
 * We need the server to be locked here, so check!
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		pr_err("Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);

	return result;
}
/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		ncp_dbg(1, "ncp_request_error: %d\n", result);
		goto out;
	}
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		ncp_vdbg("completion code=%x\n", result);
out:
	return result;
}
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;
	server->sequence = 255;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	server->connection = h->conn_low + (h->conn_high * 256);
	result = 0;
out:
	return result;
}
int ncp_disconnect(struct ncp_server *server)
{
	struct ncp_request_header *h;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_DEALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
}
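/*
 * ncp_lock_server/ncp_unlock_server serialize users of server->packet
 * and the request path; ncp_do_request() refuses to run unless the
 * server is locked.
 */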
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		pr_warn("%s: was locked!\n", __func__);
	server->lock = 1;
}
void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		pr_warn("%s: was not locked!\n", __func__);
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}