/*
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"
#define ERREQFLUSH	1
#define SCHED_TIMEOUT	10
#define MAXPOLLWADDR	2
enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
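/*
 * Note on the flags above: Rpending/Wpending latch the transport's poll
 * state between poll cycles, while Rworksched/Wworksched guarantee that at
 * most one read (resp. write) work item is queued on the workqueue at a
 * time; the workers clear their *worksched bit when they finish or decide
 * not to reschedule themselves.
 */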
struct v9fs_mux_poll_task;

struct v9fs_req {
	int tag;
	struct v9fs_fcall *tcall;
	struct v9fs_fcall *rcall;
	int err;
	v9fs_mux_req_callback cb;
	void *cba;
	struct list_head req_list;
};
struct v9fs_mux_data {
	spinlock_t lock;
	struct list_head mux_list;
	struct v9fs_mux_poll_task *poll_task;
	int msize;
	unsigned char *extended;
	struct v9fs_transport *trans;
	struct v9fs_idpool tidpool;
	int err;
	wait_queue_head_t equeue;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct v9fs_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	wait_queue_t poll_wait[MAXPOLLWADDR];
	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};
struct v9fs_mux_poll_task {
	struct task_struct *task;
	struct list_head mux_list;
	int muxnum;
};

struct v9fs_mux_rpc {
	struct v9fs_mux_data *m;
	struct v9fs_req *req;
	int err;
	struct v9fs_fcall *rcall;
	wait_queue_head_t wqueue;
};
static int v9fs_poll_proc(void *);
static void v9fs_read_work(void *);
static void v9fs_write_work(void *);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
			  poll_table *p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DECLARE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];
int v9fs_mux_global_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
		v9fs_mux_poll_tasks[i].task = NULL;

	v9fs_mux_wq = create_workqueue("v9fs");
	if (!v9fs_mux_wq)
		return -ENOMEM;

	return 0;
}
void v9fs_mux_global_exit(void)
{
	destroy_workqueue(v9fs_mux_wq);
}
/**
 * v9fs_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation distributes the mounts evenly over the
 * poll tasks (ceiling division), capped at the size of the
 * v9fs_mux_poll_tasks array.
 */
inline int v9fs_mux_calc_poll_procs(int muxnum)
{
	int n;

	if (v9fs_mux_poll_task_num)
		n = muxnum / v9fs_mux_poll_task_num +
		    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
	else
		n = 1;

	if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
		n = ARRAY_SIZE(v9fs_mux_poll_tasks);

	return n;
}
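/*
 * Example: with v9fs_mux_poll_task_num == 3 and muxnum == 7 the function
 * returns 7/3 + (7%3 ? 1 : 0) == 3, i.e. up to three mounts are assigned
 * to each poll task before a new task is worth creating.
 */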
static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
	int i, n;
	struct v9fs_mux_poll_task *vpt, *vptlast;
	struct task_struct *pproc;

	dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
		v9fs_mux_poll_task_num);
	down(&v9fs_mux_task_lock);

	n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
	if (n > v9fs_mux_poll_task_num) {
		for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
			if (v9fs_mux_poll_tasks[i].task == NULL) {
				vpt = &v9fs_mux_poll_tasks[i];
				dprintk(DEBUG_MUX, "create proc %p\n", vpt);
				pproc = kthread_create(v9fs_poll_proc, vpt,
						       "v9fs-poll");
				if (!IS_ERR(pproc)) {
					vpt->task = pproc;
					INIT_LIST_HEAD(&vpt->mux_list);
					vpt->muxnum = 0;
					v9fs_mux_poll_task_num++;
					wake_up_process(vpt->task);
				}
				break;
			}
		}

		if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
			dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
	}

	n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
	    ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

	vptlast = NULL;
	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
		vpt = &v9fs_mux_poll_tasks[i];
		if (vpt->task != NULL) {
			vptlast = vpt;
			if (vpt->muxnum < n) {
				dprintk(DEBUG_MUX, "put in proc %d\n", i);
				list_add(&m->mux_list, &vpt->mux_list);
				vpt->muxnum++;
				m->poll_task = vpt;
				memset(&m->poll_waddr, 0,
				       sizeof(m->poll_waddr));
				init_poll_funcptr(&m->pt, v9fs_pollwait);
				break;
			}
		}
	}

	if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
		if (vptlast == NULL) {
			up(&v9fs_mux_task_lock);
			return -ENOMEM;
		}

		dprintk(DEBUG_MUX, "put in proc %d\n", i);
		list_add(&m->mux_list, &vptlast->mux_list);
		vptlast->muxnum++;
		m->poll_task = vptlast;
		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
		init_poll_funcptr(&m->pt, v9fs_pollwait);
	}

	v9fs_mux_num++;
	up(&v9fs_mux_task_lock);

	return 0;
}
static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
	int i;
	struct v9fs_mux_poll_task *vpt;

	down(&v9fs_mux_task_lock);
	vpt = m->poll_task;
	list_del(&m->mux_list);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (m->poll_waddr[i] != NULL) {
			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
			m->poll_waddr[i] = NULL;
		}
	}
	vpt->muxnum--;
	if (!vpt->muxnum) {
		dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
		send_sig(SIGKILL, vpt->task, 1);
		vpt->task = NULL;
		v9fs_mux_poll_task_num--;
	}
	v9fs_mux_num--;
	up(&v9fs_mux_task_lock);
}
/**
 * v9fs_mux_init - allocate and initialize the per-session mux data.
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
				    unsigned char *extended)
{
	int i, n;
	struct v9fs_mux_data *m, *mtmp;

	dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
	m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->msize = msize;
	m->extended = extended;
	m->trans = trans;
	idr_init(&m->tidpool.pool);
	init_MUTEX(&m->tidpool.lock);
	m->err = 0;
	init_waitqueue_head(&m->equeue);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	m->rcall = NULL;
	m->rpos = 0;
	m->rbuf = NULL;
	m->wpos = m->wsize = 0;
	m->wbuf = NULL;
	INIT_WORK(&m->rq, v9fs_read_work, m);
	INIT_WORK(&m->wq, v9fs_write_work, m);
	m->wsched = 0;
	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
	m->poll_task = NULL;
	n = v9fs_mux_poll_start(m);
	if (n) {
		kfree(m);
		return ERR_PTR(n);
	}

	n = trans->poll(trans, &m->pt);
	if (n & POLLIN) {
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (IS_ERR(m->poll_waddr[i])) {
			v9fs_mux_poll_stop(m);
			mtmp = (void *)m->poll_waddr[i];	/* the error code */
			kfree(m);
			m = mtmp;
			break;
		}
	}

	return m;
}
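/*
 * Illustrative call sequence (hypothetical caller; v9ses and its fields are
 * assumptions for the sketch, not part of this file; error handling elided):
 *
 *	m = v9fs_mux_init(v9ses->transport, v9ses->maxdata + V9FS_IOHDRSZ,
 *			  &v9ses->extended);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	v9fs_mux_destroy(m);
 */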
/**
 * v9fs_mux_destroy - cancels all pending requests and frees mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
	dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);
	v9fs_mux_cancel(m, -ECONNRESET);

	if (!list_empty(&m->req_list)) {
		/* wait until all processes waiting on this session exit */
		dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
			m);
		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
		dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
			list_empty(&m->req_list));
	}

	v9fs_mux_poll_stop(m);
	m->trans = NULL;

	kfree(m);
}
/**
 * v9fs_pollwait - called by the file's poll operation to add the v9fs-poll
 *	task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
	      poll_table *p)
{
	int i;
	struct v9fs_mux_data *m;

	m = container_of(p, struct v9fs_mux_data, pt);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
		if (m->poll_waddr[i] == NULL)
			break;

	if (i >= ARRAY_SIZE(m->poll_waddr)) {
		dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	m->poll_waddr[i] = wait_address;

	if (!wait_address) {
		dprintk(DEBUG_ERROR, "no wait_address\n");
		m->poll_waddr[i] = ERR_PTR(-EIO);
		return;
	}

	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
	add_wait_queue(wait_address, &m->poll_wait[i]);
}
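/*
 * v9fs_pollwait() above plays the role that poll_wait() plays for
 * user-space pollers: instead of sleeping the current process it registers
 * the per-session poll task on the transport's wait queue, so readiness
 * events wake v9fs_poll_proc() below.
 */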
/**
 * v9fs_poll_mux - polls a mux and schedules read or write works if necessary
 */
static inline void v9fs_poll_mux(struct v9fs_mux_data *m)
{
	int n;

	if (m->err < 0)
		return;

	n = m->trans->poll(m->trans, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		v9fs_mux_cancel(m, n);
		return;
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		}
	}
}
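/*
 * Write work above is only scheduled when there is something to send:
 * m->wsize tracks a partially written request and unsent_req_list holds the
 * queued ones. Scheduling on a bare POLLOUT would spin the worker for
 * nothing.
 */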
/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 *	the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
	struct v9fs_mux_data *m, *mtmp;
	struct v9fs_mux_poll_task *vpt;

	vpt = a;
	dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
			v9fs_poll_mux(m);
		}

		dprintk(DEBUG_MUX, "sleeping...\n");
		schedule_timeout(SCHED_TIMEOUT * HZ);
	}

	__set_current_state(TASK_RUNNING);
	dprintk(DEBUG_MUX, "finish\n");
	return 0;
}
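/*
 * The schedule_timeout() above means every transport is re-polled at least
 * once every SCHED_TIMEOUT (10) seconds even when no wait-queue wakeup
 * arrives in between.
 */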
/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req;

	m = a;

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct v9fs_req,
				 req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		dump_data(m->wbuf, m->wsize);
		spin_unlock(&m->lock);
	}

	dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
	dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLOUT) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

      error:
	v9fs_mux_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}
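/*
 * The test_and_clear_bit(Wpending) fast path above skips a transport poll
 * when v9fs_poll_proc already observed POLLOUT; only when that latched hint
 * is absent does the worker re-poll the transport directly.
 */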
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	int ecode, tag;
	struct v9fs_str *ename;

	tag = req->tag;
	if (!req->err && req->rcall->id == RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

		if (*m->extended)
			req->err = -ecode;

		if (!req->err) {
			req->err = v9fs_errstr2errno(ename->str, ename->len);

			if (!req->err) {	/* string match failed */
				PRINT_FCALL_ERROR("unknown error", req->rcall);
			}

			if (!req->err)
				req->err = -ESERVERFAULT;
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
			req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}

	if (req->err == ERREQFLUSH)
		return;

	if (req->cb) {
		dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
			req->tcall, req->rcall);

		(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
		req->cb = NULL;
	} else
		kfree(req->rcall);

	v9fs_mux_put_tag(m, tag);

	wake_up(&m->equeue);
	kfree(req);
}
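/*
 * Error mapping precedence in process_request(): in extended (9P2000.u)
 * mode the numeric errno carried by Rerror wins; otherwise the error string
 * is mapped via v9fs_errstr2errno(), and -ESERVERFAULT is the fallback when
 * no mapping exists.
 */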
/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr, *rreq;
	struct v9fs_fcall *rcall;
	char *rbuf;

	m = a;

	if (m->err < 0)
		return;

	rcall = NULL;
	dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
	dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->msize) {
			dprintk(DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		dump_data(m->rbuf, n);
		err = v9fs_deserialize_fcall(m->rbuf, n, m->rcall,
					     *m->extended);
		if (err < 0)
			goto error;

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
					   GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
			rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				req->rcall = rcall;
				list_del(&req->req_list);
				spin_unlock(&m->lock);
				process_request(m, req);
				break;
			}
		}

		if (!req) {
			spin_unlock(&m->lock);
			if (err >= 0 && rcall->id != RFLUSH)
				dprintk(DEBUG_ERROR,
					"unexpected response mux %p id %d tag %d\n",
					m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLIN) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

      error:
	v9fs_mux_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}
/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. Can return errors
 * that can be retrieved by PTR_ERR macros.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
					  struct v9fs_fcall *tc,
					  v9fs_mux_req_callback cb, void *cba)
{
	int n;
	struct v9fs_req *req;

	dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == TVERSION)
		n = V9FS_NOTAG;
	else
		n = v9fs_mux_get_tag(m);

	if (n < 0) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}

	v9fs_set_tag(tc, n);

	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = m->trans->poll(m->trans, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(v9fs_mux_wq, &m->wq);

	return req;
}
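/*
 * Tag note: the 9P protocol requires a Tversion request to use V9FS_NOTAG,
 * so it bypasses the tid pool above; every other request draws a 16-bit tag
 * that later matches the response in v9fs_read_work().
 */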
static void
v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc,
		  int err)
{
	v9fs_mux_req_callback cb;
	int tag;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr;

	m = a;
	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
		rc, err, tc->params.tflush.oldtag);

	spin_lock(&m->lock);
	cb = NULL;
	tag = tc->params.tflush.oldtag;
	list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
		if (req->tag == tag) {
			list_del(&req->req_list);
			if (req->cb) {
				cb = req->cb;
				req->cb = NULL;
				spin_unlock(&m->lock);
				(*cb) (req->cba, req->tcall, req->rcall,
				       req->err);
			}
			kfree(req);
			wake_up(&m->equeue);
			break;
		}
	}

	if (!cb)
		spin_unlock(&m->lock);

	v9fs_mux_put_tag(m, tag);
	kfree(tc);
	kfree(rc);
}
static void
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	struct v9fs_fcall *fc;

	dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	fc = v9fs_create_tflush(req->tag);
	v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
}
static void
v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
{
	struct v9fs_mux_rpc *r;

	if (err == ERREQFLUSH) {
		kfree(rc);
		dprintk(DEBUG_MUX, "err req flush\n");
		return;
	}

	r = a;
	dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m,
		r->req, tc, rc, err);
	r->rcall = rc;
	r->err = err;
	wake_up(&r->wqueue);
}
/**
 * v9fs_mux_rpc - sends 9P request and waits until a response is available.
 *	The function can be interrupted.
 *
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
	     struct v9fs_fcall **rc)
{
	int err;
	unsigned long flags;
	struct v9fs_req *req;
	struct v9fs_mux_rpc r;

	r.err = 0;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	if (rc)
		*rc = NULL;

	req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return err;
	}

	r.req = req;
	dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
		req->tag, &r, req);
	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
	if (r.err < 0)
		err = r.err;

	if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
		/* the request is being flushed; mark it and restore the
		   signal state of the process */
		spin_lock(&m->lock);
		req->err = ERREQFLUSH;
		spin_unlock(&m->lock);

		clear_thread_flag(TIF_SIGPENDING);
		v9fs_mux_flush_request(m, req);
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (!err) {
		if (r.rcall)
			dprintk(DEBUG_MUX, "got response id %d tag %d\n",
				r.rcall->id, r.rcall->tag);
		else
			err = -EIO;
	}

	if (rc)
		*rc = r.rcall;
	else
		kfree(r.rcall);

	if (err < 0)
		dprintk(DEBUG_MUX, "got error %d\n", err);

	return err;
}
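/*
 * Illustrative synchronous RPC (hypothetical caller; v9fs_create_tversion()
 * is assumed to come from conv.h; error handling elided):
 *
 *	struct v9fs_fcall *tc, *rc = NULL;
 *
 *	tc = v9fs_create_tversion(msize, "9P2000");
 *	err = v9fs_mux_rpc(m, tc, &rc);
 *	kfree(tc);
 *	kfree(rc);
 */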
/**
 * v9fs_mux_rpcnb - sends 9P request without waiting for response.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @cba: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
		   v9fs_mux_req_callback cb, void *a)
{
	int err;
	struct v9fs_req *req;

	req = v9fs_send_request(m, tc, cb, a);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return err;
	}

	dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
	return 0;
}
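/*
 * Callers of v9fs_mux_rpcnb() must be prepared for @cb to run
 * asynchronously, typically from the mux workqueue (v9fs_read_work ->
 * process_request) or from the cancel path, not from the submitting
 * context.
 */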
/**
 * v9fs_mux_cancel - cancel all pending requests with error
 *
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
	struct v9fs_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		if (req->cb)
			(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
		else
			kfree(req->rcall);

		kfree(req);
	}

	wake_up(&m->equeue);
}
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
	int tag;

	tag = v9fs_get_idpool(&m->tidpool);
	if (tag < 0)
		return V9FS_NOTAG;
	else
		return (u16) tag;
}

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
	if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tidpool))
		v9fs_put_idpool(tag, &m->tidpool);
}
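/*
 * The v9fs_check_idpool() test above guards against double-put: a tag is
 * only returned to the pool if it is currently allocated, and the reserved
 * V9FS_NOTAG value is never pooled.
 */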