/*
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <net/9p/9p.h>
#include <net/9p/transport.h>
#include <net/9p/conn.h>
#define SCHED_TIMEOUT		10
#define MAXPOLLWADDR		2
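/*
 * Connection state bits, kept in the wsched word of struct p9_conn.
 * Rpending/Wpending cache the most recent poll result so the workers
 * can often skip a trans->poll() call; Rworksched/Wworksched guarantee
 * that at most one read work and one write work is queued per
 * connection at a time.
 */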
enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
enum {
	None,
	Flushing,
	Flushed,
};

struct p9_mux_poll_task;
struct p9_req {
	spinlock_t lock; /* protect request structure */
	int tag;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	int err;
	p9_conn_req_callback cb;
	void *cba;
	int flush;
	struct list_head req_list;
};
struct p9_conn {
	spinlock_t lock; /* protect lock structure */
	struct list_head mux_list;
	struct p9_mux_poll_task *poll_task;
	int msize;
	unsigned char *extended;
	struct p9_transport *trans;
	struct p9_idpool *tagpool;
	int err;
	wait_queue_head_t equeue;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_fcall *rcall;
	uint rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	wait_queue_t poll_wait[MAXPOLLWADDR];
	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};
struct p9_mux_poll_task {
	struct task_struct *task;
	struct list_head mux_list;
	int muxnum;
};

struct p9_mux_rpc {
	struct p9_conn *m;
	int err;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	wait_queue_head_t wqueue;
};
static int p9_poll_proc(void *);
static void p9_read_work(struct work_struct *work);
static void p9_write_work(struct work_struct *work);
static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
			poll_table *p);
static u16 p9_mux_get_tag(struct p9_conn *);
static void p9_mux_put_tag(struct p9_conn *, u16);
static DEFINE_MUTEX(p9_mux_task_lock);
static struct workqueue_struct *p9_mux_wq;

static int p9_mux_num;
static int p9_mux_poll_task_num;
static struct p9_mux_poll_task p9_mux_poll_tasks[100];
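/*
 * All mounts share this static pool of poll tasks; p9_mux_poll_start()
 * spreads connections across the pool and spawns new tasks on demand,
 * up to ARRAY_SIZE(p9_mux_poll_tasks) of them.
 */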
int p9_mux_global_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
		p9_mux_poll_tasks[i].task = NULL;

	p9_mux_wq = create_workqueue("v9fs");
	if (!p9_mux_wq) {
		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
		return -ENOMEM;
	}

	return 0;
}
void p9_mux_global_exit(void)
{
	destroy_workqueue(p9_mux_wq);
}
/**
 * p9_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation returns the number of mounts divided by
 * the number of existing poll tasks, rounded up, capped at
 * ARRAY_SIZE(p9_mux_poll_tasks).
 */
static int p9_mux_calc_poll_procs(int muxnum)
{
	int n;

	if (p9_mux_poll_task_num)
		n = muxnum / p9_mux_poll_task_num +
		    (muxnum % p9_mux_poll_task_num ? 1 : 0);
	else
		n = 1;

	if (n > ARRAY_SIZE(p9_mux_poll_tasks))
		n = ARRAY_SIZE(p9_mux_poll_tasks);

	return n;
}
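/*
 * Example: with p9_mux_poll_task_num == 3 and muxnum == 10 this
 * returns 10/3 + 1 = 4, so p9_mux_poll_start() will try to spawn a
 * fourth poll task.
 */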
static int p9_mux_poll_start(struct p9_conn *m)
{
	int i, n;
	struct p9_mux_poll_task *vpt, *vptlast;
	struct task_struct *pproc;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num,
		p9_mux_poll_task_num);
	mutex_lock(&p9_mux_task_lock);

	n = p9_mux_calc_poll_procs(p9_mux_num + 1);
	if (n > p9_mux_poll_task_num) {
		for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
			if (p9_mux_poll_tasks[i].task == NULL) {
				vpt = &p9_mux_poll_tasks[i];
				P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n",
									vpt);
				pproc = kthread_create(p9_poll_proc, vpt,
								"v9fs-poll");
				if (!IS_ERR(pproc)) {
					vpt->task = pproc;
					INIT_LIST_HEAD(&vpt->mux_list);
					vpt->muxnum = 0;
					p9_mux_poll_task_num++;
					wake_up_process(vpt->task);
				}
				break;
			}
		}

		if (i >= ARRAY_SIZE(p9_mux_poll_tasks))
			P9_DPRINTK(P9_DEBUG_ERROR,
					"warning: no free poll slots\n");
	}

	n = (p9_mux_num + 1) / p9_mux_poll_task_num +
		((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0);

	vptlast = NULL;
	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
		vpt = &p9_mux_poll_tasks[i];
		if (vpt->task != NULL) {
			vptlast = vpt;
			if (vpt->muxnum < n) {
				P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
				list_add(&m->mux_list, &vpt->mux_list);
				vpt->muxnum++;
				m->poll_task = vpt;
				memset(&m->poll_waddr, 0,
							sizeof(m->poll_waddr));
				init_poll_funcptr(&m->pt, p9_pollwait);
				break;
			}
		}
	}

	if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
		if (vptlast == NULL) {
			mutex_unlock(&p9_mux_task_lock);
			return -ENOMEM;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
		list_add(&m->mux_list, &vptlast->mux_list);
		vptlast->muxnum++;
		m->poll_task = vptlast;
		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
		init_poll_funcptr(&m->pt, p9_pollwait);
	}

	p9_mux_num++;
	mutex_unlock(&p9_mux_task_lock);

	return 0;
}
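/*
 * p9_mux_poll_stop undoes p9_mux_poll_start: it detaches the
 * connection from its poll task, tears down any registered wait queue
 * entries, and stops the task once its last connection goes away.
 */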
static void p9_mux_poll_stop(struct p9_conn *m)
{
	int i;
	struct p9_mux_poll_task *vpt;

	mutex_lock(&p9_mux_task_lock);
	vpt = m->poll_task;
	list_del(&m->mux_list);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (m->poll_waddr[i] != NULL) {
			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
			m->poll_waddr[i] = NULL;
		}
	}
	vpt->muxnum--;
	if (!vpt->muxnum) {
		P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt);
		kthread_stop(vpt->task);
		vpt->task = NULL;
		p9_mux_poll_task_num--;
	}
	p9_mux_num--;
	mutex_unlock(&p9_mux_task_lock);
}
/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct p9_conn *p9_conn_create(struct p9_transport *trans, int msize,
		       unsigned char *extended)
{
	int i, n;
	struct p9_conn *m, *mtmp;

	P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, msize);
	m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->msize = msize;
	m->extended = extended;
	m->trans = trans;
	m->tagpool = p9_idpool_create();
	if (IS_ERR(m->tagpool)) {
		mtmp = ERR_PTR(-ENOMEM);
		kfree(m);
		return mtmp;
	}

	m->err = 0;
	init_waitqueue_head(&m->equeue);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	m->rcall = NULL;
	m->rpos = 0;
	m->rbuf = NULL;
	m->wpos = m->wsize = 0;
	m->wbuf = NULL;
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	m->wsched = 0;
	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
	m->poll_task = NULL;
	n = p9_mux_poll_start(m);
	if (n) {
		kfree(m);
		return ERR_PTR(n);
	}

	n = trans->poll(trans, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (IS_ERR(m->poll_waddr[i])) {
			p9_mux_poll_stop(m);
			mtmp = (void *)m->poll_waddr;	/* the error code */
			kfree(m);
			m = mtmp;
			break;
		}
	}

	return m;
}
EXPORT_SYMBOL(p9_conn_create);
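/*
 * A minimal caller sketch (hypothetical; the real call site lives in
 * the 9P client code, which owns the transport):
 *
 *	struct p9_conn *conn = p9_conn_create(trans, msize, &extended);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */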
/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 */
void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);
	p9_conn_cancel(m, -ECONNRESET);

	if (!list_empty(&m->req_list)) {
		/* wait until all processes waiting on this session exit */
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p waiting for empty request queue\n", m);
		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
			list_empty(&m->req_list));
	}

	p9_mux_poll_stop(m);
	m->trans = NULL;
	p9_idpool_destroy(m->tagpool);
	kfree(m);
}
EXPORT_SYMBOL(p9_conn_destroy);
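/*
 * A transport may hand the mux up to MAXPOLLWADDR wait queues (a
 * file-descriptor transport with distinct read and write fds, for
 * instance, has one per fd); each registered queue occupies one
 * poll_waddr slot.
 */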
/**
 * p9_pollwait - called by files poll operation to add v9fs-poll task
 *	to files wait queue
 */
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
	      poll_table *p)
{
	int i;
	struct p9_conn *m;

	m = container_of(p, struct p9_conn, pt);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
		if (m->poll_waddr[i] == NULL)
			break;

	if (i >= ARRAY_SIZE(m->poll_waddr)) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	m->poll_waddr[i] = wait_address;

	if (!wait_address) {
		P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
		m->poll_waddr[i] = ERR_PTR(-EIO);
		return;
	}

	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
	add_wait_queue(wait_address, &m->poll_wait[i]);
}
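/*
 * p9_poll_mux runs in poll-task context; it never blocks, it only
 * samples transport readiness and hands the actual I/O off to the
 * workqueue.
 */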
/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 */
static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = m->trans->poll(m->trans, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n",
									m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
}
/**
 * p9_poll_proc - polls all v9fs transports for new events and queues
 *	the appropriate work to the work queue
 */
static int p9_poll_proc(void *a)
{
	struct p9_conn *m, *mtmp;
	struct p9_mux_poll_task *vpt;

	vpt = a;
	P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
			p9_poll_mux(m);
		}

		P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
		schedule_timeout(SCHED_TIMEOUT * HZ);
	}

	__set_current_state(TASK_RUNNING);
	P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
	return 0;
}
/**
 * p9_write_work - called when a transport can send some data
 */
static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct p9_req,
			       req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		spin_unlock(&m->lock);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
								m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n",
									m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}
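/*
 * process_request converts a received Rerror into a local errno: in
 * extended (9P2000.u) mode the numeric ecode is used directly,
 * otherwise the error string is mapped via p9_errstr2errno().
 */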
static void process_request(struct p9_conn *m, struct p9_req *req)
{
	int ecode;
	struct p9_str *ename;

	if (!req->err && req->rcall->id == P9_RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
								ename->str);

		if (*m->extended)
			req->err = -ecode;

		if (!req->err) {
			req->err = p9_errstr2errno(ename->str, ename->len);

			if (!req->err) {	/* string match failed */
				PRINT_FCALL_ERROR("unknown error", req->rcall);
			}

			if (!req->err)
				req->err = -ESERVERFAULT;
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		P9_DPRINTK(P9_DEBUG_ERROR,
				"fcall mismatch: expected %d, got %d\n",
				req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}
}
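/*
 * 9P messages are framed by a 4-byte little-endian size prefix, so the
 * read worker below keeps consuming buffered bytes while more than 4
 * are available and a complete packet has arrived.
 */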
/**
 * p9_read_work - called when there is some data to be read from a transport
 */
static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req, *rptr, *rreq;
	struct p9_fcall *rcall;
	char *rbuf;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	rcall = NULL;
	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		err =
		    p9_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
		if (err < 0)
			goto error;

#ifdef CONFIG_NET_9P_DEBUG
		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
			char buf[150];

			p9_printfcall(buf, sizeof(buf), m->rcall,
				*m->extended);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}
#endif

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
					   GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
							rcall->id, rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				if (req->flush != Flushing)
					list_del(&req->req_list);
				break;
			}
		}
		spin_unlock(&m->lock);

		if (req) {
			req->rcall = rcall;
			process_request(m, req);

			if (req->flush != Flushing) {
				if (req->cb)
					(*req->cb) (req, req->cba);
				else
					kfree(req->rcall);

				wake_up(&m->equeue);
			}
		} else {
			if (err >= 0 && rcall->id != P9_RFLUSH)
				P9_DPRINTK(P9_DEBUG_ERROR,
					"unexpected response mux %p id %d tag %d\n",
					m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}
/**
 * p9_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. Can return errors
 * that can be retrieved by PTR_ERR macros.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct p9_req *p9_send_request(struct p9_conn *m,
					  struct p9_fcall *tc,
					  p9_conn_req_callback cb, void *cba)
{
	int n;
	struct p9_req *req;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == P9_TVERSION)
		n = P9_NOTAG;
	else
		n = p9_mux_get_tag(m);

	if (n < 0)
		return ERR_PTR(-ENOMEM);

	p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
		char buf[150];

		p9_printfcall(buf, sizeof(buf), tc, *m->extended);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}
#endif

	spin_lock_init(&req->lock);
	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;
	req->flush = None;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = m->trans->poll(m->trans, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return req;
}
static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
{
	p9_mux_put_tag(m, req->tag);
	kfree(req);
}
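/*
 * Flushing follows the 9P Tflush protocol: the client sends
 * Tflush(oldtag) and may not reuse oldtag until the server's Rflush
 * arrives. p9_mux_flush_cb() runs on that Rflush and completes the
 * original request, marking it Flushed.
 */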
static void p9_mux_flush_cb(struct p9_req *freq, void *a)
{
	p9_conn_req_callback cb;
	int tag;
	struct p9_conn *m;
	struct p9_req *req, *rreq, *rptr;

	m = a;
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
		freq->tcall, freq->rcall, freq->err,
		freq->tcall->params.tflush.oldtag);

	spin_lock(&m->lock);
	cb = NULL;
	tag = freq->tcall->params.tflush.oldtag;
	req = NULL;
	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
		if (rreq->tag == tag) {
			req = rreq;
			list_del(&req->req_list);
			break;
		}
	}
	spin_unlock(&m->lock);

	if (req) {
		spin_lock(&req->lock);
		req->flush = Flushed;
		spin_unlock(&req->lock);

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);

		wake_up(&m->equeue);
	}

	kfree(freq->tcall);
	kfree(freq->rcall);
	p9_mux_free_request(m, freq);
}
static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
	struct p9_fcall *fc;
	struct p9_req *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	/* if a response was received for a request, do nothing */
	spin_lock(&req->lock);
	if (req->rcall || req->err) {
		spin_unlock(&req->lock);
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p req %p response already received\n", m, req);
		return 0;
	}

	req->flush = Flushing;
	spin_unlock(&req->lock);

	spin_lock(&m->lock);
	/* if the request is not sent yet, just remove it from the list */
	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
		if (rreq->tag == req->tag) {
			P9_DPRINTK(P9_DEBUG_MUX,
			   "mux %p req %p request is not sent yet\n", m, req);
			list_del(&rreq->req_list);
			req->flush = Flushed;
			spin_unlock(&m->lock);
			if (req->cb)
				(*req->cb) (req, req->cba);
			return 0;
		}
	}
	spin_unlock(&m->lock);

	clear_thread_flag(TIF_SIGPENDING);
	fc = p9_create_tflush(req->tag);
	p9_send_request(m, fc, p9_mux_flush_cb, m);
	return 1;
}
static void
p9_conn_rpc_cb(struct p9_req *req, void *a)
{
	struct p9_mux_rpc *r;

	P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
	r = a;
	r->rcall = req->rcall;
	r->err = req->err;

	if (req->flush != None && !req->err)
		r->err = -ERESTARTSYS;

	wake_up(&r->wqueue);
}
/**
 * p9_conn_rpc - sends 9P request and waits until a response is available.
 *	The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
p9_conn_rpc(struct p9_conn *m, struct p9_fcall *tc,
	     struct p9_fcall **rc)
{
	int err, sigpending;
	unsigned long flags;
	struct p9_req *req;
	struct p9_mux_rpc r;

	r.err = 0;
	r.tcall = tc;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	if (rc)
		*rc = NULL;

	sigpending = 0;
	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	}

	req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return err;
	}

	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
	if (r.err < 0)
		err = r.err;

	if (err == -ERESTARTSYS && m->trans->status == Connected
							&& m->err == 0) {
		if (p9_mux_flush_request(m, req)) {
			/* wait until we get response of the flush message */
			do {
				clear_thread_flag(TIF_SIGPENDING);
				err = wait_event_interruptible(r.wqueue,
					r.rcall || r.err);
			} while (!r.rcall && !r.err && err == -ERESTARTSYS &&
				m->trans->status == Connected && !m->err);

			err = -ERESTARTSYS;
		}
		sigpending = 1;
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (rc)
		*rc = r.rcall;
	else
		kfree(r.rcall);

	p9_mux_free_request(m, req);
	if (err > 0)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(p9_conn_rpc);
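/*
 * Typical synchronous call pattern (sketch; assumes tc was built by
 * one of the p9_create_t* request constructors):
 *
 *	struct p9_fcall *rc = NULL;
 *	int err = p9_conn_rpc(m, tc, &rc);
 *	if (!err) {
 *		... use rc ...
 *		kfree(rc);
 *	}
 */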
#ifdef P9_NONBLOCK
/**
 * p9_conn_rpcnb - sends 9P request without waiting for response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @a: value to pass to the callback function
 */
int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
		   p9_conn_req_callback cb, void *a)
{
	int err;
	struct p9_req *req;

	req = p9_send_request(m, tc, cb, a);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return PTR_ERR(req);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
	return 0;
}
EXPORT_SYMBOL(p9_conn_rpcnb);
#endif /* P9_NONBLOCK */
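/*
 * Cancellation below first moves every pending and unsent request to a
 * private list under m->lock, then runs the callbacks with the lock
 * dropped, so a callback can safely re-enter the mux.
 */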
/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);
	}

	wake_up(&m->equeue);
}
EXPORT_SYMBOL(p9_conn_cancel);
static u16 p9_mux_get_tag(struct p9_conn *m)
{
	int tag;

	tag = p9_idpool_get(m->tagpool);
	if (tag < 0)
		return P9_NOTAG;
	else
		return (u16) tag;
}

static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
{
	if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
		p9_idpool_put(tag, m->tagpool);
}