/*
 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/jhash.h>
#include <linux/hash.h>
#include <linux/ktime.h>
#include <linux/mempool.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/poll.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/writeback.h>

#include "netfs.h"
static struct kmem_cache *netfs_trans_dst;
static mempool_t *netfs_trans_dst_pool;
static void netfs_trans_init_static(struct netfs_trans *t, int num, int size)
{
	t->page_num = num;
	t->total_size = size;
	atomic_set(&t->refcnt, 1);

	spin_lock_init(&t->dst_lock);
	INIT_LIST_HEAD(&t->dst_list);
}
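
/*
 * Send every page attached to the transaction over the state's socket:
 * each page is preceded by its own NETFS_WRITE_PAGE command header.
 */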
static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
{
	int err = 0;
	unsigned int i, attached_pages = t->attached_pages, ci;
	struct msghdr msg;
	struct page **pages = (t->eng) ? t->eng->pages : t->pages;
	struct page *p;
	unsigned int size;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_WAITALL | MSG_MORE;

	ci = 0;
	for (i = 0; i < t->page_num; ++i) {
		struct page *page = pages[ci];
		struct netfs_cmd cmd;
		struct iovec io;

		p = t->pages[i];

		if (!p)
			continue;

		size = page_private(p);

		io.iov_base = &cmd;
		io.iov_len = sizeof(struct netfs_cmd);

		cmd.cmd = NETFS_WRITE_PAGE;
		cmd.ext = 0;
		cmd.id = 0;
		cmd.size = size;
		cmd.start = p->index;
		cmd.start <<= PAGE_CACHE_SHIFT;
		cmd.csize = 0;
		cmd.cpad = 0;
		cmd.iv = pohmelfs_gen_iv(t);

		netfs_convert_cmd(&cmd);

		msg.msg_iov = &io;
		msg.msg_iovlen = 1;
		msg.msg_flags = MSG_WAITALL | MSG_MORE;

		err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, sizeof(struct netfs_cmd));
		if (err <= 0) {
			printk("%s: %d/%d failed to send transaction header: t: %p, gen: %u, err: %d.\n",
					__func__, i, t->page_num, t, t->gen, err);
			if (err == 0)
				err = -ECONNRESET;
			goto err_out;
		}

		msg.msg_flags = MSG_WAITALL | (attached_pages == 1 ? 0 : MSG_MORE);

		err = kernel_sendpage(st->socket, page, 0, size, msg.msg_flags);
		if (err <= 0) {
			printk("%s: %d/%d failed to send transaction page: t: %p, gen: %u, size: %u, err: %d.\n",
					__func__, i, t->page_num, t, t->gen, size, err);
			if (err == 0)
				err = -ECONNRESET;
			goto err_out;
		}

		dprintk("%s: %d/%d sent t: %p, gen: %u, page: %p/%p, size: %u.\n",
				__func__, i, t->page_num, t, t->gen, page, p, size);

		err = 0;
		attached_pages--;
		if (!attached_pages)
			break;
		ci++;

		continue;

err_out:
		printk("%s: t: %p, gen: %u, err: %d.\n", __func__, t, t->gen, err);
		netfs_state_exit(st);
		break;
	}

	return err;
}
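
/*
 * Send the contiguous part of the transaction (the iovec holding the
 * transaction header and inline data) and then, if any pages are attached,
 * the pages themselves. Socket access is serialized via the state's send lock.
 */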
int netfs_trans_send(struct netfs_trans *t, struct netfs_state *st)
{
	int err;
	struct msghdr msg;

	BUG_ON(!t->iovec.iov_len);
	BUG_ON(t->iovec.iov_len > 1024*1024*1024);

	netfs_state_lock_send(st);
	if (!st->socket) {
		err = netfs_state_init(st);
		if (err)
			goto err_out_unlock_return;
	}

	msg.msg_iov = &t->iovec;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_WAITALL;

	if (t->attached_pages)
		msg.msg_flags |= MSG_MORE;

	err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, t->iovec.iov_len);
	if (err <= 0) {
		printk("%s: failed to send contig transaction: t: %p, gen: %u, size: %zu, err: %d.\n",
				__func__, t, t->gen, t->iovec.iov_len, err);
		if (err == 0)
			err = -ECONNRESET;
		goto err_out_unlock_return;
	}

	dprintk("%s: sent %s transaction: t: %p, gen: %u, size: %zu, page_num: %u.\n",
			__func__, (t->page_num) ? "partial" : "full",
			t, t->gen, t->iovec.iov_len, t->page_num);

	err = 0;
	if (t->attached_pages)
		err = netfs_trans_send_pages(t, st);

err_out_unlock_return:

	if (st->need_reset)
		netfs_state_exit(st);

	netfs_state_unlock_send(st);

	dprintk("%s: t: %p, gen: %u, err: %d.\n",
			__func__, t, t->gen, err);

	return err;
}
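
/*
 * Transactions sent over a given network state are kept in a per-state rbtree
 * keyed by generation number; netfs_trans_cmp() provides the three-way
 * comparison used by the lookup and insert helpers below.
 */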
static inline int netfs_trans_cmp(unsigned int gen, unsigned int new)
{
	if (gen < new)
		return 1;
	if (gen > new)
		return -1;

	return 0;
}
struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen)
{
	struct rb_root *root = &st->trans_root;
	struct rb_node *n = root->rb_node;
	struct netfs_trans_dst *tmp, *ret = NULL;
	struct netfs_trans *t;
	int cmp;

	while (n) {
		tmp = rb_entry(n, struct netfs_trans_dst, state_entry);
		t = tmp->trans;

		cmp = netfs_trans_cmp(t->gen, gen);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else {
			ret = tmp;
			break;
		}
	}

	return ret;
}
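
/*
 * Insert a destination entry into the state's rbtree, keyed by the
 * transaction's generation number; a duplicate generation is reported
 * and rejected.
 */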
static int netfs_trans_insert(struct netfs_trans_dst *ndst, struct netfs_state *st)
{
	struct rb_root *root = &st->trans_root;
	struct rb_node **n = &root->rb_node, *parent = NULL;
	struct netfs_trans_dst *ret = NULL, *tmp;
	struct netfs_trans *t = NULL, *new = ndst->trans;
	int cmp;

	while (*n) {
		parent = *n;

		tmp = rb_entry(parent, struct netfs_trans_dst, state_entry);
		t = tmp->trans;

		cmp = netfs_trans_cmp(t->gen, new->gen);
		if (cmp < 0)
			n = &parent->rb_left;
		else if (cmp > 0)
			n = &parent->rb_right;
		else {
			ret = tmp;
			break;
		}
	}

	if (ret) {
		printk("%s: exist: old: gen: %u, flags: %x, send_time: %lu, "
			"new: gen: %u, flags: %x, send_time: %lu.\n",
			__func__, t->gen, t->flags, ret->send_time,
			new->gen, new->flags, ndst->send_time);
		return -EEXIST;
	}

	rb_link_node(&ndst->state_entry, parent, n);
	rb_insert_color(&ndst->state_entry, root);
	ndst->send_time = jiffies;

	return 0;
}
int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st)
{
	if (dst && dst->state_entry.rb_parent_color) {
		rb_erase(&dst->state_entry, &st->trans_root);
		dst->state_entry.rb_parent_color = 0;
		return 1;
	}

	return 0;
}
static int netfs_trans_remove_state(struct netfs_trans_dst *dst)
{
	int ret;
	struct netfs_state *st = dst->state;

	mutex_lock(&st->trans_lock);
	ret = netfs_trans_remove_nolock(dst, st);
	mutex_unlock(&st->trans_lock);

	return ret;
}
/*
 * Create a new destination for the given transaction, associated with the
 * given network state. The transaction's reference counter is bumped and will
 * be dropped either when a reply is received or when the async timeout
 * detection task fails to resend and drops the transaction.
 */
static int netfs_trans_push_dst(struct netfs_trans *t, struct netfs_state *st)
{
	struct netfs_trans_dst *dst;
	int err;

	dst = mempool_alloc(netfs_trans_dst_pool, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	dst->retries = 0;
	dst->send_time = 0;
	dst->state = st;
	dst->trans = t;
	netfs_trans_get(t);

	mutex_lock(&st->trans_lock);
	err = netfs_trans_insert(dst, st);
	mutex_unlock(&st->trans_lock);

	if (err)
		goto err_out_free;

	spin_lock(&t->dst_lock);
	list_add_tail(&dst->trans_entry, &t->dst_list);
	spin_unlock(&t->dst_lock);

	return 0;

err_out_free:
	netfs_trans_put(t);
	mempool_free(dst, netfs_trans_dst_pool);
	return err;
}
static void netfs_trans_free_dst(struct netfs_trans_dst *dst)
{
	netfs_trans_put(dst->trans);
	mempool_free(dst, netfs_trans_dst_pool);
}
static void netfs_trans_remove_dst(struct netfs_trans_dst *dst)
{
	if (netfs_trans_remove_state(dst))
		netfs_trans_free_dst(dst);
}
/*
 * Drop a destination transaction entry when we have a direct pointer to it.
 */
void netfs_trans_drop_dst(struct netfs_trans_dst *dst)
{
	struct netfs_trans *t = dst->trans;

	spin_lock(&t->dst_lock);
	list_del_init(&dst->trans_entry);
	spin_unlock(&t->dst_lock);

	netfs_trans_remove_dst(dst);
}
/*
 * Drop a destination transaction entry when we have a direct pointer to it
 * and it has already been removed from the state tree.
 */
void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst)
{
	struct netfs_trans *t = dst->trans;

	spin_lock(&t->dst_lock);
	list_del_init(&dst->trans_entry);
	spin_unlock(&t->dst_lock);

	netfs_trans_free_dst(dst);
}
/*
 * Drop the destination transaction entry from the appropriate network state
 * tree and drop the related reference counter. The transaction may be freed
 * here if its reference counter hits zero.
 * The destination transaction entry itself is freed as well.
 */
void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st)
{
	struct netfs_trans_dst *dst, *tmp, *ret = NULL;

	spin_lock(&t->dst_lock);
	list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
		if (dst->state == st) {
			ret = dst;
			list_del(&dst->trans_entry);
			break;
		}
	}
	spin_unlock(&t->dst_lock);

	if (ret)
		netfs_trans_remove_dst(ret);
}
/*
 * Drop the last destination transaction entry from its network state tree and
 * drop the related reference counter. The transaction may be freed here if
 * its reference counter hits zero.
 * The destination transaction entry itself is freed as well.
 */
void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st)
{
	struct netfs_trans_dst *dst, *tmp, *ret;

	spin_lock(&t->dst_lock);
	ret = list_entry(t->dst_list.prev, struct netfs_trans_dst, trans_entry);
	if (ret->state != st) {
		ret = NULL;
		list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
			if (dst->state == st) {
				ret = dst;
				list_del_init(&dst->trans_entry);
				break;
			}
		}
	} else {
		list_del(&ret->trans_entry);
	}
	spin_unlock(&t->dst_lock);

	if (ret)
		netfs_trans_remove_dst(ret);
}
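
/*
 * Queue the transaction on the given state (bumping its reference counter)
 * and send it. On send failure the destination entry added here is dropped
 * again via netfs_trans_drop_last().
 */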
static int netfs_trans_push(struct netfs_trans *t, struct netfs_state *st)
{
	int err;

	err = netfs_trans_push_dst(t, st);
	if (err)
		return err;

	err = netfs_trans_send(t, st);
	if (err)
		goto err_out_free;

	if (t->flags & NETFS_TRANS_SINGLE_DST)
		pohmelfs_switch_active(st->psb);
	return 0;

err_out_free:
	netfs_trans_drop_last(t, st);

	return err;
}
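
/*
 * Pick destination state(s) from the superblock's configuration list
 * (honouring read/write permissions and the active state's priority) and push
 * the transaction to them. For single-destination transactions the first
 * successful push terminates the loop.
 */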
int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	struct pohmelfs_config *c;
	int err = -ENODEV;
	struct netfs_state *st;

	dprintk("%s: t: %p, gen: %u, size: %zu, page_num: %u, active: %p.\n",
			__func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state);

	mutex_lock(&psb->state_lock);
	list_for_each_entry(c, &psb->state_list, config_entry) {
		st = &c->state;

		if (t->flags & NETFS_TRANS_SINGLE_DST) {
			if (!(st->ctl.perm & POHMELFS_IO_PERM_READ))
				continue;
		} else {
			if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE))
				continue;
		}

		if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio) &&
				(t->flags & NETFS_TRANS_SINGLE_DST))
			st = &psb->active_state->state;

		err = netfs_trans_push(t, st);
		if (!err && (t->flags & NETFS_TRANS_SINGLE_DST))
			break;
	}

	mutex_unlock(&psb->state_lock);

	dprintk("%s: fully sent t: %p, gen: %u, size: %zu, page_num: %u, err: %d.\n",
			__func__, t, t->gen, t->iovec.iov_len, t->page_num, err);

	return err;
}
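
/*
 * Fill in the transaction header (size, command, crypto lengths) and pass the
 * transaction to pohmelfs_trans_crypt() for further processing.
 */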
int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	int err;
	struct netfs_cmd *cmd = t->iovec.iov_base;

	t->gen = atomic_inc_return(&psb->trans_gen);

	cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
		t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
	cmd->cmd = NETFS_TRANS;
	cmd->start = t->gen;
	cmd->id = 0;

	if (psb->perform_crypto) {
		cmd->ext = psb->crypto_attached_size;
		cmd->csize = psb->crypto_attached_size;
	}

	dprintk("%s: t: %u, size: %u, iov_len: %zu, attached_size: %u, attached_pages: %u.\n",
			__func__, t->gen, cmd->size, t->iovec.iov_len,
			t->attached_size, t->attached_pages);
	err = pohmelfs_trans_crypt(t, psb);
	if (err) {
		t->result = err;
		netfs_convert_cmd(cmd);
		dprintk("%s: trans: %llu, crypto_attached_size: %u, attached_size: %u, attached_pages: %d, trans_size: %u, err: %d.\n",
				__func__, cmd->start, psb->crypto_attached_size,
				t->attached_size, t->attached_pages, cmd->size, err);
	}
	netfs_trans_put(t);
	return err;
}
/*
 * Resend the transaction to the remote server(s).
 * If new servers were added to the superblock, we can try to send data
 * to them as well.
 *
 * This is called under the superblock's state_lock, so we can safely
 * dereference psb->state_list. Also, the transaction's reference counter is
 * bumped, so it cannot go away under us, and we can safely access all of
 * its members. The state is locked.
 *
 * This function returns 0 if the transaction was successfully sent to at
 * least one destination target.
 */
int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	struct netfs_trans_dst *dst;
	struct netfs_state *st;
	struct pohmelfs_config *c;
	int err, exist, error = -ENODEV;

	list_for_each_entry(c, &psb->state_list, config_entry) {
		st = &c->state;

		exist = 0;
		spin_lock(&t->dst_lock);
		list_for_each_entry(dst, &t->dst_list, trans_entry) {
			if (st == dst->state) {
				exist = 1;
				break;
			}
		}
		spin_unlock(&t->dst_lock);

		if (exist) {
			if (!(t->flags & NETFS_TRANS_SINGLE_DST) ||
					(c->config_entry.next == &psb->state_list)) {
				dprintk("%s: resending st: %p, t: %p, gen: %u.\n",
						__func__, st, t, t->gen);
				err = netfs_trans_send(t, st);
				if (!err)
					error = 0;
			}
			continue;
		}

		dprintk("%s: pushing/resending st: %p, t: %p, gen: %u.\n",
				__func__, st, t, t->gen);
		err = netfs_trans_push(t, st);
		if (err)
			continue;
		error = 0;
		if (t->flags & NETFS_TRANS_SINGLE_DST)
			break;
	}

	return error;
}
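
/*
 * Reserve @size bytes in the transaction's contiguous buffer and return a
 * pointer to the reserved area, or an ERR_PTR() when the request does not fit.
 */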
void *netfs_trans_add(struct netfs_trans *t, unsigned int size)
{
	struct iovec *io = &t->iovec;
	void *ptr;

	if (size > t->total_size) {
		ptr = ERR_PTR(-EINVAL);
		goto out;
	}

	if (io->iov_len + size > t->total_size) {
		dprintk("%s: too big size t: %p, gen: %u, iov_len: %zu, size: %u, total: %u.\n",
				__func__, t, t->gen, io->iov_len, size, t->total_size);
		ptr = ERR_PTR(-E2BIG);
		goto out;
	}

	ptr = io->iov_base + io->iov_len;
	io->iov_len += size;

out:
	dprintk("%s: t: %p, gen: %u, size: %u, total: %zu.\n",
			__func__, t, t->gen, size, io->iov_len);
	return ptr;
}
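
/*
 * Release the transaction's memory; if a crypto engine is attached, its
 * thread is made ready again first.
 */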
void netfs_trans_free(struct netfs_trans *t)
{
	if (t->eng)
		pohmelfs_crypto_thread_make_ready(t->eng->thread);
	kfree(t);
}
struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
		unsigned int flags, unsigned int nr)
{
	struct netfs_trans *t;
	unsigned int num, cont, pad, size_no_trans;
	unsigned int crypto_added = 0;
	struct netfs_cmd *cmd;

	if (psb->perform_crypto)
		crypto_added = psb->crypto_attached_size;

	/*
	 * |sizeof(struct netfs_trans)|
	 * |sizeof(struct netfs_cmd)| - transaction header
	 * |size| - buffer with requested size
	 * |padding| - crypto padding, zero bytes
	 * |nr * sizeof(struct page *)| - array of page pointers
	 *
	 * Overall size should be less than PAGE_SIZE for guaranteed allocation.
	 */

	cont = size;
	size = ALIGN(size, psb->crypto_align_size);
	pad = size - cont;

	size_no_trans = size + sizeof(struct netfs_cmd) * 2 + crypto_added;

	cont = sizeof(struct netfs_trans) + size_no_trans;

	num = (PAGE_SIZE - cont)/sizeof(struct page *);
	if (nr > num)
		nr = num;

	t = kzalloc(cont + nr*sizeof(struct page *), GFP_NOIO);
	if (!t)
		return NULL;

	t->iovec.iov_base = (void *)(t + 1);
	t->pages = (struct page **)(t->iovec.iov_base + size_no_trans);

	/*
	 * Reserving space for transaction header.
	 */
	t->iovec.iov_len = sizeof(struct netfs_cmd) + crypto_added;

	netfs_trans_init_static(t, nr, size_no_trans);

	t->flags = flags;

	cmd = (struct netfs_cmd *)t->iovec.iov_base;

	cmd->size = size;
	cmd->cpad = pad;
	cmd->csize = crypto_added;

	dprintk("%s: t: %p, gen: %u, size: %u, padding: %u, align_size: %u, flags: %x, "
			"page_num: %u, base: %p, pages: %p.\n",
			__func__, t, t->gen, size, pad, psb->crypto_align_size, flags, nr,
			t->iovec.iov_base, t->pages);

	return t;
}
int netfs_trans_init(void)
{
	int err = -ENOMEM;

	netfs_trans_dst = kmem_cache_create("netfs_trans_dst", sizeof(struct netfs_trans_dst),
			0, 0, NULL);
	if (!netfs_trans_dst)
		goto err_out_exit;

	netfs_trans_dst_pool = mempool_create_slab_pool(256, netfs_trans_dst);
	if (!netfs_trans_dst_pool)
		goto err_out_free;

	return 0;

err_out_free:
	kmem_cache_destroy(netfs_trans_dst);
err_out_exit:
	return err;
}
void netfs_trans_exit(void)
{
	mempool_destroy(netfs_trans_dst_pool);
	kmem_cache_destroy(netfs_trans_dst);
}