/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

struct dentry *bl_device_pipe;
wait_queue_head_t bl_wq;

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	struct rpc_call_ops call_ops;
	void (*pnfs_callback) (void *data);
	void *data;
};

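/* Rough lifecycle (see the helpers below):
 *
 *	par = alloc_parallel(data);	refcount 1 for the caller
 *	bl_submit_bio();		get_parallel() per submitted bio
 *	bio end_io;			put_parallel() per completed bio
 *	put_parallel(par);		caller drops its reference
 *
 * When the count reaches zero, destroy_parallel() invokes
 * ->pnfs_callback(data) to complete the whole request.
 */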
static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

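/* Submit the bio accumulated so far (if any) and return NULL so the
 * caller can start a fresh chain.  A parallel_io reference is taken on
 * behalf of the bio; its end_io handler drops it via put_parallel().
 */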
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio)
		return NULL;

	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = end_io;
	bio->bi_private = par;
	return bio;
}

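/* Add a page to the current bio, allocating one (sized for up to npg
 * pages) if needed and submitting a full bio before retrying.  Returns
 * the bio to use for the next page, or ERR_PTR(-ENOMEM).
 */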
static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!rdata->pnfs_error)
			rdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(rdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}

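/* Last reference on the parallel_io has been dropped; this may run in
 * bio completion (interrupt) context, so defer the final
 * pnfs_ld_read_done() to a workqueue, reusing the rpc_task's tk_work.
 */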
static void
bl_end_par_io_read(void *data)
{
	struct nfs_read_data *rdata = data;

	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

/* We don't want normal .rpc_call_done callback used, so we replace it
 * with this stub.
 */
static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
{
	return;
}

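/* Read path: walk the requested pages, mapping each page through the
 * layout's extents.  Holes are filled by zeroing the page locally; all
 * other pages are chained into bios against the volume (reading from
 * the COW source where one exists).  The last completing bio triggers
 * bl_end_par_io_read() through the parallel_io refcount.
 */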
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t count = rdata->args.count;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

	dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
	       rdata->npages, f_offset, count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->call_ops = *rdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
					     isect, &cow_read);
			if (!be) {
				rdata->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par);
			if (IS_ERR(bio)) {
				rdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = rdata->inode->i_size - f_offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

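/* Mark [offset, offset + count) for LAYOUTCOMMIT: any sector range in
 * that window backed by an INVALID extent is added to the commit list.
 */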
static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA)
			bl_mark_for_commit(be, isect, len); /* What if fails? */
		isect += len;
		bl_put_extent(be);
	}
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (!wdata->pnfs_error) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_write_data *wdata = data;

	wdata->task.tk_status = 0;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so is not
 * used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

/* Given an unmapped page, zero it or read in page for COW, page is locked
 * by caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	bl_put_extent(cow_read);
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

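/* Write path, in three phases: zero out any uninitialized pages of the
 * block containing the start of the write, write the pages themselves,
 * then zero out any uninitialized pages of the block containing the end
 * of the write.  Zeroing only happens inside INVALID extents, and all
 * bios share one parallel_io, as in the read path.
 */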
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->call_ops = *wdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		wdata->pnfs_error = -EINVAL;
		goto out;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (;npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page =
			    find_or_create_page(wdata->inode->i_mapping, index,
						GFP_NOFS);
			if (!page) {
				dprintk("%s oom\n", __func__);
				wdata->pnfs_error = -ENOMEM;
				goto out;
			}

			/* PageDirty: Other will write this out
			 * PageWriteback: Other is writing this out
			 * PageUptodate: It was read before
			 * sector_initialized: already written out
			 */
			if (PageDirty(page) || PageWriteback(page)) {
				print_page(page);
				unlock_page(page);
				page_cache_release(page);
				goto next_page;
			}
			if (!PageUptodate(page)) {
				/* New page, readin or zero it */
				init_page_for_write(page, cow_read);
			}
			set_page_writeback(page);
			unlock_page(page);

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS,
						       NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = ret;
				goto out;
			}
			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				wdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
					     isect, NULL);
			if (!be || !is_writable(be, isect)) {
				wdata->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS,
						       NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				wdata->pnfs_error = ret;
				goto out;
			}
		}
		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par);
		if (IS_ERR(bio)) {
			wdata->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
	if (count < wdata->res.count) {
		wdata->res.count = count;
	}
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);

	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev;
		spin_lock(&mid->bm_lock);
		while (!list_empty(&mid->bm_devlist)) {
			dev = list_first_entry(&mid->bm_devlist,
					       struct pnfs_block_dev,
					       bm_node);
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		spin_unlock(&mid->bm_lock);
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	for (i = 0; i < max_pages; i++)
		if (pages[i])	/* may be NULL after a partial alloc failure */
			__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

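/* Mount-time setup: ask the server for its full device list, then
 * resolve each device id to a local block device via
 * nfs4_blk_get_deviceinfo() and stash the results in the per-server
 * block_mount_id.
 */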
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status = 0, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = pnfs_generic_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = pnfs_generic_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

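/* Module init: register the layout driver and create the rpc_pipefs
 * "blocklayout" pipe, which bl_upcall_ops uses to resolve volumes by
 * upcalling to user space.
 */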
static int __init nfs4blocklayout_init(void)
{
	struct vfsmount *mnt;
	struct path path;
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	init_waitqueue_head(&bl_wq);

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		ret = PTR_ERR(mnt);
		goto out_remove;
	}
	ret = vfs_path_lookup(mnt->mnt_root,
			      mnt,
			      NFS_PIPE_DIRNAME, 0, &path);
	if (ret)
		goto out_putrpc;

	bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
				    &bl_upcall_ops, 0);
	path_put(&path);
	if (IS_ERR(bl_device_pipe)) {
		ret = PTR_ERR(bl_device_pipe);
		goto out_putrpc;
	}
out:
	return ret;

out_putrpc:
	rpc_put_mount();
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	pnfs_unregister_layoutdriver(&blocklayout_type);
	rpc_unlink(bl_device_pipe);
	rpc_put_mount();
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);