 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 *
 * Implements Extendible Hashing as described in:
 *   "Extendible Hashing" by Fagin, et al. in
 *   __ACM Trans. on Database Systems__, Sept 1979.
 *
 * Here's the layout of dirents, which is essentially the same as that of
 * ext2 within a single block. The field de_name_len is the number of bytes
 * actually required for the name (no null terminator). The field de_rec_len
 * is the number of bytes allocated to the dirent. The offset of the next
 * dirent in the block is (dirent + dirent->de_rec_len). When a dirent is
 * deleted, the preceding dirent inherits its allocated space, i.e.
 * prev->de_rec_len += deleted->de_rec_len. Since the next dirent is obtained
 * by adding de_rec_len to the current dirent, this effectively causes the
 * deleted dirent to be skipped when iterating through all the dirents.
 * (A minimal, illustrative walker for this scheme is sketched after the
 * macro definitions below.)
 *
 * When deleting the first dirent in a block, there is no previous dirent, so
 * the field de_ino is set to zero to designate it as deleted. When allocating
 * a dirent, gfs2_dirent_alloc iterates through the dirents in a block. If the
 * first dirent has (de_ino == 0) and de_rec_len is large enough, this first
 * dirent is allocated. Otherwise it must go through all the 'used' dirents
 * searching for one in which the amount of total space minus the amount of
 * used space is large enough to hold the new dirent.
 *
 * There are two types of blocks in which dirents reside. In a stuffed dinode,
 * the dirents begin at offset sizeof(struct gfs2_dinode) from the beginning
 * of the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from
 * the beginning of the leaf block. The dirents reside in leaves when
 *
 *   dip->i_diskflags & GFS2_DIF_EXHASH is true
 *
 * Otherwise, the dirents are "linear", within a single stuffed dinode block.
 *
 * When the dirents are in leaves, the actual contents of the directory file
 * are used as an array of 64-bit block pointers pointing to the leaf blocks.
 * The dirents are NOT in the directory file itself. There can be more than
 * one block pointer in the array that points to the same leaf. In fact, when
 * a directory is first converted from linear to exhash, all of the pointers
 * point to the same leaf.
 *
 * When a leaf is completely full, the size of the hash table can be doubled
 * unless it is already at the maximum size, which is hard coded into
 * GFS2_DIR_MAX_DEPTH. After that, leaves are chained together in a linked
 * list, but never before the maximum hash table size has been reached.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>

#define IS_LEAF     1 /* Hashed (leaf) directory */
#define IS_DINODE   2 /* Linear (stuffed dinode block) directory */

#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */

#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
#define GFS2_HASH_INDEX_MASK 0xffffc000
#define GFS2_USE_HASH_FLAG 0x2000
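
/*
 * Illustrative sketch only (not called anywhere in GFS2): walk every dirent
 * in a single block by repeatedly adding de_rec_len, as described in the
 * header comment above. The starting offset, the callback and this helper's
 * name are hypothetical; the real scanning logic, including sentinel and
 * consistency checks, lives in gfs2_dirent_scan() further down in this file.
 */
static inline void gfs2_dirent_walk_example(const char *buf,
					    unsigned int offset,
					    unsigned int len,
					    void (*cb)(const struct gfs2_dirent *))
{
	const struct gfs2_dirent *dent;
	unsigned int rec_len;

	while (offset + sizeof(struct gfs2_dirent) <= len) {
		dent = (const struct gfs2_dirent *)(buf + offset);
		rec_len = be16_to_cpu(dent->de_rec_len);
		/* A corrupt record length would otherwise loop forever. */
		if (rec_len < sizeof(struct gfs2_dirent) ||
		    offset + rec_len > len)
			break;
		/* Deleted ("sentinel") entries have de_inum.no_addr == 0. */
		if (dent->de_inum.no_addr != 0)
			cb(dent);
		offset += rec_len;
	}
}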

struct qstr gfs2_qdot __read_mostly;
struct qstr gfs2_qdotdot __read_mostly;

typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
			    const struct qstr *name, void *opaque);

int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
			    struct buffer_head **bhp)
	struct buffer_head *bh;

	bh = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));

static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
					struct buffer_head **bhp)
	struct buffer_head *bh;

	error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh);

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {

static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
				  unsigned int offset, unsigned int size)
	struct buffer_head *dibh;

	error = gfs2_meta_inode_buffer(ip, &dibh);

	gfs2_trans_add_meta(ip->i_gl, dibh);
	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
	if (ip->i_inode.i_size < offset + size)
		i_size_write(&ip->i_inode, offset + size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

 * gfs2_dir_write_data - Write directory information to the inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing information to be written
 * @offset: The file offset to start writing at
 * @size: The amount of data to write
 *
 * Returns: The number of bytes correctly written or error code
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
			       u64 offset, unsigned int size)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;

	if (gfs2_is_stuffed(ip) && offset + size <= gfs2_max_stuffed_size(ip))
		return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
					      size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);

	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		struct buffer_head *bh;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		error = gfs2_extent_map(&ip->i_inode, lblock, &new,
					&dblock, &extlen);

		if (gfs2_assert_withdraw(sdp, dblock))

		if (amount == sdp->sd_jbsize || new)
			error = gfs2_dir_get_new_buffer(ip, dblock, &bh);
		else
			error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);

		gfs2_trans_add_meta(ip->i_gl, bh);
		memcpy(bh->b_data + o, buf, amount);

		o = sizeof(struct gfs2_meta_header);

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (ip->i_inode.i_size < offset + copied)
		i_size_write(&ip->i_inode, offset + copied);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);

static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf,
				 unsigned int size)
	struct buffer_head *dibh;

	error = gfs2_meta_inode_buffer(ip, &dibh);
		memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size);

	return (error) ? error : size;

 * gfs2_dir_read_data - Read data from a directory inode
 * @ip: The GFS2 inode
 * @buf: The buffer to place result into
 * @size: Amount of data to transfer
 *
 * Returns: The amount of data actually copied or the error

static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf,
			      unsigned int size)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (gfs2_is_stuffed(ip))
		return gfs2_dir_read_stuffed(ip, buf, size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))

	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		struct buffer_head *bh;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		error = gfs2_extent_map(&ip->i_inode, lblock, &new,
					&dblock, &extlen);
		if (error || !dblock)

			bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
			error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, 0, &bh);

		error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);

		memcpy(buf, bh->b_data + o, amount);

		buf += (amount/sizeof(__be64));

		o = sizeof(struct gfs2_meta_header);

	return (copied) ? copied : error;

 * gfs2_dir_get_hash_table - Get pointer to the dir hash table
 * @ip: The inode in question
 *
 * Returns: The hash table or an error

static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
	struct inode *inode = &ip->i_inode;

	BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH));

	hc = ip->i_hash_cache;

	hsize = BIT(ip->i_depth);
	hsize *= sizeof(__be64);
	if (hsize != i_size_read(&ip->i_inode)) {
		gfs2_consist_inode(ip);
		return ERR_PTR(-EIO);

	hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
		hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);
		return ERR_PTR(-ENOMEM);

	ret = gfs2_dir_read_data(ip, hc, hsize);

	spin_lock(&inode->i_lock);
	if (likely(!ip->i_hash_cache)) {
		ip->i_hash_cache = hc;
	spin_unlock(&inode->i_lock);

	return ip->i_hash_cache;

 * gfs2_dir_hash_inval - Invalidate dir hash
 * @ip: The directory inode
 *
 * Must be called with an exclusive glock, or during glock invalidation.

void gfs2_dir_hash_inval(struct gfs2_inode *ip)
	spin_lock(&ip->i_inode.i_lock);
	hc = ip->i_hash_cache;
	ip->i_hash_cache = NULL;
	spin_unlock(&ip->i_inode.i_lock);

static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
	return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;

static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
				     const struct qstr *name, int ret)
	if (!gfs2_dirent_sentinel(dent) &&
	    be32_to_cpu(dent->de_hash) == name->hash &&
	    be16_to_cpu(dent->de_name_len) == name->len &&
	    memcmp(dent+1, name->name, name->len) == 0)

static int gfs2_dirent_find(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
	return __gfs2_dirent_find(dent, name, 1);

static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
	return __gfs2_dirent_find(dent, name, 2);

 * name->name holds ptr to start of block.
 * name->len holds size of block.
static int gfs2_dirent_last(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
	const char *start = name->name;
	const char *end = (const char *)dent + be16_to_cpu(dent->de_rec_len);
	if (name->len == (end - start))

/* Look for the dirent that contains the offset specified in data. Once we
 * find that dirent, there must be space available there for the new dirent */
static int gfs2_dirent_find_offset(const struct gfs2_dirent *dent,
				   const struct qstr *name,
				   void *ptr)
	unsigned required = GFS2_DIRENT_SIZE(name->len);
	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	unsigned totlen = be16_to_cpu(dent->de_rec_len);

	if (ptr < (void *)dent || ptr >= (void *)dent + totlen)
	if (gfs2_dirent_sentinel(dent))
	if (ptr < (void *)dent + actual)
	if ((void *)dent + totlen >= ptr + required)

static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
				  const struct qstr *name,
				  void *opaque)
	unsigned required = GFS2_DIRENT_SIZE(name->len);
	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	unsigned totlen = be16_to_cpu(dent->de_rec_len);

	if (gfs2_dirent_sentinel(dent))
	if (totlen - actual >= required)

struct dirent_gather {
	const struct gfs2_dirent **pdent;

static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
			      const struct qstr *name,
			      void *opaque)
	struct dirent_gather *g = opaque;
	if (!gfs2_dirent_sentinel(dent)) {
		g->pdent[g->offset++] = dent;

 * Other possible things to check:
 * - Inode located within filesystem size (and on valid block)
 * - Valid directory entry type
 * Not sure how heavy-weight we want to make this... could also check
 * hash is correct for example, but that would take a lot of extra time.
 * For now the most important thing is to check that the various sizes
 * are correct.
static int gfs2_check_dirent(struct gfs2_sbd *sdp,
			     struct gfs2_dirent *dent, unsigned int offset,
			     unsigned int size, unsigned int len, int first)
	const char *msg = "gfs2_dirent too small";
	if (unlikely(size < sizeof(struct gfs2_dirent)))
	msg = "gfs2_dirent misaligned";
	if (unlikely(offset & 0x7))
	msg = "gfs2_dirent points beyond end of block";
	if (unlikely(offset + size > len))
	msg = "zero inode number";
	if (unlikely(!first && gfs2_dirent_sentinel(dent)))
	msg = "name length is greater than space in dirent";
	if (!gfs2_dirent_sentinel(dent) &&
	    unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
		     size))
	fs_warn(sdp, "%s: %s (%s)\n",
		__func__, msg, first ? "first in block" : "not first in block");

static int gfs2_dirent_offset(struct gfs2_sbd *sdp, const void *buf)
	const struct gfs2_meta_header *h = buf;

	switch(be32_to_cpu(h->mh_type)) {
	case GFS2_METATYPE_LF:
		offset = sizeof(struct gfs2_leaf);
		break;
	case GFS2_METATYPE_DI:
		offset = sizeof(struct gfs2_dinode);
		break;

	fs_warn(sdp, "%s: wrong block type %u\n", __func__,
		be32_to_cpu(h->mh_type));

static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
					    unsigned int len, gfs2_dscan_t scan,
					    const struct qstr *name,
					    void *opaque)
	struct gfs2_dirent *dent, *prev;

	ret = gfs2_dirent_offset(GFS2_SB(inode), buf);

	size = be16_to_cpu(dent->de_rec_len);
	if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))

		ret = scan(dent, name, opaque);

		size = be16_to_cpu(dent->de_rec_len);
		if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
				      len, 0))

		return prev ? prev : dent;

	gfs2_consist_inode(GFS2_I(inode));
	return ERR_PTR(-EIO);

static int dirent_check_reclen(struct gfs2_inode *dip,
			       const struct gfs2_dirent *d, const void *end_p)
	u16 rec_len = be16_to_cpu(d->de_rec_len);

	if (unlikely(rec_len < sizeof(struct gfs2_dirent)))

	gfs2_consist_inode(dip);

 * dirent_next - Next dirent
 * @dip: the directory
 * @dent: Pointer to list of dirents
 *
 * Returns: 0 on success, error code otherwise

static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
		       struct gfs2_dirent **dent)
	struct gfs2_dirent *cur = *dent, *tmp;
	char *bh_end = bh->b_data + bh->b_size;

	ret = dirent_check_reclen(dip, cur, bh_end);

	tmp = (void *)cur + ret;
	ret = dirent_check_reclen(dip, tmp, bh_end);

	/* Only the first dent could ever have de_inum.no_addr == 0 */
	if (gfs2_dirent_sentinel(tmp)) {
		gfs2_consist_inode(dip);

 * dirent_del - Delete a dirent
 * @dip: The GFS2 inode
 * @prev: The previous dirent
 * @cur: The current dirent

static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
		       struct gfs2_dirent *prev, struct gfs2_dirent *cur)
	u16 cur_rec_len, prev_rec_len;

	if (gfs2_dirent_sentinel(cur)) {
		gfs2_consist_inode(dip);

	gfs2_trans_add_meta(dip->i_gl, bh);

	/* If there is no prev entry, this is the first entry in the block.
	   The de_rec_len is already as big as it needs to be. Just zero
	   out the inode number and return. */
		cur->de_inum.no_addr = 0;
		cur->de_inum.no_formal_ino = 0;

	/* Combine this dentry with the previous one. */

	prev_rec_len = be16_to_cpu(prev->de_rec_len);
	cur_rec_len = be16_to_cpu(cur->de_rec_len);

	if ((char *)prev + prev_rec_len != (char *)cur)
		gfs2_consist_inode(dip);
	if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
		gfs2_consist_inode(dip);

	prev_rec_len += cur_rec_len;
	prev->de_rec_len = cpu_to_be16(prev_rec_len);

static struct gfs2_dirent *do_init_dirent(struct inode *inode,
					  struct gfs2_dirent *dent,
					  const struct qstr *name,
					  struct buffer_head *bh,
					  unsigned offset)
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_dirent *ndent;

	totlen = be16_to_cpu(dent->de_rec_len);
	BUG_ON(offset + name->len > totlen);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ndent = (struct gfs2_dirent *)((char *)dent + offset);
	dent->de_rec_len = cpu_to_be16(offset);
	gfs2_qstr2dirent(name, totlen - offset, ndent);

 * Takes a dent from which to grab space as an argument. Returns the
 * newly created dent.
static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
					    struct gfs2_dirent *dent,
					    const struct qstr *name,
					    struct buffer_head *bh)
	if (!gfs2_dirent_sentinel(dent))
		offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	return do_init_dirent(inode, dent, name, bh, offset);

static struct gfs2_dirent *gfs2_dirent_split_alloc(struct inode *inode,
						   struct buffer_head *bh,
						   const struct qstr *name,
						   void *ptr)
	struct gfs2_dirent *dent;
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
				gfs2_dirent_find_offset, name, ptr);
	if (!dent || IS_ERR(dent))
	return do_init_dirent(inode, dent, name, bh,
			      (unsigned)(ptr - (void *)dent));

static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
		    struct buffer_head **bhp)
	error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, 0, bhp);
	if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF)) {
		/* pr_info("block num=%llu\n", leaf_no); */

 * get_leaf_nr - Get a leaf number associated with the index
 * @dip: The GFS2 inode
 *
 * Returns: 0 on success, error code otherwise

static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
		       u64 *leaf_out)
	hash = gfs2_dir_get_hash_table(dip);
	error = PTR_ERR_OR_ZERO(hash);

	*leaf_out = be64_to_cpu(*(hash + index));

static int get_first_leaf(struct gfs2_inode *dip, u32 index,
			  struct buffer_head **bh_out)
	error = get_leaf_nr(dip, index, &leaf_no);
		error = get_leaf(dip, leaf_no, bh_out);

static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
					      const struct qstr *name,
					      gfs2_dscan_t scan,
					      struct buffer_head **pbh)
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf;
		unsigned int hsize = BIT(ip->i_depth);

		if (hsize * sizeof(u64) != i_size_read(inode)) {
			gfs2_consist_inode(ip);
			return ERR_PTR(-EIO);

		index = name->hash >> (32 - ip->i_depth);
		error = get_first_leaf(ip, index, &bh);
			return ERR_PTR(error);

			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						scan, name, NULL);
			leaf = (struct gfs2_leaf *)bh->b_data;
			ln = be64_to_cpu(leaf->lf_next);

			error = get_leaf(ip, ln, &bh);

		return error ? ERR_PTR(error) : NULL;

	error = gfs2_meta_inode_buffer(ip, &bh);
		return ERR_PTR(error);
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL);

	if (unlikely(dent == NULL || IS_ERR(dent))) {

static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
	struct gfs2_inode *ip = GFS2_I(inode);

	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	struct gfs2_dirent *dent;
	struct timespec64 tv = current_time(inode);

	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);

	bh = gfs2_meta_new(ip->i_gl, bn);

	gfs2_trans_remove_revoke(GFS2_SB(inode), bn, 1);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
	leaf = (struct gfs2_leaf *)bh->b_data;
	leaf->lf_depth = cpu_to_be16(depth);
	leaf->lf_entries = 0;
	leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE);

	leaf->lf_inode = cpu_to_be64(ip->i_no_addr);
	leaf->lf_dist = cpu_to_be32(1);
	leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
	leaf->lf_sec = cpu_to_be64(tv.tv_sec);
	memset(leaf->lf_reserved2, 0, sizeof(leaf->lf_reserved2));
	dent = (struct gfs2_dirent *)(leaf+1);
	gfs2_qstr2dirent(&empty_name, bh->b_size - sizeof(struct gfs2_leaf), dent);

 * dir_make_exhash - Convert a stuffed directory into an ExHash directory
 * @dip: The GFS2 inode
 *
 * Returns: 0 on success, error code otherwise

static int dir_make_exhash(struct inode *inode)
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_dirent *dent;
	struct buffer_head *bh, *dibh;
	struct gfs2_leaf *leaf;

	error = gfs2_meta_inode_buffer(dip, &dibh);

	/* Turn over a new leaf */

	leaf = new_leaf(inode, &bh, 0);

	gfs2_assert(sdp, dip->i_entries < BIT(16));
	leaf->lf_entries = cpu_to_be16(dip->i_entries);

	gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
			      sizeof(struct gfs2_dinode));

	/* Find last entry */

	args.len = bh->b_size - sizeof(struct gfs2_dinode) +
		   sizeof(struct gfs2_leaf);
	args.name = bh->b_data;
	dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
				gfs2_dirent_last, &args, NULL);

		return PTR_ERR(dent);

	/* Adjust the last dirent's record length
	   (Remember that dent still points to the last entry.) */

	dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) +
				       sizeof(struct gfs2_dinode) -
				       sizeof(struct gfs2_leaf));

	/* We're done with the new leaf block, now setup the new

	gfs2_trans_add_meta(dip->i_gl, dibh);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));

	for (x = sdp->sd_hash_ptrs; x--; lp++)
		*lp = cpu_to_be64(bn);

	i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
	gfs2_add_inode_blocks(&dip->i_inode, 1);
	dip->i_diskflags |= GFS2_DIF_EXHASH;

	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++)
		;

	gfs2_dinode_out(dip, dibh->b_data);

 * dir_split_leaf - Split a leaf block into two
 * @dip: The GFS2 inode
 *
 * Returns: 0 on success, error code on failure

static int dir_split_leaf(struct inode *inode, const struct qstr *name)
	struct gfs2_inode *dip = GFS2_I(inode);
	struct buffer_head *nbh, *obh, *dibh;
	struct gfs2_leaf *nleaf, *oleaf;
	struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
	u32 start, len, half_len, divider;

	index = name->hash >> (32 - dip->i_depth);
	error = get_leaf_nr(dip, index, &leaf_no);

	/* Get the old leaf block */
	error = get_leaf(dip, leaf_no, &obh);

	oleaf = (struct gfs2_leaf *)obh->b_data;
	if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
		return 1; /* can't split */

	gfs2_trans_add_meta(dip->i_gl, obh);

	nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);

	bn = nbh->b_blocknr;

	/* Compute the start and len of leaf pointers in the hash table. */
	len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth));
	half_len = len >> 1;
		fs_warn(GFS2_SB(inode), "i_depth %u lf_depth %u index %u\n",
			dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
		gfs2_consist_inode(dip);

	start = (index & ~(len - 1));

	/* Change the pointers.
	   Don't bother distinguishing stuffed from non-stuffed.
	   This code is complicated enough already. */
	lp = kmalloc_array(half_len, sizeof(__be64), GFP_NOFS);

	/* Change the pointers */
	for (x = 0; x < half_len; x++)
		lp[x] = cpu_to_be64(bn);

	gfs2_dir_hash_inval(dip);

	error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
				    half_len * sizeof(u64));
	if (error != half_len * sizeof(u64)) {

	/* Compute the divider */
	divider = (start + half_len) << (32 - dip->i_depth);

	/* Copy the entries */
	dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));

		if (dirent_next(dip, obh, &next))

		if (!gfs2_dirent_sentinel(dent) &&
		    be32_to_cpu(dent->de_hash) < divider) {
			void *ptr = ((char *)dent - obh->b_data) + nbh->b_data;
			str.name = (char*)(dent+1);
			str.len = be16_to_cpu(dent->de_name_len);
			str.hash = be32_to_cpu(dent->de_hash);
			new = gfs2_dirent_split_alloc(inode, nbh, &str, ptr);
				error = PTR_ERR(new);

			new->de_inum = dent->de_inum; /* No endian worries */
			new->de_type = dent->de_type; /* No endian worries */
			be16_add_cpu(&nleaf->lf_entries, 1);

			dirent_del(dip, obh, prev, dent);

			if (!oleaf->lf_entries)
				gfs2_consist_inode(dip);
			be16_add_cpu(&oleaf->lf_entries, -1);

	oleaf->lf_depth = nleaf->lf_depth;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
		gfs2_trans_add_meta(dip->i_gl, dibh);
		gfs2_add_inode_blocks(&dip->i_inode, 1);
		gfs2_dinode_out(dip, dibh->b_data);

 * dir_double_exhash - Double size of ExHash table
 * @dip: The GFS2 dinode
 *
 * Returns: 0 on success, error code on failure

static int dir_double_exhash(struct gfs2_inode *dip)
	struct buffer_head *dibh;

	hsize = BIT(dip->i_depth);
	hsize_bytes = hsize * sizeof(__be64);

	hc = gfs2_dir_get_hash_table(dip);

	hc2 = kmalloc_array(hsize_bytes, 2, GFP_NOFS | __GFP_NOWARN);
		hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);

	error = gfs2_meta_inode_buffer(dip, &dibh);

	for (x = 0; x < hsize; x++) {

	error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2);
	if (error != (hsize_bytes * 2))

	gfs2_dir_hash_inval(dip);
	dip->i_hash_cache = hc2;

	gfs2_dinode_out(dip, dibh->b_data);

	/* Replace original hash table & size */
	gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes);
	i_size_write(&dip->i_inode, hsize_bytes);
	gfs2_dinode_out(dip, dibh->b_data);

 * compare_dents - compare directory entries by hash value
 *
 * When comparing the hash entries of @a to @b:

static int compare_dents(const void *a, const void *b)
	const struct gfs2_dirent *dent_a, *dent_b;

	dent_a = *(const struct gfs2_dirent **)a;
	hash_a = dent_a->de_cookie;

	dent_b = *(const struct gfs2_dirent **)b;
	hash_b = dent_b->de_cookie;

	if (hash_a > hash_b)
	else if (hash_a < hash_b)

		unsigned int len_a = be16_to_cpu(dent_a->de_name_len);
		unsigned int len_b = be16_to_cpu(dent_b->de_name_len);

		else if (len_a < len_b)

			ret = memcmp(dent_a + 1, dent_b + 1, len_a);

 * do_filldir_main - read out directory entries
 * @dip: The GFS2 inode
 * @ctx: what to feed the entries to
 * @darr: an array of struct gfs2_dirent pointers to read
 * @entries: the number of entries in darr
 * @copied: pointer to int that's non-zero if an entry has been copied out
 *
 * Jump through some hoops to make sure that if there are hash collisions,
 * they are read out at the beginning of a buffer. We want to minimize
 * the possibility that they will fall into different readdir buffers or
 * that someone will want to seek to that location.
 *
 * Returns: errno, >0 if the actor tells you to stop
static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
			   struct gfs2_dirent **darr, u32 entries,
			   u32 sort_start, int *copied)
	const struct gfs2_dirent *dent, *dent_next;

	if (sort_start < entries)
		sort(&darr[sort_start], entries - sort_start,
		     sizeof(struct gfs2_dirent *), compare_dents, NULL);

	dent_next = darr[0];
	off_next = dent_next->de_cookie;

	for (x = 0, y = 1; x < entries; x++, y++) {

			dent_next = darr[y];
			off_next = dent_next->de_cookie;

			if (off_next == off) {
				if (*copied && !run)

		if (!dir_emit(ctx, (const char *)(dent + 1),
				be16_to_cpu(dent->de_name_len),
				be64_to_cpu(dent->de_inum.no_addr),
				be16_to_cpu(dent->de_type)))

	/* Increment the ctx->pos by one, so the next time we come into the
	   do_filldir fxn, we get the next entry instead of the last one in the

static void *gfs2_alloc_sort_buffer(unsigned size)
	if (size < KMALLOC_MAX_SIZE)
		ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
		ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);

static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh,
			    unsigned leaf_nr, struct gfs2_dirent **darr,
			    u32 entries)
	for (i = 0; i < entries; i++) {

		darr[i]->de_cookie = be32_to_cpu(darr[i]->de_hash);
		darr[i]->de_cookie = gfs2_disk_hash2offset(darr[i]->de_cookie);

		if (!sdp->sd_args.ar_loccookie)

		offset = (char *)(darr[i]) -
			 (bh->b_data + gfs2_dirent_offset(sdp, bh->b_data));
		offset /= GFS2_MIN_DIRENT_SIZE;
		offset += leaf_nr * sdp->sd_max_dents_per_leaf;
		if (offset >= GFS2_USE_HASH_FLAG ||
		    leaf_nr >= GFS2_USE_HASH_FLAG) {
			darr[i]->de_cookie |= GFS2_USE_HASH_FLAG;

		darr[i]->de_cookie &= GFS2_HASH_INDEX_MASK;
		darr[i]->de_cookie |= offset;

static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
			      int *copied, unsigned *depth,
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_leaf *lf;
	unsigned entries = 0, entries2 = 0;
	unsigned leaves = 0, leaf = 0, offset, sort_offset;
	struct gfs2_dirent **darr, *dent;
	struct dirent_gather g;
	struct buffer_head **larr;
	int error, i, need_sort = 0, sort_id;

		error = get_leaf(ip, lfn, &bh);
		lf = (struct gfs2_leaf *)bh->b_data;
			*depth = be16_to_cpu(lf->lf_depth);
		entries += be16_to_cpu(lf->lf_entries);
		lfn = be64_to_cpu(lf->lf_next);

	if (*depth < GFS2_DIR_MAX_DEPTH || !sdp->sd_args.ar_loccookie) {

	 * The extra 99 entries are not normally used, but are a buffer
	 * zone in case the number of entries in the leaf is corrupt.
	 * 99 is the maximum number of entries that can fit in a single
	 * leaf block.
	larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
	darr = (struct gfs2_dirent **)(larr + leaves);
	g.pdent = (const struct gfs2_dirent **)darr;

		error = get_leaf(ip, lfn, &bh);
		lf = (struct gfs2_leaf *)bh->b_data;
		lfn = be64_to_cpu(lf->lf_next);
		if (lf->lf_entries) {
			entries2 += be16_to_cpu(lf->lf_entries);
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						gfs2_dirent_gather, NULL, &g);
			error = PTR_ERR(dent);
			if (entries2 != g.offset) {
				fs_warn(sdp, "Number of entries corrupt in dir "
						"leaf %llu, entries2 (%u) != "
						"g.offset (%u)\n",
					(unsigned long long)bh->b_blocknr,
					entries2, g.offset);
				gfs2_consist_inode(ip);

			sort_id = gfs2_set_cookies(sdp, bh, leaf, &darr[offset],
						   be16_to_cpu(lf->lf_entries));
			if (!need_sort && sort_id >= 0) {
				sort_offset = offset + sort_id;

			larr[leaf++] = NULL;

	BUG_ON(entries2 != entries);
	error = do_filldir_main(ip, ctx, darr, entries, need_sort ?
				sort_offset : entries, copied);

	for(i = 0; i < leaf; i++)

 * gfs2_dir_readahead - Issue read-ahead requests for leaf blocks.
 *
 * Note: we can't calculate each index like dir_e_read can because we don't
 * have the leaf, and therefore we don't have the depth, and therefore we
 * don't have the length. So we have to just read enough ahead to make up
 * for the loss of information.

static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
			       struct file_ra_state *f_ra)
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	u64 blocknr = 0, last;

	/* First check if we've already read-ahead for the whole range. */
	if (index + MAX_RA_BLOCKS < f_ra->start)

	f_ra->start = max((pgoff_t)index, f_ra->start);
	for (count = 0; count < MAX_RA_BLOCKS; count++) {
		if (f_ra->start >= hsize) /* if exceeded the hash table */

		blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
		if (blocknr == last)

		bh = gfs2_getbuf(gl, blocknr, 1);
		if (trylock_buffer(bh)) {
			if (buffer_uptodate(bh)) {

			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ,
				  REQ_RAHEAD | REQ_META | REQ_PRIO,
				  bh);

 * dir_e_read - Reads the entries from a directory into a filldir buffer
 * @dip: dinode pointer
 * @ctx: actor to feed the entries to

static int dir_e_read(struct inode *inode, struct dir_context *ctx,
		      struct file_ra_state *f_ra)
	struct gfs2_inode *dip = GFS2_I(inode);

	hsize = BIT(dip->i_depth);
	hash = gfs2_dir_offset2hash(ctx->pos);
	index = hash >> (32 - dip->i_depth);

	if (dip->i_hash_cache == NULL)

	lp = gfs2_dir_get_hash_table(dip);

	gfs2_dir_readahead(inode, hsize, index, f_ra);

	while (index < hsize) {
		error = gfs2_dir_read_leaf(inode, ctx,
					   &copied, &depth,
					   be64_to_cpu(lp[index]));

		len = BIT(dip->i_depth - depth);
		index = (index & ~(len - 1)) + len;

int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
		  struct file_ra_state *f_ra)
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct dirent_gather g;
	struct gfs2_dirent **darr, *dent;
	struct buffer_head *dibh;

	if (!dip->i_entries)

	if (dip->i_diskflags & GFS2_DIF_EXHASH)
		return dir_e_read(inode, ctx, f_ra);

	if (!gfs2_is_stuffed(dip)) {
		gfs2_consist_inode(dip);

	error = gfs2_meta_inode_buffer(dip, &dibh);

	/* 96 is max number of dirents which can be stuffed into an inode */
	darr = kmalloc_array(96, sizeof(struct gfs2_dirent *), GFP_NOFS);
		g.pdent = (const struct gfs2_dirent **)darr;

		dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
					gfs2_dirent_gather, NULL, &g);
			error = PTR_ERR(dent);

		if (dip->i_entries != g.offset) {
			fs_warn(sdp, "Number of entries corrupt in dir %llu, "
				"ip->i_entries (%u) != g.offset (%u)\n",
				(unsigned long long)dip->i_no_addr,
				dip->i_entries, g.offset);
			gfs2_consist_inode(dip);

		gfs2_set_cookies(sdp, dibh, 0, darr, dip->i_entries);
		error = do_filldir_main(dip, ctx, darr,
					dip->i_entries, 0, &copied);

 * gfs2_dir_search - Search a directory
 * @dip: The GFS2 dir inode
 * @name: The name we are looking up
 * @fail_on_exist: Fail if the name exists rather than looking it up
 *
 * This routine searches a directory for a file or another directory.
 * Assumes a glock is held on dip.

struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	u64 addr, formal_ino;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
		struct inode *inode;

			return ERR_CAST(dent);
		dtype = be16_to_cpu(dent->de_type);
		rahead = be16_to_cpu(dent->de_rahead);
		addr = be64_to_cpu(dent->de_inum.no_addr);
		formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino);

			return ERR_PTR(-EEXIST);
		inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino,
					  GFS2_BLKST_FREE /* ignore */);
			GFS2_I(inode)->i_rahead = rahead;

	return ERR_PTR(-ENOENT);

int gfs2_dir_check(struct inode *dir, const struct qstr *name,
		   const struct gfs2_inode *ip)
	struct buffer_head *bh;
	struct gfs2_dirent *dent;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
			return PTR_ERR(dent);

			if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr)
			if (be64_to_cpu(dent->de_inum.no_formal_ino) !=
			    ip->i_no_formal_ino)
			if (unlikely(IF2DT(ip->i_inode.i_mode) !=
			    be16_to_cpu(dent->de_type))) {
				gfs2_consist_inode(GFS2_I(dir));

 * dir_new_leaf - Add a new leaf onto hash chain
 * @inode: The directory
 * @name: The name we are adding
 *
 * This adds a new dir leaf onto an existing leaf when there is not
 * enough space to add a new dir entry. This is a last resort after
 * we've expanded the hash table to max size and also split existing
 * leaf blocks, so it will only occur for very large directories.
 *
 * The dist parameter is set to 1 for leaf blocks directly attached
 * to the hash table, 2 for one layer of indirection, 3 for two layers
 * etc. We are thus able to tell the difference between an old leaf
 * with dist set to zero (i.e. "don't know") and a new one where we
 * set this information for debug/fsck purposes.
 *
 * Returns: 0 on success, or -ve on error

static int dir_new_leaf(struct inode *inode, const struct qstr *name)
	struct buffer_head *bh, *obh;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_leaf *leaf, *oleaf;

	index = name->hash >> (32 - ip->i_depth);
	error = get_first_leaf(ip, index, &obh);

		oleaf = (struct gfs2_leaf *)obh->b_data;
		bn = be64_to_cpu(oleaf->lf_next);

		error = get_leaf(ip, bn, &obh);

	gfs2_trans_add_meta(ip->i_gl, obh);

	leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));

	leaf->lf_dist = cpu_to_be32(dist);
	oleaf->lf_next = cpu_to_be64(bh->b_blocknr);

	error = gfs2_meta_inode_buffer(ip, &bh);

	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_add_inode_blocks(&ip->i_inode, 1);
	gfs2_dinode_out(ip, bh->b_data);

static u16 gfs2_inode_ra_len(const struct gfs2_inode *ip)
	u64 where = ip->i_no_addr + 1;
	if (ip->i_eattr == where)

 * gfs2_dir_add - Add new filename into directory
 * @inode: The directory inode
 * @name: The new name
 * @nip: The GFS2 inode to be linked in to the directory
 * @da: The directory addition info
 *
 * If the call to gfs2_diradd_alloc_required resulted in there being
 * no need to allocate any new directory blocks, then it will contain
 * a pointer to the directory entry and the bh in which it resides. We
 * can use that without having to repeat the search. If there was no
 * free space, then we must now create more space.
 *
 * Returns: 0 on success, error code on failure

int gfs2_dir_add(struct inode *inode, const struct qstr *name,
		 const struct gfs2_inode *nip, struct gfs2_diradd *da)
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh = da->bh;
	struct gfs2_dirent *dent = da->dent;
	struct timespec64 tv;
	struct gfs2_leaf *leaf;

		if (da->bh == NULL) {
			dent = gfs2_dirent_search(inode, name,
						  gfs2_dirent_find_space, &bh);

				return PTR_ERR(dent);
			dent = gfs2_init_dirent(inode, dent, name, bh);
			gfs2_inum_out(nip, dent);
			dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
			dent->de_rahead = cpu_to_be16(gfs2_inode_ra_len(nip));
			tv = current_time(&ip->i_inode);
			if (ip->i_diskflags & GFS2_DIF_EXHASH) {
				leaf = (struct gfs2_leaf *)bh->b_data;
				be16_add_cpu(&leaf->lf_entries, 1);
				leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
				leaf->lf_sec = cpu_to_be64(tv.tv_sec);

			ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv;
			if (S_ISDIR(nip->i_inode.i_mode))
				inc_nlink(&ip->i_inode);
			mark_inode_dirty(inode);

		if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) {
			error = dir_make_exhash(inode);

		error = dir_split_leaf(inode, name);

		if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
			error = dir_double_exhash(ip);

			error = dir_split_leaf(inode, name);

		error = dir_new_leaf(inode, name);

 * gfs2_dir_del - Delete a directory entry
 * @dip: The GFS2 inode
 * @filename: The filename
 *
 * Returns: 0 on success, error code on failure
int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
	const struct qstr *name = &dentry->d_name;
	struct gfs2_dirent *dent, *prev = NULL;
	struct buffer_head *bh;
	struct timespec64 tv = current_time(&dip->i_inode);

	/* Returns _either_ the entry (if it's first in block) or the
	   previous entry otherwise */
	dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);
		gfs2_consist_inode(dip);
		gfs2_consist_inode(dip);
		return PTR_ERR(dent);

	/* If not first in block, adjust pointers accordingly */
	if (gfs2_dirent_find(dent, name, NULL) == 0) {
		dent = (struct gfs2_dirent *)((char *)dent + be16_to_cpu(prev->de_rec_len));

	dirent_del(dip, bh, prev, dent);
	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
		u16 entries = be16_to_cpu(leaf->lf_entries);
			gfs2_consist_inode(dip);
		leaf->lf_entries = cpu_to_be16(--entries);
		leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
		leaf->lf_sec = cpu_to_be64(tv.tv_sec);

	if (!dip->i_entries)
		gfs2_consist_inode(dip);

	dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
	if (d_is_dir(dentry))
		drop_nlink(&dip->i_inode);
	mark_inode_dirty(&dip->i_inode);

 * gfs2_dir_mvino - Change inode number of directory entry
 * @dip: The GFS2 inode
 *
 * This routine changes the inode number of a directory entry. It's used
 * by rename to change ".." when a directory is moved.
 * Assumes a glock is held on dvp.

int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
		   const struct gfs2_inode *nip, unsigned int new_type)
	struct buffer_head *bh;
	struct gfs2_dirent *dent;

	dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);
		gfs2_consist_inode(dip);
		return PTR_ERR(dent);

	gfs2_trans_add_meta(dip->i_gl, bh);
	gfs2_inum_out(nip, dent);
	dent->de_type = cpu_to_be16(new_type);

	dip->i_inode.i_mtime = dip->i_inode.i_ctime = current_time(&dip->i_inode);
	mark_inode_dirty_sync(&dip->i_inode);

 * leaf_dealloc - Deallocate a directory leaf
 * @dip: the directory
 * @index: the hash table offset in the directory
 * @len: the number of pointers to this leaf
 * @leaf_no: the leaf number
 * @leaf_bh: buffer_head for the starting leaf
 * @last_dealloc: 1 if this is the final dealloc for the leaf, else 0
static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
			u64 leaf_no, struct buffer_head *leaf_bh,
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_leaf *tmp_leaf;
	struct gfs2_rgrp_list rlist;
	struct buffer_head *bh, *dibh;
	unsigned int rg_blocks = 0, l_blocks = 0;
	unsigned int x, size = len * sizeof(u64);

	error = gfs2_rindex_update(sdp);

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
		ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
			       PAGE_KERNEL);

	error = gfs2_quota_hold(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);

	/* Count the number of leaves */

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);

		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);

		gfs2_rlist_add(dip, &rlist, blk);

	gfs2_rlist_alloc(&rlist);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);

		rg_blocks += rgd->rd_length;

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);

	error = gfs2_trans_begin(sdp,
			rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
			RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
		goto out_rg_gunlock;

	for (blk = leaf_no; blk; blk = nblk) {
		struct gfs2_rgrpd *rgd;

		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);

		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);

		rgd = gfs2_blk2rgrpd(sdp, blk, true);
		gfs2_free_meta(dip, rgd, blk, 1);
		gfs2_add_inode_blocks(&dip->i_inode, -1);

	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
	if (error != size) {

	error = gfs2_meta_inode_buffer(dip, &dibh);

	gfs2_trans_add_meta(dip->i_gl, dibh);
	/* On the last dealloc, make this a regular file in case we crash.
	   (We don't want to free these blocks a second time.) */
		dip->i_inode.i_mode = S_IFREG;
	gfs2_dinode_out(dip, dibh->b_data);

	gfs2_trans_end(sdp);

	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);

	gfs2_rlist_free(&rlist);
	gfs2_quota_unhold(dip);

 * gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
 * @dip: the directory
 *
 * Dealloc all on-disk directory leaves to FREEMETA state
 * Change on-disk inode type to "regular file"
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	u32 index = 0, next_index;
	int error = 0, last;

	hsize = BIT(dip->i_depth);

	lp = gfs2_dir_get_hash_table(dip);

	while (index < hsize) {
		leaf_no = be64_to_cpu(lp[index]);

			error = get_leaf(dip, leaf_no, &bh);

			leaf = (struct gfs2_leaf *)bh->b_data;
			len = BIT(dip->i_depth - be16_to_cpu(leaf->lf_depth));

			next_index = (index & ~(len - 1)) + len;
			last = ((next_index >= hsize) ? 1 : 0);
			error = leaf_dealloc(dip, index, len, leaf_no, bh,
					     last);

	if (index != hsize) {
		gfs2_consist_inode(dip);

 * gfs2_diradd_alloc_required - find if adding entry will require an allocation
 * @ip: the file being written to
 * @filename: the filename that's going to be added
 * @da: The structure to return dir alloc info
 *
 * Returns: 0 if ok, -ve on error
int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name,
			       struct gfs2_diradd *da)
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const unsigned int extra = sizeof(struct gfs2_dinode) - sizeof(struct gfs2_leaf);
	struct gfs2_dirent *dent;
	struct buffer_head *bh;

	dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);

		da->nr_blocks = sdp->sd_max_dirres;
		if (!(ip->i_diskflags & GFS2_DIF_EXHASH) &&
		    (GFS2_DIRENT_SIZE(name->len) < extra))

		return PTR_ERR(dent);