// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/xattr.h>

#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 *
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct dentry_operations ceph_dentry_ops;

static bool __dentry_lease_is_valid(struct ceph_dentry_info *di);
static int __dir_lease_try_check(const struct dentry *dentry);

/*
 * Initialize ceph dentry state.
 */
static int ceph_d_init(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dentry->d_sb);

	di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
	if (!di)
		return -ENOMEM;          /* oh well */

	di->lease_session = NULL;
	dentry->d_fsdata = di;
	INIT_LIST_HEAD(&di->lease_list);

	atomic64_inc(&mdsc->metric.total_dentries);

	return 0;
}
/*
 * for f_pos for readdir:
 * - hash order:
 *	(0xff << 52) | ((24 bits hash) << 28) |
 *	(the nth entry has hash collision);
 * - frag+name order:
 *	((frag value) << 28) | (the nth entry in frag);
 */
#define OFFSET_BITS	28
#define OFFSET_MASK	((1 << OFFSET_BITS) - 1)
#define HASH_ORDER	(0xffull << (OFFSET_BITS + 24))

loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
{
	loff_t fpos = ((loff_t)high << 28) | (loff_t)off;

	if (hash_order)
		fpos |= HASH_ORDER;
	return fpos;
}

static bool is_hash_order(loff_t p)
{
	return (p & HASH_ORDER) == HASH_ORDER;
}

static unsigned fpos_frag(loff_t p)
{
	return p >> OFFSET_BITS;
}

static unsigned fpos_hash(loff_t p)
{
	return ceph_frag_value(fpos_frag(p));
}

static unsigned fpos_off(loff_t p)
{
	return p & OFFSET_MASK;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));

	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
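/*
 * Worked example of the encoding above (illustrative only; the values are
 * made up): in hash order, a 24-bit name hash of 0x123456 with collision
 * index 7 is encoded as
 *
 *	ceph_make_fpos(0x123456, 7, true)
 *		== HASH_ORDER | (0x123456ULL << 28) | 7
 *		== 0x0ff1234560000007
 *
 * fpos_frag() then yields 0xff123456; the 0xff marker bits are what
 * is_hash_order() tests for, fpos_hash() recovers 0x123456 and
 * fpos_off() recovers 7.  In frag+name order the same helpers return the
 * frag value and the entry's position within the frag instead.
 */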
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_fs_client *fsc,
			    struct ceph_dir_file_info *dfi,
			    const char *name, int len,
			    unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	kfree(dfi->last_name);
	dfi->last_name = buf;
	memcpy(dfi->last_name, name, len);
	dfi->last_name[len] = 0;
	dfi->next_offset = next_offset;
	doutc(fsc->client, "'%s'\n", dfi->last_name);
	return 0;
}
static struct dentry *
__dcache_find_get_entry(struct dentry *parent, u64 idx,
			struct ceph_readdir_cache_control *cache_ctl)
{
	struct inode *dir = d_inode(parent);
	struct ceph_client *cl = ceph_inode_to_client(dir);
	struct dentry *dentry;
	unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
	loff_t ptr_pos = idx * sizeof(struct dentry *);
	pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;

	if (ptr_pos >= i_size_read(dir))
		return NULL;

	if (!cache_ctl->page || ptr_pgoff != cache_ctl->page->index) {
		ceph_readdir_cache_release(cache_ctl);
		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
		if (!cache_ctl->page) {
			doutc(cl, " page %lu not found\n", ptr_pgoff);
			return ERR_PTR(-EAGAIN);
		}
		/* reading/filling the cache are serialized by
		 * i_rwsem, no need to use page lock */
		unlock_page(cache_ctl->page);
		cache_ctl->dentries = kmap(cache_ctl->page);
	}

	cache_ctl->index = idx & idx_mask;

	spin_lock(&parent->d_lock);
	/* check i_size again here, because empty directory can be
	 * marked as complete while not holding the i_rwsem. */
	if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
		dentry = cache_ctl->dentries[cache_ctl->index];
	else
		dentry = NULL;
	spin_unlock(&parent->d_lock);
	if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
		dentry = NULL;

	return dentry ? : ERR_PTR(-EAGAIN);
}
175 * When possible, we try to satisfy a readdir by peeking at the
176 * dcache. We make this work by carefully ordering dentries on
177 * d_children when we initially get results back from the MDS, and
178 * falling back to a "normal" sync readdir if any dentries in the dir
181 * Complete dir indicates that we have all dentries in the dir. It is
182 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
183 * the MDS if/when the directory is modified).
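/*
 * In practice the dcache fast path is only attempted when all of the
 * following hold (see the checks in ceph_readdir() below); otherwise we
 * fall back to CEPH_MDS_OP_READDIR requests to the MDS:
 *
 *	ceph_test_mount_opt(fsc, DCACHE) &&
 *	!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
 *	ceph_snap(inode) != CEPH_SNAPDIR &&
 *	__ceph_dir_is_complete_ordered(ci) &&
 *	__ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)
 */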
185 static int __dcache_readdir(struct file
*file
, struct dir_context
*ctx
,
188 struct ceph_dir_file_info
*dfi
= file
->private_data
;
189 struct dentry
*parent
= file
->f_path
.dentry
;
190 struct inode
*dir
= d_inode(parent
);
191 struct ceph_fs_client
*fsc
= ceph_inode_to_fs_client(dir
);
192 struct ceph_client
*cl
= ceph_inode_to_client(dir
);
193 struct dentry
*dentry
, *last
= NULL
;
194 struct ceph_dentry_info
*di
;
195 struct ceph_readdir_cache_control cache_ctl
= {};
199 doutc(cl
, "%p %llx.%llx v%u at %llx\n", dir
, ceph_vinop(dir
),
200 (unsigned)shared_gen
, ctx
->pos
);
202 /* search start position */
204 u64 count
= div_u64(i_size_read(dir
), sizeof(struct dentry
*));
206 u64 step
= count
>> 1;
207 dentry
= __dcache_find_get_entry(parent
, idx
+ step
,
210 /* use linear search */
214 if (IS_ERR(dentry
)) {
215 err
= PTR_ERR(dentry
);
218 di
= ceph_dentry(dentry
);
219 spin_lock(&dentry
->d_lock
);
220 if (fpos_cmp(di
->offset
, ctx
->pos
) < 0) {
226 spin_unlock(&dentry
->d_lock
);
230 doutc(cl
, "%p %llx.%llx cache idx %llu\n", dir
,
231 ceph_vinop(dir
), idx
);
236 bool emit_dentry
= false;
237 dentry
= __dcache_find_get_entry(parent
, idx
++, &cache_ctl
);
239 dfi
->file_info
.flags
|= CEPH_F_ATEND
;
243 if (IS_ERR(dentry
)) {
244 err
= PTR_ERR(dentry
);
248 spin_lock(&dentry
->d_lock
);
249 di
= ceph_dentry(dentry
);
250 if (d_unhashed(dentry
) ||
251 d_really_is_negative(dentry
) ||
252 di
->lease_shared_gen
!= shared_gen
||
253 ((dentry
->d_flags
& DCACHE_NOKEY_NAME
) &&
254 fscrypt_has_encryption_key(dir
))) {
255 spin_unlock(&dentry
->d_lock
);
260 if (fpos_cmp(ctx
->pos
, di
->offset
) <= 0) {
261 __ceph_dentry_dir_lease_touch(di
);
264 spin_unlock(&dentry
->d_lock
);
267 doutc(cl
, " %llx dentry %p %pd %p\n", di
->offset
,
268 dentry
, dentry
, d_inode(dentry
));
269 ctx
->pos
= di
->offset
;
270 if (!dir_emit(ctx
, dentry
->d_name
.name
,
271 dentry
->d_name
.len
, ceph_present_inode(d_inode(dentry
)),
272 d_inode(dentry
)->i_mode
>> 12)) {
287 ceph_readdir_cache_release(&cache_ctl
);
290 di
= ceph_dentry(last
);
291 ret
= note_last_dentry(fsc
, dfi
, last
->d_name
.name
,
293 fpos_off(di
->offset
) + 1);
297 /* last_name no longer matches cache index */
298 if (dfi
->readdir_cache_idx
>= 0) {
299 dfi
->readdir_cache_idx
= -1;
300 dfi
->dir_release_count
= 0;
static bool need_send_readdir(struct ceph_dir_file_info *dfi, loff_t pos)
{
	if (!dfi->last_readdir)
		return true;
	if (is_hash_order(pos))
		return !ceph_frag_contains_value(dfi->frag, fpos_hash(pos));
	return dfi->frag != fpos_frag(pos);
}
316 static int ceph_readdir(struct file
*file
, struct dir_context
*ctx
)
318 struct ceph_dir_file_info
*dfi
= file
->private_data
;
319 struct inode
*inode
= file_inode(file
);
320 struct ceph_inode_info
*ci
= ceph_inode(inode
);
321 struct ceph_fs_client
*fsc
= ceph_inode_to_fs_client(inode
);
322 struct ceph_mds_client
*mdsc
= fsc
->mdsc
;
323 struct ceph_client
*cl
= fsc
->client
;
327 struct ceph_mds_reply_info_parsed
*rinfo
;
329 doutc(cl
, "%p %llx.%llx file %p pos %llx\n", inode
,
330 ceph_vinop(inode
), file
, ctx
->pos
);
331 if (dfi
->file_info
.flags
& CEPH_F_ATEND
)
334 /* always start with . and .. */
336 doutc(cl
, "%p %llx.%llx off 0 -> '.'\n", inode
,
338 if (!dir_emit(ctx
, ".", 1, ceph_present_inode(inode
),
339 inode
->i_mode
>> 12))
345 struct dentry
*dentry
= file
->f_path
.dentry
;
347 spin_lock(&dentry
->d_lock
);
348 ino
= ceph_present_inode(dentry
->d_parent
->d_inode
);
349 spin_unlock(&dentry
->d_lock
);
351 doutc(cl
, "%p %llx.%llx off 1 -> '..'\n", inode
,
353 if (!dir_emit(ctx
, "..", 2, ino
, inode
->i_mode
>> 12))
358 err
= ceph_fscrypt_prepare_readdir(inode
);
362 spin_lock(&ci
->i_ceph_lock
);
363 /* request Fx cap. if have Fx, we don't need to release Fs cap
364 * for later create/unlink. */
365 __ceph_touch_fmode(ci
, mdsc
, CEPH_FILE_MODE_WR
);
366 /* can we use the dcache? */
367 if (ceph_test_mount_opt(fsc
, DCACHE
) &&
368 !ceph_test_mount_opt(fsc
, NOASYNCREADDIR
) &&
369 ceph_snap(inode
) != CEPH_SNAPDIR
&&
370 __ceph_dir_is_complete_ordered(ci
) &&
371 __ceph_caps_issued_mask_metric(ci
, CEPH_CAP_FILE_SHARED
, 1)) {
372 int shared_gen
= atomic_read(&ci
->i_shared_gen
);
374 spin_unlock(&ci
->i_ceph_lock
);
375 err
= __dcache_readdir(file
, ctx
, shared_gen
);
379 spin_unlock(&ci
->i_ceph_lock
);
382 /* proceed with a normal readdir */
384 /* do we have the correct frag content buffered? */
385 if (need_send_readdir(dfi
, ctx
->pos
)) {
386 struct ceph_mds_request
*req
;
387 int op
= ceph_snap(inode
) == CEPH_SNAPDIR
?
388 CEPH_MDS_OP_LSSNAP
: CEPH_MDS_OP_READDIR
;
390 /* discard old result, if any */
391 if (dfi
->last_readdir
) {
392 ceph_mdsc_put_request(dfi
->last_readdir
);
393 dfi
->last_readdir
= NULL
;
396 if (is_hash_order(ctx
->pos
)) {
397 /* fragtree isn't always accurate. choose frag
398 * based on previous reply when possible. */
399 if (frag
== (unsigned)-1)
400 frag
= ceph_choose_frag(ci
, fpos_hash(ctx
->pos
),
403 frag
= fpos_frag(ctx
->pos
);
406 doutc(cl
, "fetching %p %llx.%llx frag %x offset '%s'\n",
407 inode
, ceph_vinop(inode
), frag
, dfi
->last_name
);
408 req
= ceph_mdsc_create_request(mdsc
, op
, USE_AUTH_MDS
);
412 err
= ceph_alloc_readdir_reply_buffer(req
, inode
);
414 ceph_mdsc_put_request(req
);
417 /* hints to request -> mds selection code */
418 req
->r_direct_mode
= USE_AUTH_MDS
;
419 if (op
== CEPH_MDS_OP_READDIR
) {
420 req
->r_direct_hash
= ceph_frag_value(frag
);
421 __set_bit(CEPH_MDS_R_DIRECT_IS_HASH
, &req
->r_req_flags
);
422 req
->r_inode_drop
= CEPH_CAP_FILE_EXCL
;
424 if (dfi
->last_name
) {
425 struct qstr d_name
= { .name
= dfi
->last_name
,
426 .len
= strlen(dfi
->last_name
) };
428 req
->r_path2
= kzalloc(NAME_MAX
+ 1, GFP_KERNEL
);
430 ceph_mdsc_put_request(req
);
434 err
= ceph_encode_encrypted_dname(inode
, &d_name
,
437 ceph_mdsc_put_request(req
);
440 } else if (is_hash_order(ctx
->pos
)) {
441 req
->r_args
.readdir
.offset_hash
=
442 cpu_to_le32(fpos_hash(ctx
->pos
));
445 req
->r_dir_release_cnt
= dfi
->dir_release_count
;
446 req
->r_dir_ordered_cnt
= dfi
->dir_ordered_count
;
447 req
->r_readdir_cache_idx
= dfi
->readdir_cache_idx
;
448 req
->r_readdir_offset
= dfi
->next_offset
;
449 req
->r_args
.readdir
.frag
= cpu_to_le32(frag
);
450 req
->r_args
.readdir
.flags
=
451 cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS
);
453 req
->r_inode
= inode
;
455 req
->r_dentry
= dget(file
->f_path
.dentry
);
456 err
= ceph_mdsc_do_request(mdsc
, NULL
, req
);
458 ceph_mdsc_put_request(req
);
461 doutc(cl
, "%p %llx.%llx got and parsed readdir result=%d"
462 " on frag %x, end=%d, complete=%d, hash_order=%d\n",
463 inode
, ceph_vinop(inode
), err
, frag
,
464 (int)req
->r_reply_info
.dir_end
,
465 (int)req
->r_reply_info
.dir_complete
,
466 (int)req
->r_reply_info
.hash_order
);
468 rinfo
= &req
->r_reply_info
;
469 if (le32_to_cpu(rinfo
->dir_dir
->frag
) != frag
) {
470 frag
= le32_to_cpu(rinfo
->dir_dir
->frag
);
471 if (!rinfo
->hash_order
) {
472 dfi
->next_offset
= req
->r_readdir_offset
;
473 /* adjust ctx->pos to beginning of frag */
474 ctx
->pos
= ceph_make_fpos(frag
,
481 dfi
->last_readdir
= req
;
483 if (test_bit(CEPH_MDS_R_DID_PREPOPULATE
, &req
->r_req_flags
)) {
484 dfi
->readdir_cache_idx
= req
->r_readdir_cache_idx
;
485 if (dfi
->readdir_cache_idx
< 0) {
486 /* preclude from marking dir ordered */
487 dfi
->dir_ordered_count
= 0;
488 } else if (ceph_frag_is_leftmost(frag
) &&
489 dfi
->next_offset
== 2) {
490 /* note dir version at start of readdir so
491 * we can tell if any dentries get dropped */
492 dfi
->dir_release_count
= req
->r_dir_release_cnt
;
493 dfi
->dir_ordered_count
= req
->r_dir_ordered_cnt
;
496 doutc(cl
, "%p %llx.%llx !did_prepopulate\n", inode
,
498 /* disable readdir cache */
499 dfi
->readdir_cache_idx
= -1;
500 /* preclude from marking dir complete */
501 dfi
->dir_release_count
= 0;
504 /* note next offset and last dentry name */
505 if (rinfo
->dir_nr
> 0) {
506 struct ceph_mds_reply_dir_entry
*rde
=
507 rinfo
->dir_entries
+ (rinfo
->dir_nr
-1);
508 unsigned next_offset
= req
->r_reply_info
.dir_end
?
509 2 : (fpos_off(rde
->offset
) + 1);
510 err
= note_last_dentry(fsc
, dfi
, rde
->name
,
511 rde
->name_len
, next_offset
);
513 ceph_mdsc_put_request(dfi
->last_readdir
);
514 dfi
->last_readdir
= NULL
;
517 } else if (req
->r_reply_info
.dir_end
) {
518 dfi
->next_offset
= 2;
523 rinfo
= &dfi
->last_readdir
->r_reply_info
;
524 doutc(cl
, "%p %llx.%llx frag %x num %d pos %llx chunk first %llx\n",
525 inode
, ceph_vinop(inode
), dfi
->frag
, rinfo
->dir_nr
, ctx
->pos
,
526 rinfo
->dir_nr
? rinfo
->dir_entries
[0].offset
: 0LL);
529 /* search start position */
530 if (rinfo
->dir_nr
> 0) {
531 int step
, nr
= rinfo
->dir_nr
;
534 if (rinfo
->dir_entries
[i
+ step
].offset
< ctx
->pos
) {
542 for (; i
< rinfo
->dir_nr
; i
++) {
543 struct ceph_mds_reply_dir_entry
*rde
= rinfo
->dir_entries
+ i
;
545 if (rde
->offset
< ctx
->pos
) {
547 "%p %llx.%llx rde->offset 0x%llx ctx->pos 0x%llx\n",
548 inode
, ceph_vinop(inode
), rde
->offset
, ctx
->pos
);
552 if (WARN_ON_ONCE(!rde
->inode
.in
))
555 ctx
->pos
= rde
->offset
;
556 doutc(cl
, "%p %llx.%llx (%d/%d) -> %llx '%.*s' %p\n", inode
,
557 ceph_vinop(inode
), i
, rinfo
->dir_nr
, ctx
->pos
,
558 rde
->name_len
, rde
->name
, &rde
->inode
.in
);
560 if (!dir_emit(ctx
, rde
->name
, rde
->name_len
,
561 ceph_present_ino(inode
->i_sb
, le64_to_cpu(rde
->inode
.in
->ino
)),
562 le32_to_cpu(rde
->inode
.in
->mode
) >> 12)) {
564 * NOTE: Here no need to put the 'dfi->last_readdir',
565 * because when dir_emit stops us it most likely doesn't have
566 * enough memory, etc., so the next readdir will continue from it.
569 doutc(cl
, "filldir stopping us...\n");
573 /* Reset the lengths to their original allocated vals */
577 ceph_mdsc_put_request(dfi
->last_readdir
);
578 dfi
->last_readdir
= NULL
;
580 if (dfi
->next_offset
> 2) {
586 if (!ceph_frag_is_rightmost(dfi
->frag
)) {
587 frag
= ceph_frag_next(dfi
->frag
);
588 if (is_hash_order(ctx
->pos
)) {
589 loff_t new_pos
= ceph_make_fpos(ceph_frag_value(frag
),
590 dfi
->next_offset
, true);
591 if (new_pos
> ctx
->pos
)
595 ctx
->pos
= ceph_make_fpos(frag
, dfi
->next_offset
,
597 kfree(dfi
->last_name
);
598 dfi
->last_name
= NULL
;
600 doutc(cl
, "%p %llx.%llx next frag is %x\n", inode
,
601 ceph_vinop(inode
), frag
);
604 dfi
->file_info
.flags
|= CEPH_F_ATEND
;
607 * if dir_release_count still matches the dir, no dentries
608 * were released during the whole readdir, and we should have
609 * the complete dir contents in our cache.
611 if (atomic64_read(&ci
->i_release_count
) ==
612 dfi
->dir_release_count
) {
613 spin_lock(&ci
->i_ceph_lock
);
614 if (dfi
->dir_ordered_count
==
615 atomic64_read(&ci
->i_ordered_count
)) {
616 doutc(cl
, " marking %p %llx.%llx complete and ordered\n",
617 inode
, ceph_vinop(inode
));
618 /* use i_size to track number of entries in
620 BUG_ON(dfi
->readdir_cache_idx
< 0);
621 i_size_write(inode
, dfi
->readdir_cache_idx
*
622 sizeof(struct dentry
*));
624 doutc(cl
, " marking %llx.%llx complete\n",
627 __ceph_dir_set_complete(ci
, dfi
->dir_release_count
,
628 dfi
->dir_ordered_count
);
629 spin_unlock(&ci
->i_ceph_lock
);
631 doutc(cl
, "%p %llx.%llx file %p done.\n", inode
, ceph_vinop(inode
),
static void reset_readdir(struct ceph_dir_file_info *dfi)
{
	if (dfi->last_readdir) {
		ceph_mdsc_put_request(dfi->last_readdir);
		dfi->last_readdir = NULL;
	}
	kfree(dfi->last_name);
	dfi->last_name = NULL;
	dfi->dir_release_count = 0;
	dfi->readdir_cache_idx = -1;
	dfi->next_offset = 2;  /* compensate for . and .. */
	dfi->file_info.flags &= ~CEPH_F_ATEND;
}
/*
 * discard buffered readdir content on seekdir(0), or seek to new frag,
 * or seek prior to current chunk
 */
static bool need_reset_readdir(struct ceph_dir_file_info *dfi, loff_t new_pos)
{
	struct ceph_mds_reply_info_parsed *rinfo;
	loff_t chunk_offset;

	if (new_pos == 0)
		return true;
	if (is_hash_order(new_pos)) {
		/* no need to reset last_name for a forward seek when
		 * dentries are sorted in hash order */
	} else if (dfi->frag != fpos_frag(new_pos)) {
		return true;
	}
	rinfo = dfi->last_readdir ? &dfi->last_readdir->r_reply_info : NULL;
	if (!rinfo || !rinfo->dir_nr)
		return true;
	chunk_offset = rinfo->dir_entries[0].offset;
	return new_pos < chunk_offset ||
	       is_hash_order(new_pos) != is_hash_order(chunk_offset);
}
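/*
 * A few illustrative cases for the helper above: an lseek() back to
 * offset 0 always resets, as does seeking into a different frag (when
 * offsets are not in hash order) or seeking to a position before the
 * first entry of the currently buffered chunk.  A short forward seek
 * that stays within the buffered chunk keeps dfi->last_readdir so the
 * next ceph_readdir() can continue from it.
 */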
674 static loff_t
ceph_dir_llseek(struct file
*file
, loff_t offset
, int whence
)
676 struct ceph_dir_file_info
*dfi
= file
->private_data
;
677 struct inode
*inode
= file
->f_mapping
->host
;
678 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
685 offset
+= file
->f_pos
;
690 retval
= -EOPNOTSUPP
;
697 if (need_reset_readdir(dfi
, offset
)) {
698 doutc(cl
, "%p %llx.%llx dropping %p content\n",
699 inode
, ceph_vinop(inode
), file
);
701 } else if (is_hash_order(offset
) && offset
> file
->f_pos
) {
702 /* for hash offset, we don't know if a forward seek
703 * is within same frag */
704 dfi
->dir_release_count
= 0;
705 dfi
->readdir_cache_idx
= -1;
708 if (offset
!= file
->f_pos
) {
709 file
->f_pos
= offset
;
710 dfi
->file_info
.flags
&= ~CEPH_F_ATEND
;
720 * Handle lookups for the hidden .snap directory.
722 struct dentry
*ceph_handle_snapdir(struct ceph_mds_request
*req
,
723 struct dentry
*dentry
)
725 struct ceph_fs_client
*fsc
= ceph_sb_to_fs_client(dentry
->d_sb
);
726 struct inode
*parent
= d_inode(dentry
->d_parent
); /* we hold i_rwsem */
727 struct ceph_client
*cl
= ceph_inode_to_client(parent
);
730 if (ceph_snap(parent
) == CEPH_NOSNAP
&&
731 strcmp(dentry
->d_name
.name
, fsc
->mount_options
->snapdir_name
) == 0) {
733 struct inode
*inode
= ceph_get_snapdir(parent
);
735 res
= d_splice_alias(inode
, dentry
);
736 doutc(cl
, "ENOENT on snapdir %p '%pd', linking to "
737 "snapdir %p %llx.%llx. Spliced dentry %p\n",
738 dentry
, dentry
, inode
, ceph_vinop(inode
), res
);
746 * Figure out final result of a lookup/open request.
748 * Mainly, make sure we return the final req->r_dentry (if it already
749 * existed) in place of the original VFS-provided dentry when they
752 * Gracefully handle the case where the MDS replies with -ENOENT and
753 * no trace (which it may do, at its discretion, e.g., if it doesn't
754 * care to issue a lease on the negative dentry).
756 struct dentry
*ceph_finish_lookup(struct ceph_mds_request
*req
,
757 struct dentry
*dentry
, int err
)
759 struct ceph_client
*cl
= req
->r_mdsc
->fsc
->client
;
761 if (err
== -ENOENT
) {
764 if (!req
->r_reply_info
.head
->is_dentry
) {
766 "ENOENT and no trace, dentry %p inode %llx.%llx\n",
767 dentry
, ceph_vinop(d_inode(dentry
)));
768 if (d_really_is_positive(dentry
)) {
777 dentry
= ERR_PTR(err
);
778 else if (dentry
!= req
->r_dentry
)
779 dentry
= dget(req
->r_dentry
); /* we got spliced */
785 static bool is_root_ceph_dentry(struct inode
*inode
, struct dentry
*dentry
)
787 return ceph_ino(inode
) == CEPH_INO_ROOT
&&
788 strncmp(dentry
->d_name
.name
, ".ceph", 5) == 0;
792 * Look up a single dir entry. If there is a lookup intent, inform
793 * the MDS so that it gets our 'caps wanted' value in a single op.
795 static struct dentry
*ceph_lookup(struct inode
*dir
, struct dentry
*dentry
,
798 struct ceph_fs_client
*fsc
= ceph_sb_to_fs_client(dir
->i_sb
);
799 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(dir
->i_sb
);
800 struct ceph_client
*cl
= fsc
->client
;
801 struct ceph_mds_request
*req
;
806 doutc(cl
, "%p %llx.%llx/'%pd' dentry %p\n", dir
, ceph_vinop(dir
),
809 if (dentry
->d_name
.len
> NAME_MAX
)
810 return ERR_PTR(-ENAMETOOLONG
);
812 if (IS_ENCRYPTED(dir
)) {
813 bool had_key
= fscrypt_has_encryption_key(dir
);
815 err
= fscrypt_prepare_lookup_partial(dir
, dentry
);
819 /* mark directory as incomplete if it has been unlocked */
820 if (!had_key
&& fscrypt_has_encryption_key(dir
))
821 ceph_dir_clear_complete(dir
);
824 /* can we conclude ENOENT locally? */
825 if (d_really_is_negative(dentry
)) {
826 struct ceph_inode_info
*ci
= ceph_inode(dir
);
827 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
829 spin_lock(&ci
->i_ceph_lock
);
830 doutc(cl
, " dir %llx.%llx flags are 0x%lx\n",
831 ceph_vinop(dir
), ci
->i_ceph_flags
);
832 if (strncmp(dentry
->d_name
.name
,
833 fsc
->mount_options
->snapdir_name
,
834 dentry
->d_name
.len
) &&
835 !is_root_ceph_dentry(dir
, dentry
) &&
836 ceph_test_mount_opt(fsc
, DCACHE
) &&
837 __ceph_dir_is_complete(ci
) &&
838 __ceph_caps_issued_mask_metric(ci
, CEPH_CAP_FILE_SHARED
, 1)) {
839 __ceph_touch_fmode(ci
, mdsc
, CEPH_FILE_MODE_RD
);
840 spin_unlock(&ci
->i_ceph_lock
);
841 doutc(cl
, " dir %llx.%llx complete, -ENOENT\n",
844 di
->lease_shared_gen
= atomic_read(&ci
->i_shared_gen
);
847 spin_unlock(&ci
->i_ceph_lock
);
850 op
= ceph_snap(dir
) == CEPH_SNAPDIR
?
851 CEPH_MDS_OP_LOOKUPSNAP
: CEPH_MDS_OP_LOOKUP
;
852 req
= ceph_mdsc_create_request(mdsc
, op
, USE_ANY_MDS
);
854 return ERR_CAST(req
);
855 req
->r_dentry
= dget(dentry
);
858 mask
= CEPH_STAT_CAP_INODE
| CEPH_CAP_AUTH_SHARED
;
859 if (ceph_security_xattr_wanted(dir
))
860 mask
|= CEPH_CAP_XATTR_SHARED
;
861 req
->r_args
.getattr
.mask
= cpu_to_le32(mask
);
865 set_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
);
866 err
= ceph_mdsc_do_request(mdsc
, NULL
, req
);
867 if (err
== -ENOENT
) {
870 res
= ceph_handle_snapdir(req
, dentry
);
878 dentry
= ceph_finish_lookup(req
, dentry
, err
);
879 ceph_mdsc_put_request(req
); /* will dput(dentry) */
880 doutc(cl
, "result=%p\n", dentry
);
885 * If we do a create but get no trace back from the MDS, follow up with
886 * a lookup (the VFS expects us to link up the provided dentry).
888 int ceph_handle_notrace_create(struct inode
*dir
, struct dentry
*dentry
)
890 struct dentry
*result
= ceph_lookup(dir
, dentry
, 0);
892 if (result
&& !IS_ERR(result
)) {
894 * We created the item, then did a lookup, and found
895 * it was already linked to another inode we already
896 * had in our cache (and thus got spliced). To not
897 * confuse VFS (especially when inode is a directory),
898 * we don't link our dentry to that inode; we return an error instead.
901 * This event should be rare, and it happens only when we talk to an
902 * old MDS. Recent MDS versions do not send a traceless reply for a
903 * request that creates a new inode.
908 return PTR_ERR(result
);
911 static int ceph_mknod(struct mnt_idmap
*idmap
, struct inode
*dir
,
912 struct dentry
*dentry
, umode_t mode
, dev_t rdev
)
914 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(dir
->i_sb
);
915 struct ceph_client
*cl
= mdsc
->fsc
->client
;
916 struct ceph_mds_request
*req
;
917 struct ceph_acl_sec_ctx as_ctx
= {};
920 if (ceph_snap(dir
) != CEPH_NOSNAP
)
923 err
= ceph_wait_on_conflict_unlink(dentry
);
927 if (ceph_quota_is_max_files_exceeded(dir
)) {
932 doutc(cl
, "%p %llx.%llx/'%pd' dentry %p mode 0%ho rdev %d\n",
933 dir
, ceph_vinop(dir
), dentry
, dentry
, mode
, rdev
);
934 req
= ceph_mdsc_create_request(mdsc
, CEPH_MDS_OP_MKNOD
, USE_AUTH_MDS
);
940 req
->r_new_inode
= ceph_new_inode(dir
, dentry
, &mode
, &as_ctx
);
941 if (IS_ERR(req
->r_new_inode
)) {
942 err
= PTR_ERR(req
->r_new_inode
);
943 req
->r_new_inode
= NULL
;
947 if (S_ISREG(mode
) && IS_ENCRYPTED(dir
))
948 set_bit(CEPH_MDS_R_FSCRYPT_FILE
, &req
->r_req_flags
);
950 req
->r_dentry
= dget(dentry
);
954 set_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
);
955 req
->r_mnt_idmap
= mnt_idmap_get(idmap
);
956 req
->r_args
.mknod
.mode
= cpu_to_le32(mode
);
957 req
->r_args
.mknod
.rdev
= cpu_to_le32(rdev
);
958 req
->r_dentry_drop
= CEPH_CAP_FILE_SHARED
| CEPH_CAP_AUTH_EXCL
|
960 req
->r_dentry_unless
= CEPH_CAP_FILE_EXCL
;
962 ceph_as_ctx_to_req(req
, &as_ctx
);
964 err
= ceph_mdsc_do_request(mdsc
, dir
, req
);
965 if (!err
&& !req
->r_reply_info
.head
->is_dentry
)
966 err
= ceph_handle_notrace_create(dir
, dentry
);
968 ceph_mdsc_put_request(req
);
971 ceph_init_inode_acls(d_inode(dentry
), &as_ctx
);
974 ceph_release_acl_sec_ctx(&as_ctx
);
978 static int ceph_create(struct mnt_idmap
*idmap
, struct inode
*dir
,
979 struct dentry
*dentry
, umode_t mode
, bool excl
)
981 return ceph_mknod(idmap
, dir
, dentry
, mode
, 0);
984 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
985 static int prep_encrypted_symlink_target(struct ceph_mds_request
*req
,
989 int len
= strlen(dest
);
990 struct fscrypt_str osd_link
= FSTR_INIT(NULL
, 0);
992 err
= fscrypt_prepare_symlink(req
->r_parent
, dest
, len
, PATH_MAX
,
997 err
= fscrypt_encrypt_symlink(req
->r_new_inode
, dest
, len
, &osd_link
);
1001 req
->r_path2
= kmalloc(CEPH_BASE64_CHARS(osd_link
.len
) + 1, GFP_KERNEL
);
1002 if (!req
->r_path2
) {
1007 len
= ceph_base64_encode(osd_link
.name
, osd_link
.len
, req
->r_path2
);
1008 req
->r_path2
[len
] = '\0';
1010 fscrypt_fname_free_buffer(&osd_link
);
1014 static int prep_encrypted_symlink_target(struct ceph_mds_request
*req
,
1021 static int ceph_symlink(struct mnt_idmap
*idmap
, struct inode
*dir
,
1022 struct dentry
*dentry
, const char *dest
)
1024 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(dir
->i_sb
);
1025 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1026 struct ceph_mds_request
*req
;
1027 struct ceph_acl_sec_ctx as_ctx
= {};
1028 umode_t mode
= S_IFLNK
| 0777;
1031 if (ceph_snap(dir
) != CEPH_NOSNAP
)
1034 err
= ceph_wait_on_conflict_unlink(dentry
);
1038 if (ceph_quota_is_max_files_exceeded(dir
)) {
1043 doutc(cl
, "%p %llx.%llx/'%pd' to '%s'\n", dir
, ceph_vinop(dir
), dentry
,
1045 req
= ceph_mdsc_create_request(mdsc
, CEPH_MDS_OP_SYMLINK
, USE_AUTH_MDS
);
1051 req
->r_new_inode
= ceph_new_inode(dir
, dentry
, &mode
, &as_ctx
);
1052 if (IS_ERR(req
->r_new_inode
)) {
1053 err
= PTR_ERR(req
->r_new_inode
);
1054 req
->r_new_inode
= NULL
;
1058 req
->r_parent
= dir
;
1061 if (IS_ENCRYPTED(req
->r_new_inode
)) {
1062 err
= prep_encrypted_symlink_target(req
, dest
);
1066 req
->r_path2
= kstrdup(dest
, GFP_KERNEL
);
1067 if (!req
->r_path2
) {
1073 set_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
);
1074 req
->r_mnt_idmap
= mnt_idmap_get(idmap
);
1075 req
->r_dentry
= dget(dentry
);
1076 req
->r_num_caps
= 2;
1077 req
->r_dentry_drop
= CEPH_CAP_FILE_SHARED
| CEPH_CAP_AUTH_EXCL
|
1078 CEPH_CAP_XATTR_EXCL
;
1079 req
->r_dentry_unless
= CEPH_CAP_FILE_EXCL
;
1081 ceph_as_ctx_to_req(req
, &as_ctx
);
1083 err
= ceph_mdsc_do_request(mdsc
, dir
, req
);
1084 if (!err
&& !req
->r_reply_info
.head
->is_dentry
)
1085 err
= ceph_handle_notrace_create(dir
, dentry
);
1087 ceph_mdsc_put_request(req
);
1091 ceph_release_acl_sec_ctx(&as_ctx
);
1095 static int ceph_mkdir(struct mnt_idmap
*idmap
, struct inode
*dir
,
1096 struct dentry
*dentry
, umode_t mode
)
1098 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(dir
->i_sb
);
1099 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1100 struct ceph_mds_request
*req
;
1101 struct ceph_acl_sec_ctx as_ctx
= {};
1105 err
= ceph_wait_on_conflict_unlink(dentry
);
1109 if (ceph_snap(dir
) == CEPH_SNAPDIR
) {
1110 /* mkdir .snap/foo is a MKSNAP */
1111 op
= CEPH_MDS_OP_MKSNAP
;
1112 doutc(cl
, "mksnap %llx.%llx/'%pd' dentry %p\n",
1113 ceph_vinop(dir
), dentry
, dentry
);
1114 } else if (ceph_snap(dir
) == CEPH_NOSNAP
) {
1115 doutc(cl
, "mkdir %llx.%llx/'%pd' dentry %p mode 0%ho\n",
1116 ceph_vinop(dir
), dentry
, dentry
, mode
);
1117 op
= CEPH_MDS_OP_MKDIR
;
1123 if (op
== CEPH_MDS_OP_MKDIR
&&
1124 ceph_quota_is_max_files_exceeded(dir
)) {
1128 if ((op
== CEPH_MDS_OP_MKSNAP
) && IS_ENCRYPTED(dir
) &&
1129 !fscrypt_has_encryption_key(dir
)) {
1135 req
= ceph_mdsc_create_request(mdsc
, op
, USE_AUTH_MDS
);
1142 req
->r_new_inode
= ceph_new_inode(dir
, dentry
, &mode
, &as_ctx
);
1143 if (IS_ERR(req
->r_new_inode
)) {
1144 err
= PTR_ERR(req
->r_new_inode
);
1145 req
->r_new_inode
= NULL
;
1149 req
->r_dentry
= dget(dentry
);
1150 req
->r_num_caps
= 2;
1151 req
->r_parent
= dir
;
1153 set_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
);
1154 if (op
== CEPH_MDS_OP_MKDIR
)
1155 req
->r_mnt_idmap
= mnt_idmap_get(idmap
);
1156 req
->r_args
.mkdir
.mode
= cpu_to_le32(mode
);
1157 req
->r_dentry_drop
= CEPH_CAP_FILE_SHARED
| CEPH_CAP_AUTH_EXCL
|
1158 CEPH_CAP_XATTR_EXCL
;
1159 req
->r_dentry_unless
= CEPH_CAP_FILE_EXCL
;
1161 ceph_as_ctx_to_req(req
, &as_ctx
);
1163 err
= ceph_mdsc_do_request(mdsc
, dir
, req
);
1165 !req
->r_reply_info
.head
->is_target
&&
1166 !req
->r_reply_info
.head
->is_dentry
)
1167 err
= ceph_handle_notrace_create(dir
, dentry
);
1169 ceph_mdsc_put_request(req
);
1172 ceph_init_inode_acls(d_inode(dentry
), &as_ctx
);
1175 ceph_release_acl_sec_ctx(&as_ctx
);
1179 static int ceph_link(struct dentry
*old_dentry
, struct inode
*dir
,
1180 struct dentry
*dentry
)
1182 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(dir
->i_sb
);
1183 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1184 struct ceph_mds_request
*req
;
1187 if (dentry
->d_flags
& DCACHE_DISCONNECTED
)
1190 err
= ceph_wait_on_conflict_unlink(dentry
);
1194 if (ceph_snap(dir
) != CEPH_NOSNAP
)
1197 err
= fscrypt_prepare_link(old_dentry
, dir
, dentry
);
1201 doutc(cl
, "%p %llx.%llx/'%pd' to '%pd'\n", dir
, ceph_vinop(dir
),
1202 old_dentry
, dentry
);
1203 req
= ceph_mdsc_create_request(mdsc
, CEPH_MDS_OP_LINK
, USE_AUTH_MDS
);
1206 return PTR_ERR(req
);
1208 req
->r_dentry
= dget(dentry
);
1209 req
->r_num_caps
= 2;
1210 req
->r_old_dentry
= dget(old_dentry
);
1212 * The old_dentry may be a DCACHE_DISCONNECTED dentry; in that case we
1213 * will just pass the ino# to the MDSs.
1215 if (old_dentry
->d_flags
& DCACHE_DISCONNECTED
)
1216 req
->r_ino2
= ceph_vino(d_inode(old_dentry
));
1217 req
->r_parent
= dir
;
1219 set_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
);
1220 req
->r_dentry_drop
= CEPH_CAP_FILE_SHARED
| CEPH_CAP_XATTR_EXCL
;
1221 req
->r_dentry_unless
= CEPH_CAP_FILE_EXCL
;
1222 /* release LINK_SHARED on source inode (mds will lock it) */
1223 req
->r_old_inode_drop
= CEPH_CAP_LINK_SHARED
| CEPH_CAP_LINK_EXCL
;
1224 err
= ceph_mdsc_do_request(mdsc
, dir
, req
);
1227 } else if (!req
->r_reply_info
.head
->is_dentry
) {
1228 ihold(d_inode(old_dentry
));
1229 d_instantiate(dentry
, d_inode(old_dentry
));
1231 ceph_mdsc_put_request(req
);
1235 static void ceph_async_unlink_cb(struct ceph_mds_client
*mdsc
,
1236 struct ceph_mds_request
*req
)
1238 struct dentry
*dentry
= req
->r_dentry
;
1239 struct ceph_fs_client
*fsc
= ceph_sb_to_fs_client(dentry
->d_sb
);
1240 struct ceph_client
*cl
= fsc
->client
;
1241 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
1242 int result
= req
->r_err
? req
->r_err
:
1243 le32_to_cpu(req
->r_reply_info
.head
->result
);
1245 if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT
, &di
->flags
))
1247 "dentry %p:%pd async unlink bit is not set\n",
1250 spin_lock(&fsc
->async_unlink_conflict_lock
);
1251 hash_del_rcu(&di
->hnode
);
1252 spin_unlock(&fsc
->async_unlink_conflict_lock
);
1254 spin_lock(&dentry
->d_lock
);
1255 di
->flags
&= ~CEPH_DENTRY_ASYNC_UNLINK
;
1256 wake_up_bit(&di
->flags
, CEPH_DENTRY_ASYNC_UNLINK_BIT
);
1257 spin_unlock(&dentry
->d_lock
);
1261 if (result
== -EJUKEBOX
)
1264 /* If op failed, mark everyone involved for errors */
1268 char *path
= ceph_mdsc_build_path(mdsc
, dentry
, &pathlen
,
1271 /* mark error on parent + clear complete */
1272 mapping_set_error(req
->r_parent
->i_mapping
, result
);
1273 ceph_dir_clear_complete(req
->r_parent
);
1275 /* drop the dentry -- we don't know its status */
1276 if (!d_unhashed(dentry
))
1279 /* mark inode itself for an error (since metadata is bogus) */
1280 mapping_set_error(req
->r_old_inode
->i_mapping
, result
);
1282 pr_warn_client(cl
, "failure path=(%llx)%s result=%d!\n",
1283 base
, IS_ERR(path
) ? "<<bad>>" : path
, result
);
1284 ceph_mdsc_free_path(path
, pathlen
);
1287 iput(req
->r_old_inode
);
1288 ceph_mdsc_release_dir_caps(req
);
1291 static int get_caps_for_async_unlink(struct inode
*dir
, struct dentry
*dentry
)
1293 struct ceph_inode_info
*ci
= ceph_inode(dir
);
1294 struct ceph_dentry_info
*di
;
1295 int got
= 0, want
= CEPH_CAP_FILE_EXCL
| CEPH_CAP_DIR_UNLINK
;
1297 spin_lock(&ci
->i_ceph_lock
);
1298 if ((__ceph_caps_issued(ci
, NULL
) & want
) == want
) {
1299 ceph_take_cap_refs(ci
, want
, false);
1302 spin_unlock(&ci
->i_ceph_lock
);
1304 /* If we didn't get anything, return 0 */
1308 spin_lock(&dentry
->d_lock
);
1309 di
= ceph_dentry(dentry
);
1311 * - We are holding Fx, which implies Fs caps.
1312 * - Only support async unlink for primary linkage
1314 if (atomic_read(&ci
->i_shared_gen
) != di
->lease_shared_gen
||
1315 !(di
->flags
& CEPH_DENTRY_PRIMARY_LINK
))
1317 spin_unlock(&dentry
->d_lock
);
1319 /* Do we still want what we've got? */
1323 ceph_put_cap_refs(ci
, got
);
1328 * rmdir and unlink differ only in the metadata op code
1330 static int ceph_unlink(struct inode
*dir
, struct dentry
*dentry
)
1332 struct ceph_fs_client
*fsc
= ceph_sb_to_fs_client(dir
->i_sb
);
1333 struct ceph_client
*cl
= fsc
->client
;
1334 struct ceph_mds_client
*mdsc
= fsc
->mdsc
;
1335 struct inode
*inode
= d_inode(dentry
);
1336 struct ceph_mds_request
*req
;
1337 bool try_async
= ceph_test_mount_opt(fsc
, ASYNC_DIROPS
);
1345 if (ceph_snap(dir
) == CEPH_SNAPDIR
) {
1346 /* rmdir .snap/foo is RMSNAP */
1347 doutc(cl
, "rmsnap %llx.%llx/'%pd' dn\n", ceph_vinop(dir
),
1349 op
= CEPH_MDS_OP_RMSNAP
;
1350 } else if (ceph_snap(dir
) == CEPH_NOSNAP
) {
1351 doutc(cl
, "unlink/rmdir %llx.%llx/'%pd' inode %llx.%llx\n",
1352 ceph_vinop(dir
), dentry
, ceph_vinop(inode
));
1353 op
= d_is_dir(dentry
) ?
1354 CEPH_MDS_OP_RMDIR
: CEPH_MDS_OP_UNLINK
;
1358 dn
= d_find_alias(dir
);
1362 path
= ceph_mdsc_build_path(mdsc
, dn
, &pathlen
, &pathbase
, 0);
1367 err
= ceph_mds_check_access(mdsc
, path
, MAY_WRITE
);
1369 ceph_mdsc_free_path(path
, pathlen
);
1372 /* For non-EACCES cases, let the MDS do the auth check */
1373 if (err
== -EACCES
) {
1375 } else if (err
< 0) {
1382 req
= ceph_mdsc_create_request(mdsc
, op
, USE_AUTH_MDS
);
1387 req
->r_dentry
= dget(dentry
);
1388 req
->r_num_caps
= 2;
1389 req
->r_parent
= dir
;
1391 req
->r_dentry_drop
= CEPH_CAP_FILE_SHARED
| CEPH_CAP_XATTR_EXCL
;
1392 req
->r_dentry_unless
= CEPH_CAP_FILE_EXCL
;
1393 req
->r_inode_drop
= ceph_drop_caps_for_unlink(inode
);
1395 if (try_async
&& op
== CEPH_MDS_OP_UNLINK
&&
1396 (req
->r_dir_caps
= get_caps_for_async_unlink(dir
, dentry
))) {
1397 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
1399 doutc(cl
, "async unlink on %llx.%llx/'%pd' caps=%s",
1400 ceph_vinop(dir
), dentry
,
1401 ceph_cap_string(req
->r_dir_caps
));
1402 set_bit(CEPH_MDS_R_ASYNC
, &req
->r_req_flags
);
1403 req
->r_callback
= ceph_async_unlink_cb
;
1404 req
->r_old_inode
= d_inode(dentry
);
1405 ihold(req
->r_old_inode
);
1407 spin_lock(&dentry
->d_lock
);
1408 di
->flags
|= CEPH_DENTRY_ASYNC_UNLINK
;
1409 spin_unlock(&dentry
->d_lock
);
1411 spin_lock(&fsc
->async_unlink_conflict_lock
);
1412 hash_add_rcu(fsc
->async_unlink_conflict
, &di
->hnode
,
1413 dentry
->d_name
.hash
);
1414 spin_unlock(&fsc
->async_unlink_conflict_lock
);
1416 err
= ceph_mdsc_submit_request(mdsc
, dir
, req
);
1419 * We have enough caps, so we assume that the unlink
1420 * will succeed. Fix up the target inode and dcache.
1425 spin_lock(&fsc
->async_unlink_conflict_lock
);
1426 hash_del_rcu(&di
->hnode
);
1427 spin_unlock(&fsc
->async_unlink_conflict_lock
);
1429 spin_lock(&dentry
->d_lock
);
1430 di
->flags
&= ~CEPH_DENTRY_ASYNC_UNLINK
;
1431 spin_unlock(&dentry
->d_lock
);
1433 if (err
== -EJUKEBOX
) {
1435 ceph_mdsc_put_request(req
);
1440 set_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
);
1441 err
= ceph_mdsc_do_request(mdsc
, dir
, req
);
1442 if (!err
&& !req
->r_reply_info
.head
->is_dentry
)
1446 ceph_mdsc_put_request(req
);
1451 static int ceph_rename(struct mnt_idmap
*idmap
, struct inode
*old_dir
,
1452 struct dentry
*old_dentry
, struct inode
*new_dir
,
1453 struct dentry
*new_dentry
, unsigned int flags
)
1455 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(old_dir
->i_sb
);
1456 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1457 struct ceph_mds_request
*req
;
1458 int op
= CEPH_MDS_OP_RENAME
;
1464 if (ceph_snap(old_dir
) != ceph_snap(new_dir
))
1466 if (ceph_snap(old_dir
) != CEPH_NOSNAP
) {
1467 if (old_dir
== new_dir
&& ceph_snap(old_dir
) == CEPH_SNAPDIR
)
1468 op
= CEPH_MDS_OP_RENAMESNAP
;
1472 /* don't allow cross-quota renames */
1473 if ((old_dir
!= new_dir
) &&
1474 (!ceph_quota_is_same_realm(old_dir
, new_dir
)))
1477 err
= ceph_wait_on_conflict_unlink(new_dentry
);
1481 err
= fscrypt_prepare_rename(old_dir
, old_dentry
, new_dir
, new_dentry
,
1486 doutc(cl
, "%llx.%llx/'%pd' to %llx.%llx/'%pd'\n",
1487 ceph_vinop(old_dir
), old_dentry
, ceph_vinop(new_dir
),
1489 req
= ceph_mdsc_create_request(mdsc
, op
, USE_AUTH_MDS
);
1491 return PTR_ERR(req
);
1493 req
->r_dentry
= dget(new_dentry
);
1494 req
->r_num_caps
= 2;
1495 req
->r_old_dentry
= dget(old_dentry
);
1496 req
->r_old_dentry_dir
= old_dir
;
1497 req
->r_parent
= new_dir
;
1499 set_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
);
1500 req
->r_old_dentry_drop
= CEPH_CAP_FILE_SHARED
| CEPH_CAP_XATTR_EXCL
;
1501 req
->r_old_dentry_unless
= CEPH_CAP_FILE_EXCL
;
1502 req
->r_dentry_drop
= CEPH_CAP_FILE_SHARED
| CEPH_CAP_XATTR_EXCL
;
1503 req
->r_dentry_unless
= CEPH_CAP_FILE_EXCL
;
1504 /* release LINK_RDCACHE on source inode (mds will lock it) */
1505 req
->r_old_inode_drop
= CEPH_CAP_LINK_SHARED
| CEPH_CAP_LINK_EXCL
;
1506 if (d_really_is_positive(new_dentry
)) {
1508 ceph_drop_caps_for_unlink(d_inode(new_dentry
));
1510 err
= ceph_mdsc_do_request(mdsc
, old_dir
, req
);
1511 if (!err
&& !req
->r_reply_info
.head
->is_dentry
) {
1513 * Normally d_move() is done by fill_trace (called by
1514 * do_request, above). If there is no trace, we need
1517 d_move(old_dentry
, new_dentry
);
1519 ceph_mdsc_put_request(req
);
1524 * Move dentry to tail of mdsc->dentry_leases list when lease is updated.
1525 * Leases at front of the list will expire first. (Assume all leases have similar lifetimes.)
1528 * Called under dentry->d_lock.
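/*
 * Bookkeeping sketch: each ceph_dentry_info sits on at most one of two
 * LRU-style lists, mdsc->dentry_leases (per-dentry leases) or
 * mdsc->dentry_dir_leases (dentries relying on the parent dir lease).
 * Touching a lease moves it to the tail, so ceph_trim_dentries() can
 * scan from the head and drop whatever would expire first.  If the
 * dentry is currently being processed on the walk's dispose list
 * (CEPH_DENTRY_SHRINK_LIST), only the CEPH_DENTRY_REFERENCED flag is
 * set and the move is deferred.
 */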
void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
{
	struct dentry *dn = di->dentry;
	struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
	struct ceph_client *cl = mdsc->fsc->client;

	doutc(cl, "%p %p '%pd'\n", di, dn, dn);

	di->flags |= CEPH_DENTRY_LEASE_LIST;
	if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
		di->flags |= CEPH_DENTRY_REFERENCED;
		return;
	}

	spin_lock(&mdsc->dentry_list_lock);
	list_move_tail(&di->lease_list, &mdsc->dentry_leases);
	spin_unlock(&mdsc->dentry_list_lock);
}
1549 static void __dentry_dir_lease_touch(struct ceph_mds_client
* mdsc
,
1550 struct ceph_dentry_info
*di
)
1552 di
->flags
&= ~(CEPH_DENTRY_LEASE_LIST
| CEPH_DENTRY_REFERENCED
);
1555 list_move_tail(&di
->lease_list
, &mdsc
->dentry_dir_leases
);
1559 * When dir lease is used, add dentry to tail of mdsc->dentry_dir_leases
1560 * list if it's not in the list, otherwise set 'referenced' flag.
1562 * Called under dentry->d_lock.
1564 void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info
*di
)
1566 struct dentry
*dn
= di
->dentry
;
1567 struct ceph_mds_client
*mdsc
= ceph_sb_to_fs_client(dn
->d_sb
)->mdsc
;
1568 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1570 doutc(cl
, "%p %p '%pd' (offset 0x%llx)\n", di
, dn
, dn
, di
->offset
);
1572 if (!list_empty(&di
->lease_list
)) {
1573 if (di
->flags
& CEPH_DENTRY_LEASE_LIST
) {
1574 /* don't remove dentry from dentry lease list
1575 * if its lease is valid */
1576 if (__dentry_lease_is_valid(di
))
1579 di
->flags
|= CEPH_DENTRY_REFERENCED
;
1584 if (di
->flags
& CEPH_DENTRY_SHRINK_LIST
) {
1585 di
->flags
|= CEPH_DENTRY_REFERENCED
;
1586 di
->flags
&= ~CEPH_DENTRY_LEASE_LIST
;
1590 spin_lock(&mdsc
->dentry_list_lock
);
1591 __dentry_dir_lease_touch(mdsc
, di
);
1592 spin_unlock(&mdsc
->dentry_list_lock
);
1595 static void __dentry_lease_unlist(struct ceph_dentry_info
*di
)
1597 struct ceph_mds_client
*mdsc
;
1598 if (di
->flags
& CEPH_DENTRY_SHRINK_LIST
)
1600 if (list_empty(&di
->lease_list
))
1603 mdsc
= ceph_sb_to_fs_client(di
->dentry
->d_sb
)->mdsc
;
1604 spin_lock(&mdsc
->dentry_list_lock
);
1605 list_del_init(&di
->lease_list
);
1606 spin_unlock(&mdsc
->dentry_list_lock
);
1616 struct ceph_lease_walk_control
{
1618 bool expire_dir_lease
;
1619 unsigned long nr_to_scan
;
1620 unsigned long dir_lease_ttl
;
1623 static int __dir_lease_check(const struct dentry
*, struct ceph_lease_walk_control
*);
1624 static int __dentry_lease_check(const struct dentry
*);
1626 static unsigned long
1627 __dentry_leases_walk(struct ceph_mds_client
*mdsc
,
1628 struct ceph_lease_walk_control
*lwc
)
1630 struct ceph_dentry_info
*di
, *tmp
;
1631 struct dentry
*dentry
, *last
= NULL
;
1632 struct list_head
* list
;
1634 unsigned long freed
= 0;
1637 list
= lwc
->dir_lease
? &mdsc
->dentry_dir_leases
: &mdsc
->dentry_leases
;
1638 spin_lock(&mdsc
->dentry_list_lock
);
1639 list_for_each_entry_safe(di
, tmp
, list
, lease_list
) {
1640 if (!lwc
->nr_to_scan
)
1644 dentry
= di
->dentry
;
1648 if (!spin_trylock(&dentry
->d_lock
))
1651 if (__lockref_is_dead(&dentry
->d_lockref
)) {
1652 list_del_init(&di
->lease_list
);
1657 ret
= __dir_lease_check(dentry
, lwc
);
1659 ret
= __dentry_lease_check(dentry
);
1661 /* move it into tail of dir lease list */
1662 __dentry_dir_lease_touch(mdsc
, di
);
1668 di
->flags
&= ~CEPH_DENTRY_REFERENCED
;
1669 if (dentry
->d_lockref
.count
> 0) {
1670 /* update_dentry_lease() will re-add
1671 * it to lease list, or
1672 * ceph_d_delete() will return 1 when
1673 * last reference is dropped */
1674 list_del_init(&di
->lease_list
);
1676 di
->flags
|= CEPH_DENTRY_SHRINK_LIST
;
1677 list_move_tail(&di
->lease_list
, &dispose
);
1682 spin_unlock(&dentry
->d_lock
);
1686 spin_unlock(&mdsc
->dentry_list_lock
);
1688 while (!list_empty(&dispose
)) {
1689 di
= list_first_entry(&dispose
, struct ceph_dentry_info
,
1691 dentry
= di
->dentry
;
1692 spin_lock(&dentry
->d_lock
);
1694 list_del_init(&di
->lease_list
);
1695 di
->flags
&= ~CEPH_DENTRY_SHRINK_LIST
;
1696 if (di
->flags
& CEPH_DENTRY_REFERENCED
) {
1697 spin_lock(&mdsc
->dentry_list_lock
);
1698 if (di
->flags
& CEPH_DENTRY_LEASE_LIST
) {
1699 list_add_tail(&di
->lease_list
,
1700 &mdsc
->dentry_leases
);
1702 __dentry_dir_lease_touch(mdsc
, di
);
1704 spin_unlock(&mdsc
->dentry_list_lock
);
1709 spin_unlock(&dentry
->d_lock
);
1710 /* ceph_d_delete() does the trick */
1716 static int __dentry_lease_check(const struct dentry
*dentry
)
1718 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
1721 if (__dentry_lease_is_valid(di
))
1723 ret
= __dir_lease_try_check(dentry
);
1731 static int __dir_lease_check(const struct dentry
*dentry
,
1732 struct ceph_lease_walk_control
*lwc
)
1734 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
1736 int ret
= __dir_lease_try_check(dentry
);
1740 if (time_before(jiffies
, di
->time
+ lwc
->dir_lease_ttl
))
1742 /* Move dentry to tail of dir lease list if we don't want
1743 * to delete it. So dentries in the list are checked in a
1744 * round robin manner */
1745 if (!lwc
->expire_dir_lease
)
1747 if (dentry
->d_lockref
.count
> 0 ||
1748 (di
->flags
& CEPH_DENTRY_REFERENCED
))
1750 /* invalidate dir lease */
1751 di
->lease_shared_gen
= 0;
1756 int ceph_trim_dentries(struct ceph_mds_client
*mdsc
)
1758 struct ceph_lease_walk_control lwc
;
1759 unsigned long count
;
1760 unsigned long freed
;
1762 spin_lock(&mdsc
->caps_list_lock
);
1763 if (mdsc
->caps_use_max
> 0 &&
1764 mdsc
->caps_use_count
> mdsc
->caps_use_max
)
1765 count
= mdsc
->caps_use_count
- mdsc
->caps_use_max
;
1768 spin_unlock(&mdsc
->caps_list_lock
);
1770 lwc
.dir_lease
= false;
1771 lwc
.nr_to_scan
= CEPH_CAPS_PER_RELEASE
* 2;
1772 freed
= __dentry_leases_walk(mdsc
, &lwc
);
1773 if (!lwc
.nr_to_scan
) /* more invalid leases */
1776 if (lwc
.nr_to_scan
< CEPH_CAPS_PER_RELEASE
)
1777 lwc
.nr_to_scan
= CEPH_CAPS_PER_RELEASE
;
1779 lwc
.dir_lease
= true;
1780 lwc
.expire_dir_lease
= freed
< count
;
1781 lwc
.dir_lease_ttl
= mdsc
->fsc
->mount_options
->caps_wanted_delay_max
* HZ
;
1782 freed
+=__dentry_leases_walk(mdsc
, &lwc
);
1783 if (!lwc
.nr_to_scan
) /* more to check */
1786 return freed
> 0 ? 1 : 0;
1790 * Ensure a dentry lease will no longer revalidate.
1792 void ceph_invalidate_dentry_lease(struct dentry
*dentry
)
1794 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
1795 spin_lock(&dentry
->d_lock
);
1797 di
->lease_shared_gen
= 0;
1798 di
->flags
&= ~CEPH_DENTRY_PRIMARY_LINK
;
1799 __dentry_lease_unlist(di
);
1800 spin_unlock(&dentry
->d_lock
);
1804 * Check if dentry lease is valid. If not, delete the lease. Try to
1805 * renew if the lease is more than halfway through its term.
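/*
 * Timing sketch (illustrative): di->time holds the jiffies value at
 * which the lease expires, and di->lease_renew_after the point after
 * which a revalidation should ask the MDS for a renewal.  A lookup
 * that revalidates the dentry in the second half of the lease term
 * therefore sends CEPH_MDS_LEASE_RENEW (see dentry_lease_is_valid()
 * below) while the lease is still usable.
 */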
1807 static bool __dentry_lease_is_valid(struct ceph_dentry_info
*di
)
1809 struct ceph_mds_session
*session
;
1814 session
= di
->lease_session
;
1819 gen
= atomic_read(&session
->s_cap_gen
);
1820 ttl
= session
->s_cap_ttl
;
1822 if (di
->lease_gen
== gen
&&
1823 time_before(jiffies
, ttl
) &&
1824 time_before(jiffies
, di
->time
))
1831 static int dentry_lease_is_valid(struct dentry
*dentry
, unsigned int flags
)
1833 struct ceph_dentry_info
*di
;
1834 struct ceph_mds_session
*session
= NULL
;
1835 struct ceph_mds_client
*mdsc
= ceph_sb_to_fs_client(dentry
->d_sb
)->mdsc
;
1836 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1840 spin_lock(&dentry
->d_lock
);
1841 di
= ceph_dentry(dentry
);
1842 if (di
&& __dentry_lease_is_valid(di
)) {
1845 if (di
->lease_renew_after
&&
1846 time_after(jiffies
, di
->lease_renew_after
)) {
1848 * We should renew. If we're in RCU walk mode
1849 * though, we can't do that so just return
1852 if (flags
& LOOKUP_RCU
) {
1855 session
= ceph_get_mds_session(di
->lease_session
);
1856 seq
= di
->lease_seq
;
1857 di
->lease_renew_after
= 0;
1858 di
->lease_renew_from
= jiffies
;
1862 spin_unlock(&dentry
->d_lock
);
1865 ceph_mdsc_lease_send_msg(session
, dentry
,
1866 CEPH_MDS_LEASE_RENEW
, seq
);
1867 ceph_put_mds_session(session
);
1869 doutc(cl
, "dentry %p = %d\n", dentry
, valid
);
1874 * Called under dentry->d_lock.
1876 static int __dir_lease_try_check(const struct dentry
*dentry
)
1878 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
1880 struct ceph_inode_info
*ci
;
1883 if (!di
->lease_shared_gen
)
1885 if (IS_ROOT(dentry
))
1888 dir
= d_inode(dentry
->d_parent
);
1889 ci
= ceph_inode(dir
);
1891 if (spin_trylock(&ci
->i_ceph_lock
)) {
1892 if (atomic_read(&ci
->i_shared_gen
) == di
->lease_shared_gen
&&
1893 __ceph_caps_issued_mask(ci
, CEPH_CAP_FILE_SHARED
, 0))
1895 spin_unlock(&ci
->i_ceph_lock
);
1901 di
->lease_shared_gen
= 0;
1906 * Check if directory-wide content lease/cap is valid.
1908 static int dir_lease_is_valid(struct inode
*dir
, struct dentry
*dentry
,
1909 struct ceph_mds_client
*mdsc
)
1911 struct ceph_inode_info
*ci
= ceph_inode(dir
);
1912 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1916 spin_lock(&ci
->i_ceph_lock
);
1917 valid
= __ceph_caps_issued_mask(ci
, CEPH_CAP_FILE_SHARED
, 1);
1919 __ceph_touch_fmode(ci
, mdsc
, CEPH_FILE_MODE_RD
);
1920 shared_gen
= atomic_read(&ci
->i_shared_gen
);
1922 spin_unlock(&ci
->i_ceph_lock
);
1924 struct ceph_dentry_info
*di
;
1925 spin_lock(&dentry
->d_lock
);
1926 di
= ceph_dentry(dentry
);
1927 if (dir
== d_inode(dentry
->d_parent
) &&
1928 di
&& di
->lease_shared_gen
== shared_gen
)
1929 __ceph_dentry_dir_lease_touch(di
);
1932 spin_unlock(&dentry
->d_lock
);
1934 doutc(cl
, "dir %p %llx.%llx v%u dentry %p '%pd' = %d\n", dir
,
1935 ceph_vinop(dir
), (unsigned)atomic_read(&ci
->i_shared_gen
),
1936 dentry
, dentry
, valid
);
1941 * Check if cached dentry can be trusted.
1943 static int ceph_d_revalidate(struct dentry
*dentry
, unsigned int flags
)
1945 struct ceph_mds_client
*mdsc
= ceph_sb_to_fs_client(dentry
->d_sb
)->mdsc
;
1946 struct ceph_client
*cl
= mdsc
->fsc
->client
;
1948 struct dentry
*parent
;
1949 struct inode
*dir
, *inode
;
1951 valid
= fscrypt_d_revalidate(dentry
, flags
);
1955 if (flags
& LOOKUP_RCU
) {
1956 parent
= READ_ONCE(dentry
->d_parent
);
1957 dir
= d_inode_rcu(parent
);
1960 inode
= d_inode_rcu(dentry
);
1962 parent
= dget_parent(dentry
);
1963 dir
= d_inode(parent
);
1964 inode
= d_inode(dentry
);
1967 doutc(cl
, "%p '%pd' inode %p offset 0x%llx nokey %d\n",
1968 dentry
, dentry
, inode
, ceph_dentry(dentry
)->offset
,
1969 !!(dentry
->d_flags
& DCACHE_NOKEY_NAME
));
1971 mdsc
= ceph_sb_to_fs_client(dir
->i_sb
)->mdsc
;
1973 /* always trust cached snapped dentries, snapdir dentry */
1974 if (ceph_snap(dir
) != CEPH_NOSNAP
) {
1975 doutc(cl
, "%p '%pd' inode %p is SNAPPED\n", dentry
,
1978 } else if (inode
&& ceph_snap(inode
) == CEPH_SNAPDIR
) {
1981 valid
= dentry_lease_is_valid(dentry
, flags
);
1982 if (valid
== -ECHILD
)
1984 if (valid
|| dir_lease_is_valid(dir
, dentry
, mdsc
)) {
1986 valid
= ceph_is_any_caps(inode
);
1993 struct ceph_mds_request
*req
;
1997 if (flags
& LOOKUP_RCU
)
2000 percpu_counter_inc(&mdsc
->metric
.d_lease_mis
);
2002 op
= ceph_snap(dir
) == CEPH_SNAPDIR
?
2003 CEPH_MDS_OP_LOOKUPSNAP
: CEPH_MDS_OP_LOOKUP
;
2004 req
= ceph_mdsc_create_request(mdsc
, op
, USE_ANY_MDS
);
2006 req
->r_dentry
= dget(dentry
);
2007 req
->r_num_caps
= 2;
2008 req
->r_parent
= dir
;
2011 mask
= CEPH_STAT_CAP_INODE
| CEPH_CAP_AUTH_SHARED
;
2012 if (ceph_security_xattr_wanted(dir
))
2013 mask
|= CEPH_CAP_XATTR_SHARED
;
2014 req
->r_args
.getattr
.mask
= cpu_to_le32(mask
);
2016 err
= ceph_mdsc_do_request(mdsc
, NULL
, req
);
2019 if (d_really_is_positive(dentry
) &&
2020 d_inode(dentry
) == req
->r_target_inode
)
2024 if (d_really_is_negative(dentry
))
2030 ceph_mdsc_put_request(req
);
2031 doutc(cl
, "%p '%pd', lookup result=%d\n", dentry
,
2035 percpu_counter_inc(&mdsc
->metric
.d_lease_hit
);
2038 doutc(cl
, "%p '%pd' %s\n", dentry
, dentry
, valid
? "valid" : "invalid");
2040 ceph_dir_clear_complete(dir
);
2042 if (!(flags
& LOOKUP_RCU
))
2048 * Delete unused dentry that doesn't have valid lease
2050 * Called under dentry->d_lock.
2052 static int ceph_d_delete(const struct dentry
*dentry
)
2054 struct ceph_dentry_info
*di
;
2056 /* won't release caps */
2057 if (d_really_is_negative(dentry
))
2059 if (ceph_snap(d_inode(dentry
)) != CEPH_NOSNAP
)
2062 di
= ceph_dentry(dentry
);
2064 if (__dentry_lease_is_valid(di
))
2066 if (__dir_lease_try_check(dentry
))
2073 * Release our ceph_dentry_info.
2075 static void ceph_d_release(struct dentry
*dentry
)
2077 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
2078 struct ceph_fs_client
*fsc
= ceph_sb_to_fs_client(dentry
->d_sb
);
2080 doutc(fsc
->client
, "dentry %p '%pd'\n", dentry
, dentry
);
2082 atomic64_dec(&fsc
->mdsc
->metric
.total_dentries
);
2084 spin_lock(&dentry
->d_lock
);
2085 __dentry_lease_unlist(di
);
2086 dentry
->d_fsdata
= NULL
;
2087 spin_unlock(&dentry
->d_lock
);
2089 ceph_put_mds_session(di
->lease_session
);
2090 kmem_cache_free(ceph_dentry_cachep
, di
);
2094 * When the VFS prunes a dentry from the cache, we need to clear the
2095 * complete flag on the parent directory.
2097 * Called under dentry->d_lock.
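/*
 * Rationale sketch: once any child dentry has been evicted we can no
 * longer assume the dcache holds the full directory listing, so the
 * __dcache_readdir() fast path and the "conclude ENOENT locally"
 * shortcut in ceph_lookup() must stop trusting it until a full readdir
 * marks the directory complete again.
 */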
2099 static void ceph_d_prune(struct dentry
*dentry
)
2101 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(dentry
->d_sb
);
2102 struct ceph_client
*cl
= mdsc
->fsc
->client
;
2103 struct ceph_inode_info
*dir_ci
;
2104 struct ceph_dentry_info
*di
;
2106 doutc(cl
, "dentry %p '%pd'\n", dentry
, dentry
);
2108 /* do we have a valid parent? */
2109 if (IS_ROOT(dentry
))
2112 /* we hold d_lock, so d_parent is stable */
2113 dir_ci
= ceph_inode(d_inode(dentry
->d_parent
));
2114 if (dir_ci
->i_vino
.snap
== CEPH_SNAPDIR
)
2117 /* who calls d_delete() should also disable dcache readdir */
2118 if (d_really_is_negative(dentry
))
2121 /* d_fsdata does not get cleared until d_release */
2122 if (!d_unhashed(dentry
)) {
2123 __ceph_dir_clear_complete(dir_ci
);
2127 /* Disable dcache readdir in case someone called d_drop()
2128 * or d_invalidate(), but MDS didn't revoke CEPH_CAP_FILE_SHARED
2129 * properly (dcache readdir is still enabled) */
2130 di
= ceph_dentry(dentry
);
2131 if (di
->offset
> 0 &&
2132 di
->lease_shared_gen
== atomic_read(&dir_ci
->i_shared_gen
))
2133 __ceph_dir_clear_ordered(dir_ci
);
2137 * read() on a dir. This weird interface hack only works if mounted
2138 * with '-o dirstat'.
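/*
 * Example (a sketch; assumes a mount with the dirstat option enabled):
 *
 *	# mount -t ceph <mon-addr>:/ /mnt/ceph -o dirstat
 *	# cat /mnt/ceph/some/dir
 *	entries:   ...
 *	rentries:  ...
 *	rctime:    ...
 *
 * i.e. read(2) on the directory returns the rstat summary formatted by
 * the snprintf() call below.
 */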
2140 static ssize_t
ceph_read_dir(struct file
*file
, char __user
*buf
, size_t size
,
2143 struct ceph_dir_file_info
*dfi
= file
->private_data
;
2144 struct inode
*inode
= file_inode(file
);
2145 struct ceph_inode_info
*ci
= ceph_inode(inode
);
2147 const int bufsize
= 1024;
2149 if (!ceph_test_mount_opt(ceph_sb_to_fs_client(inode
->i_sb
), DIRSTAT
))
2152 if (!dfi
->dir_info
) {
2153 dfi
->dir_info
= kmalloc(bufsize
, GFP_KERNEL
);
2157 snprintf(dfi
->dir_info
, bufsize
,
2160 " subdirs: %20lld\n"
2161 "rentries: %20lld\n"
2163 " rsubdirs: %20lld\n"
2165 "rctime: %10lld.%09ld\n",
2166 ci
->i_files
+ ci
->i_subdirs
,
2169 ci
->i_rfiles
+ ci
->i_rsubdirs
,
2173 ci
->i_rctime
.tv_sec
,
2174 ci
->i_rctime
.tv_nsec
);
2177 if (*ppos
>= dfi
->dir_info_len
)
2179 size
= min_t(unsigned, size
, dfi
->dir_info_len
-*ppos
);
2180 left
= copy_to_user(buf
, dfi
->dir_info
+ *ppos
, size
);
2183 *ppos
+= (size
- left
);
2190 * Return name hash for a given dentry. This is dependent on
2191 * the parent directory's hash function.
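/*
 * For example, a directory whose layout selects CEPH_STR_HASH_LINUX
 * (or the backward-compat value 0) simply reuses the VFS name hash in
 * dn->d_name.hash, while any other dl_dir_hash value is recomputed via
 * ceph_str_hash() under d_lock so the name cannot change underneath us.
 */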
2193 unsigned ceph_dentry_hash(struct inode
*dir
, struct dentry
*dn
)
2195 struct ceph_inode_info
*dci
= ceph_inode(dir
);
2198 switch (dci
->i_dir_layout
.dl_dir_hash
) {
2199 case 0: /* for backward compat */
2200 case CEPH_STR_HASH_LINUX
:
2201 return dn
->d_name
.hash
;
2204 spin_lock(&dn
->d_lock
);
2205 hash
= ceph_str_hash(dci
->i_dir_layout
.dl_dir_hash
,
2206 dn
->d_name
.name
, dn
->d_name
.len
);
2207 spin_unlock(&dn
->d_lock
);
WRAP_DIR_ITER(ceph_readdir) // FIXME!
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate_shared = shared_ceph_readdir,
	.llseek = ceph_dir_llseek,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fsync = ceph_fsync,
	.flock = ceph_flock,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate_shared = shared_ceph_readdir,
	.llseek = ceph_dir_llseek,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.listxattr = ceph_listxattr,
	.get_inode_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_delete = ceph_d_delete,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
	.d_init = ceph_d_init,
};