/* fs/ceph/dir.c */

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path. Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
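
/*
 * Return the parent inode of a dentry with a reference held, or NULL
 * if the dentry is a root. The caller must iput() the returned inode.
 */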
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = d_inode(dentry->d_parent);
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}

static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
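
/* order two f_pos values: compare the frag first, then the offset within it */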
static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len, unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(fi->last_name);
	fi->last_name = buf;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	fi->next_offset = next_offset;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache. We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir. It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct dentry *dentry, *last = NULL;
	struct ceph_dentry_info *di;
	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
	int err = 0;
	loff_t ptr_pos = 0;
	struct ceph_readdir_cache_control cache_ctl = {};

	dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);

	/* we can calculate cache index for the first dirfrag */
	if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) {
		cache_ctl.index = fpos_off(ctx->pos) - 2;
		BUG_ON(cache_ctl.index < 0);
		ptr_pos = cache_ctl.index * sizeof(struct dentry *);
	}

	while (true) {
		pgoff_t pgoff;
		bool emit_dentry;

		if (ptr_pos >= i_size_read(dir)) {
			fi->flags |= CEPH_F_ATEND;
			err = 0;
			break;
		}

		err = -EAGAIN;
		pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
		if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
			ceph_readdir_cache_release(&cache_ctl);
			cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
			if (!cache_ctl.page) {
				dout(" page %lu not found\n", pgoff);
				break;
			}
			/* reading/filling the cache are serialized by
			 * i_mutex, no need to use page lock */
			unlock_page(cache_ctl.page);
			cache_ctl.dentries = kmap(cache_ctl.page);
		}

		rcu_read_lock();
		spin_lock(&parent->d_lock);
		/* check i_size again here, because empty directory can be
		 * marked as complete while not holding the i_mutex. */
		if (ceph_dir_is_complete_ordered(dir) &&
		    ptr_pos < i_size_read(dir))
			dentry = cache_ctl.dentries[cache_ctl.index % nsize];
		else
			dentry = NULL;
		spin_unlock(&parent->d_lock);
		if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
			dentry = NULL;
		rcu_read_unlock();
		if (!dentry)
			break;

		emit_dentry = false;
		di = ceph_dentry(dentry);
		spin_lock(&dentry->d_lock);
		if (di->lease_shared_gen == shared_gen &&
		    d_really_is_positive(dentry) &&
		    ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
		    ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0) {
			emit_dentry = true;
		}
		spin_unlock(&dentry->d_lock);

		if (emit_dentry) {
			dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
			     dentry, dentry, d_inode(dentry));
			ctx->pos = di->offset;
			if (!dir_emit(ctx, dentry->d_name.name,
				      dentry->d_name.len,
				      ceph_translate_ino(dentry->d_sb,
							 d_inode(dentry)->i_ino),
				      d_inode(dentry)->i_mode >> 12)) {
				dput(dentry);
				err = 0;
				break;
			}
			ctx->pos++;

			if (last)
				dput(last);
			last = dentry;
		} else {
			dput(dentry);
		}

		cache_ctl.index++;
		ptr_pos += sizeof(struct dentry *);
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (last) {
		int ret;
		di = ceph_dentry(last);
		ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
				       fpos_off(di->offset) + 1);
		if (ret < 0)
			err = ret;
		dput(last);
		/* last_name no longer matches cache index */
		if (fi->readdir_cache_idx >= 0) {
			fi->readdir_cache_idx = -1;
			fi->dir_release_count = 0;
		}
	}
	return err;
}
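
/*
 * Main readdir entry point: emit "." and "..", try the dcache path
 * when the directory is known complete and ordered, and otherwise page
 * through READDIR (or LSSNAP) replies from the MDS one frag at a time.
 */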
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if (ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	/* proceed with a normal readdir */
more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		if (fi->last_name) {
			req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		}
		req->r_dir_release_cnt = fi->dir_release_count;
		req->r_dir_ordered_cnt = fi->dir_ordered_count;
		req->r_readdir_cache_idx = fi->readdir_cache_idx;
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			off = req->r_readdir_offset;
			fi->next_offset = off;
		}

		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_did_prepopulate) {
			fi->readdir_cache_idx = req->r_readdir_cache_idx;
			if (fi->readdir_cache_idx < 0) {
				/* preclude from marking dir ordered */
				fi->dir_ordered_count = 0;
			} else if (ceph_frag_is_leftmost(frag) && off == 2) {
				/* note dir version at start of readdir so
				 * we can tell if any dentries get dropped */
				fi->dir_release_count = req->r_dir_release_cnt;
				fi->dir_ordered_count = req->r_dir_ordered_cnt;
			}
		} else {
			dout("readdir !did_prepopulate");
			/* disable readdir cache */
			fi->readdir_cache_idx = -1;
			/* preclude from marking dir complete */
			fi->dir_release_count = 0;
		}

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1],
				       fi->next_offset + rinfo->dir_nr);
			if (err)
				return err;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			      rinfo->dir_dname[off - fi->offset],
			      rinfo->dir_dname_len[off - fi->offset],
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
		spin_lock(&ci->i_ceph_lock);
		if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
			dout(" marking %p complete and ordered\n", inode);
			/* use i_size to track number of entries in
			 * readdir cache */
			BUG_ON(fi->readdir_cache_idx < 0);
			i_size_write(inode, fi->readdir_cache_idx *
				     sizeof(struct dentry*));
		} else {
			dout(" marking %p complete\n", inode);
		}
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
		spin_unlock(&ci->i_ceph_lock);
	}

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
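
/*
 * Discard buffered readdir state so the next ceph_readdir() starts
 * over at the beginning of the given frag.
 */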
static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->dir_release_count = 0;
	fi->readdir_cache_idx = -1;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	fi->flags &= ~CEPH_F_ATEND;
}
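
/*
 * Seek within a directory. f_pos encodes (frag, offset), so seeking to
 * a different frag, or back before the buffered chunk, must drop the
 * buffered readdir content.
 */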
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			/* discard buffered readdir content on seekdir(0), or
			 * seek to new frag, or seek prior to current chunk */
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		} else if (fpos_cmp(offset, old_offset) > 0) {
			/* reset dir_release_count if we did a forward seek */
			fi->dir_release_count = 0;
			fi->readdir_cache_idx = -1;
		}
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
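
/* is this the special ".ceph" entry (checked by name prefix) in the root dir? */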
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry. If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced). To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to an old MDS. A recent MDS does not send a
		 * traceless reply for a request that creates a new inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}
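
/*
 * Create an inode (regular file, device node or fifo) via an MDS MKNOD
 * request, initializing ACLs from the parent directory.
 */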
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}
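
/* plain create(2) is just mknod with no device */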
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}
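
/* create a symlink pointing at 'dest' via an MDS SYMLINK request */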
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_locked_dir = dir;
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}
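
/*
 * mkdir: a regular MKDIR, or a MKSNAP when the new directory is being
 * created inside the hidden .snap directory.
 */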
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}
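
/* hard link: ask the MDS to link old_dentry's inode into dir as dentry */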
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
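
/*
 * rename: both parents must be in the same snap context; renaming
 * within the .snap directory itself becomes a RENAMESNAP.
 */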
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above). If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = d_inode(dentry->d_parent);
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (d_really_is_positive(dentry))
			valid = ceph_is_any_caps(d_inode(dentry));
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}

/*
 * read() on a dir. This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry. This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);
	unsigned hash;

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		spin_lock(&dn->d_lock);
		hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
		spin_unlock(&dn->d_lock);
		return hash;
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};