#include <linux/ceph/ceph_debug.h>

#include <linux/sort.h>
#include <linux/slab.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

/*
 * Snapshots in ceph are driven in large part by cooperation from the
 * client.  In contrast to local file systems or file servers that
 * implement snapshots at a single point in the system, ceph's
 * distributed access to storage requires clients to help decide
 * whether a write logically occurs before or after a recently created
 * snapshot.
 *
 * This provides a perfect instantaneous client-wide snapshot.  Between
 * clients, however, snapshots may appear to be applied at slightly
 * different points in time, depending on delays in delivering the
 * snapshot notification.
 *
 * Snapshots are _not_ file system-wide.  Instead, each snapshot
 * applies to the subdirectory nested beneath some directory.  This
 * effectively divides the hierarchy into multiple "realms," where all
 * of the files contained by each realm share the same set of
 * snapshots.  An individual realm's snap set contains snapshots
 * explicitly created on that realm, as well as any snaps in its
 * parent's snap set _after_ the point at which the parent became its
 * parent (due to, say, a rename).  Similarly, snaps from prior parents
 * are included for the intervals during which they were the parent.
 *
 * The client is spared most of this detail, fortunately... it need only
 * maintain a hierarchy of realms reflecting the current parent/child
 * realm relationship, and for each realm an explicit list of snaps
 * inherited from prior parents.
 *
 * A snap_realm struct is maintained for realms containing every inode
 * with an open cap in the system.  (The needed snap realm information is
 * provided by the MDS whenever a cap is issued, i.e., on open.)  A 'seq'
 * version number is used to ensure that as realm parameters change (new
 * snapshot, new parent, etc.) the client's realm hierarchy is updated.
 *
 * The realm hierarchy drives the generation of a 'snap context' for each
 * realm, which simply lists the resulting set of snaps for the realm.  This
 * is attached to any writes sent to OSDs.
 */
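
/*
 * An illustrative example (hypothetical inos and snapids, not taken
 * from this file): realm A spans / with snaps {1, 4}; realm B spans
 * /foo with its own snap {5} and became A's child as of snap 3
 * (parent_since == 3).  B's resulting snap set is {5, 4}: parent
 * snap 4 postdates the link and is inherited, while parent snap 1
 * predates it and would only appear via B's prior_parent_snaps list.
 */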

/*
 * Unfortunately error handling is a bit mixed here.  If we get a snap
 * update, but don't have enough memory to update our realm hierarchy,
 * it's not clear what we can do about it (besides complaining to the
 * console).
 */

/*
 * increase ref count for the realm
 *
 * caller must hold snap_rwsem for write.
 */
void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("get_realm %p %d -> %d\n", realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
	/*
	 * since we _only_ increment realm refs or empty the empty
	 * list with snap_rwsem held, adjusting the empty list here is
	 * safe.  we do need to protect against concurrent empty list
	 * additions, however.
	 */
	if (atomic_inc_return(&realm->nref) == 1) {
		spin_lock(&mdsc->snap_empty_lock);
		list_del_init(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
	}
}

static void __insert_snap_realm(struct rb_root *root,
				struct ceph_snap_realm *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_snap_realm *r = NULL;

	while (*p) {
		parent = *p;
		r = rb_entry(parent, struct ceph_snap_realm, node);
		if (new->ino < r->ino)
			p = &(*p)->rb_left;
		else if (new->ino > r->ino)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}

/*
 * create and get the realm rooted at @ino and bump its ref count.
 *
 * caller must hold snap_rwsem for write.
 */
static struct ceph_snap_realm *ceph_create_snap_realm(
	struct ceph_mds_client *mdsc,
	u64 ino)
{
	struct ceph_snap_realm *realm;

	realm = kzalloc(sizeof(*realm), GFP_NOFS);
	if (!realm)
		return ERR_PTR(-ENOMEM);

	atomic_set(&realm->nref, 1);    /* for caller */
	realm->ino = ino;
	INIT_LIST_HEAD(&realm->children);
	INIT_LIST_HEAD(&realm->child_item);
	INIT_LIST_HEAD(&realm->empty_item);
	INIT_LIST_HEAD(&realm->dirty_item);
	INIT_LIST_HEAD(&realm->inodes_with_caps);
	spin_lock_init(&realm->inodes_with_caps_lock);
	__insert_snap_realm(&mdsc->snap_realms, realm);
	dout("create_snap_realm %llx %p\n", realm->ino, realm);
	return realm;
}

/*
 * lookup the realm rooted at @ino.
 *
 * caller must hold snap_rwsem for write.
 */
static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
						   u64 ino)
{
	struct rb_node *n = mdsc->snap_realms.rb_node;
	struct ceph_snap_realm *r;

	while (n) {
		r = rb_entry(n, struct ceph_snap_realm, node);
		if (ino < r->ino)
			n = n->rb_left;
		else if (ino > r->ino)
			n = n->rb_right;
		else {
			dout("lookup_snap_realm %llx %p\n", r->ino, r);
			return r;
		}
	}
	return NULL;
}

struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
					       u64 ino)
{
	struct ceph_snap_realm *r;

	r = __lookup_snap_realm(mdsc, ino);
	if (r)
		ceph_get_snap_realm(mdsc, r);
	return r;
}
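
/*
 * A minimal caller sketch (illustrative only, not a function in this
 * file): the lookup takes a reference via ceph_get_snap_realm(), so
 * every successful lookup must be paired with ceph_put_snap_realm(),
 * and snap_rwsem must be held for write across the lookup:
 *
 *	down_write(&mdsc->snap_rwsem);
 *	realm = ceph_lookup_snap_realm(mdsc, ino);
 *	if (realm) {
 *		... use realm ...
 *		ceph_put_snap_realm(mdsc, realm);
 *	}
 *	up_write(&mdsc->snap_rwsem);
 */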

static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm);

/*
 * called with snap_rwsem (write)
 */
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
				 struct ceph_snap_realm *realm)
{
	dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);

	rb_erase(&realm->node, &mdsc->snap_realms);

	if (realm->parent) {
		list_del_init(&realm->child_item);
		__put_snap_realm(mdsc, realm->parent);
	}

	kfree(realm->prior_parent_snaps);
	kfree(realm->snaps);
	ceph_put_snap_context(realm->cached_context);
	kfree(realm);
}

/*
 * caller holds snap_rwsem (write)
 */
static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm)
{
	dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (atomic_dec_and_test(&realm->nref))
		__destroy_snap_realm(mdsc, realm);
}

/*
 * caller needn't hold any locks
 */
void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (!atomic_dec_and_test(&realm->nref))
		return;

	if (down_write_trylock(&mdsc->snap_rwsem)) {
		__destroy_snap_realm(mdsc, realm);
		up_write(&mdsc->snap_rwsem);
	} else {
		spin_lock(&mdsc->snap_empty_lock);
		list_add(&realm->empty_item, &mdsc->snap_empty);
		spin_unlock(&mdsc->snap_empty_lock);
	}
}

/*
 * Clean up any realms whose ref counts have dropped to zero.  Note
 * that this does not include realms that were created but not yet
 * used.
 *
 * Called under snap_rwsem (write)
 */
static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	struct ceph_snap_realm *realm;

	spin_lock(&mdsc->snap_empty_lock);
	while (!list_empty(&mdsc->snap_empty)) {
		realm = list_first_entry(&mdsc->snap_empty,
				   struct ceph_snap_realm, empty_item);
		list_del(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
		__destroy_snap_realm(mdsc, realm);
		spin_lock(&mdsc->snap_empty_lock);
	}
	spin_unlock(&mdsc->snap_empty_lock);
}

void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	down_write(&mdsc->snap_rwsem);
	__cleanup_empty_realms(mdsc);
	up_write(&mdsc->snap_rwsem);
}

/*
 * adjust the parent realm of a given @realm.  adjust the child list,
 * parent pointers, and ref counts appropriately.
 *
 * return 1 if parent was changed, 0 if unchanged, <0 on error.
 *
 * caller must hold snap_rwsem for write.
 */
static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
				    struct ceph_snap_realm *realm,
				    u64 parentino)
{
	struct ceph_snap_realm *parent;

	if (realm->parent_ino == parentino)
		return 0;

	parent = ceph_lookup_snap_realm(mdsc, parentino);
	if (!parent) {
		parent = ceph_create_snap_realm(mdsc, parentino);
		if (IS_ERR(parent))
			return PTR_ERR(parent);
	}
	dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
	     realm->ino, realm, realm->parent_ino, realm->parent,
	     parentino, parent);
	if (realm->parent) {
		list_del_init(&realm->child_item);
		ceph_put_snap_realm(mdsc, realm->parent);
	}
	realm->parent_ino = parentino;
	realm->parent = parent;
	list_add(&realm->child_item, &parent->children);
	return 1;
}

static int cmpu64_rev(const void *a, const void *b)
{
	if (*(u64 *)a < *(u64 *)b)
		return 1;
	if (*(u64 *)a > *(u64 *)b)
		return -1;
	return 0;
}
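
/*
 * e.g. sort(snaps, 3, sizeof(u64), cmpu64_rev, NULL) turns {2, 8, 5}
 * into {8, 5, 2}: the comparator inverts the usual ordering, so the
 * vector ends up in the descending (reverse sorted) order that the
 * snap contexts built below use.
 */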

static struct ceph_snap_context *empty_snapc;

/*
 * build the snap context for a given realm.
 */
static int build_snap_context(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *parent = realm->parent;
	struct ceph_snap_context *snapc;
	int err = 0;
	u32 num = realm->num_prior_parent_snaps + realm->num_snaps;

	/*
	 * build parent context, if it hasn't been built.
	 * conservatively estimate that all parent snaps might be
	 * included by us.
	 */
	if (parent) {
		if (!parent->cached_context) {
			err = build_snap_context(parent);
			if (err)
				goto fail;
		}
		num += parent->cached_context->num_snaps;
	}

	/* do i actually need to update?  not if my context seq
	   matches realm seq, and my parents' does too.  (this works
	   because rebuild_snap_realms() works _downward_ in the
	   hierarchy after each update.) */
	if (realm->cached_context &&
	    realm->cached_context->seq == realm->seq &&
	    (!parent ||
	     realm->cached_context->seq >= parent->cached_context->seq)) {
		dout("build_snap_context %llx %p: %p seq %lld (%u snaps)"
		     " (unchanged)\n",
		     realm->ino, realm, realm->cached_context,
		     realm->cached_context->seq,
		     (unsigned int) realm->cached_context->num_snaps);
		return 0;
	}

	if (num == 0 && realm->seq == empty_snapc->seq) {
		ceph_get_snap_context(empty_snapc);
		snapc = empty_snapc;
		goto done;
	}

	/* alloc new snap context */
	err = -ENOMEM;
	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
		goto fail;
	snapc = ceph_create_snap_context(num, GFP_NOFS);
	if (!snapc)
		goto fail;

	/* build (reverse sorted) snap vector */
	num = 0;
	snapc->seq = realm->seq;
	if (parent) {
		u32 i;

		/* include any of parent's snaps occurring _after_ my
		   parent became my parent */
		for (i = 0; i < parent->cached_context->num_snaps; i++)
			if (parent->cached_context->snaps[i] >=
			    realm->parent_since)
				snapc->snaps[num++] =
					parent->cached_context->snaps[i];
		if (parent->cached_context->seq > snapc->seq)
			snapc->seq = parent->cached_context->seq;
	}
	memcpy(snapc->snaps + num, realm->snaps,
	       sizeof(u64)*realm->num_snaps);
	num += realm->num_snaps;
	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
	       sizeof(u64)*realm->num_prior_parent_snaps);
	num += realm->num_prior_parent_snaps;

	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
	snapc->num_snaps = num;
	dout("build_snap_context %llx %p: %p seq %lld (%u snaps)\n",
	     realm->ino, realm, snapc, snapc->seq,
	     (unsigned int) snapc->num_snaps);

done:
	ceph_put_snap_context(realm->cached_context);
	realm->cached_context = snapc;
	return 0;

fail:
	/*
	 * if we fail, clear old (incorrect) cached_context... hopefully
	 * we'll have better luck building it later
	 */
	if (realm->cached_context) {
		ceph_put_snap_context(realm->cached_context);
		realm->cached_context = NULL;
	}
	pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
	       realm, err);
	return err;
}
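
/*
 * A worked example with made-up values: if the parent's cached
 * context holds snaps {8, 5, 2} with seq 8, this realm's
 * parent_since is 4, its own snaps are {7}, and its
 * prior_parent_snaps are {3}, then the merge above takes the parent
 * snaps >= 4 (8 and 5, but not 2), appends {7} and {3}, and sorts in
 * reverse, yielding snaps {8, 7, 5, 3} with seq max(realm->seq, 8).
 */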

/*
 * rebuild snap context for the given realm and all of its children.
 */
static void rebuild_snap_realms(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *child;

	dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
	build_snap_context(realm);

	list_for_each_entry(child, &realm->children, child_item)
		rebuild_snap_realms(child);
}

/*
 * helper to allocate and decode an array of snapids.  free prior
 * instance, if any.
 */
static int dup_array(u64 **dst, __le64 *src, u32 num)
{
	u32 i;

	kfree(*dst);
	if (num) {
		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
		if (!*dst)
			return -ENOMEM;
		for (i = 0; i < num; i++)
			(*dst)[i] = get_unaligned_le64(src + i);
	} else {
		*dst = NULL;
	}
	return 0;
}

/*
 * When a snapshot is applied, the size/mtime inode metadata is queued
 * in a ceph_cap_snap (one for each snapshot) until writeback
 * completes and the metadata can be flushed back to the MDS.
 *
 * However, if a (sync) write is currently in-progress when we apply
 * the snapshot, we have to wait until the write succeeds or fails
 * (and a final size/mtime is known).  In this case cap_snap->writing
 * is set to 1 and the cap_snap is said to be "pending".  When the
 * write finishes, we __ceph_finish_cap_snap().
 *
 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
 * change).
 */
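
/*
 * An illustrative timeline (hypothetical): a sync write is in flight
 * when the snap arrives, so ceph_queue_cap_snap() sets
 * capsnap->writing = 1 and leaves size/mtime unset; when the write
 * completes, the writer calls __ceph_finish_cap_snap(), which records
 * the final size/mtime and, once no dirty pages remain, puts the
 * inode on the snap_flush list for flushing back to the MDS.
 */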
void ceph_queue_cap_snap(struct ceph_inode_info *ci)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap_snap *capsnap;
	int used, dirty;

	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
	if (!capsnap) {
		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
		return;
	}

	spin_lock(&ci->i_ceph_lock);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);

	/*
	 * If there is a write in progress, treat that as a dirty Fw,
	 * even though it hasn't completed yet; by the time we finish
	 * up this capsnap it will be.
	 */
	if (used & CEPH_CAP_FILE_WR)
		dirty |= CEPH_CAP_FILE_WR;

	if (__ceph_have_pending_cap_snap(ci)) {
		/* there is no point in queuing multiple "pending" cap_snaps,
		   as no new writes are allowed to start when pending, so any
		   writes in progress now were started before the previous
		   cap_snap.  lucky us. */
		dout("queue_cap_snap %p already pending\n", inode);
		kfree(capsnap);
	} else if (ci->i_snap_realm->cached_context == empty_snapc) {
		dout("queue_cap_snap %p empty snapc\n", inode);
		kfree(capsnap);
	} else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
			    CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) {
		struct ceph_snap_context *snapc = ci->i_head_snapc;

		/*
		 * if we are a sync write, we may need to go to the snaprealm
		 * to get the current snapc.
		 */
		if (!snapc)
			snapc = ci->i_snap_realm->cached_context;

		dout("queue_cap_snap %p cap_snap %p queuing under %p %s\n",
		     inode, capsnap, snapc, ceph_cap_string(dirty));
		ihold(inode);

		atomic_set(&capsnap->nref, 1);
		capsnap->ci = ci;
		INIT_LIST_HEAD(&capsnap->ci_item);
		INIT_LIST_HEAD(&capsnap->flushing_item);

		capsnap->follows = snapc->seq;
		capsnap->issued = __ceph_caps_issued(ci, NULL);
		capsnap->dirty = dirty;

		capsnap->mode = inode->i_mode;
		capsnap->uid = inode->i_uid;
		capsnap->gid = inode->i_gid;

		if (dirty & CEPH_CAP_XATTR_EXCL) {
			__ceph_build_xattrs_blob(ci);
			capsnap->xattr_blob =
				ceph_buffer_get(ci->i_xattrs.blob);
			capsnap->xattr_version = ci->i_xattrs.version;
		} else {
			capsnap->xattr_blob = NULL;
			capsnap->xattr_version = 0;
		}

		capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;

		/* dirty page count moved from _head to this cap_snap;
		   all subsequent page dirties occur _after_ this
		   snapshot. */
		capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
		ci->i_wrbuffer_ref_head = 0;
		capsnap->context = snapc;
		ci->i_head_snapc =
			ceph_get_snap_context(ci->i_snap_realm->cached_context);
		dout(" new snapc is %p\n", ci->i_head_snapc);
		list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);

		if (used & CEPH_CAP_FILE_WR) {
			dout("queue_cap_snap %p cap_snap %p snapc %p"
			     " seq %llu used WR, now pending\n", inode,
			     capsnap, snapc, snapc->seq);
			capsnap->writing = 1;
		} else {
			/* note mtime, size NOW. */
			__ceph_finish_cap_snap(ci, capsnap);
		}
	} else {
		dout("queue_cap_snap %p nothing dirty|writing\n", inode);
		kfree(capsnap);
	}

	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Finalize the size and mtime for a cap_snap... that is, settle on final
 * values to be used for the snapshot, to be flushed back to the mds.
 *
 * If capsnap can now be flushed, add to snap_flush list, and return 1.
 *
 * Caller must hold i_ceph_lock.
 */
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
			   struct ceph_cap_snap *capsnap)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;

	BUG_ON(capsnap->writing);
	capsnap->size = inode->i_size;
	capsnap->mtime = inode->i_mtime;
	capsnap->atime = inode->i_atime;
	capsnap->ctime = inode->i_ctime;
	capsnap->time_warp_seq = ci->i_time_warp_seq;
	if (capsnap->dirty_pages) {
		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
		     "still has %d dirty pages\n", inode, capsnap,
		     capsnap->context, capsnap->context->seq,
		     ceph_cap_string(capsnap->dirty), capsnap->size,
		     capsnap->dirty_pages);
		return 0;
	}
	dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
	     inode, capsnap, capsnap->context,
	     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
	     capsnap->size);

	spin_lock(&mdsc->snap_flush_lock);
	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
	spin_unlock(&mdsc->snap_flush_lock);
	return 1;  /* caller may want to ceph_flush_snaps */
}
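
/*
 * A hypothetical caller sketch (illustrative, not code from this
 * file): the writer that held Fw would typically clear the pending
 * state and finish the capsnap under i_ceph_lock, then flush:
 *
 *	spin_lock(&ci->i_ceph_lock);
 *	capsnap->writing = 0;
 *	flush = __ceph_finish_cap_snap(ci, capsnap);
 *	spin_unlock(&ci->i_ceph_lock);
 *	if (flush)
 *		... flush snaps to the MDS ...
 *
 * where the final step is the ceph_flush_snaps call that the
 * return-value comment above refers to.
 */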

/*
 * Queue cap_snaps for snap writeback for this realm and its children.
 * Called under snap_rwsem, so realm topology won't change.
 */
static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
{
	struct ceph_inode_info *ci;
	struct inode *lastinode = NULL;
	struct ceph_snap_realm *child;

	dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);

	spin_lock(&realm->inodes_with_caps_lock);
	list_for_each_entry(ci, &realm->inodes_with_caps,
			    i_snap_realm_item) {
		struct inode *inode = igrab(&ci->vfs_inode);
		if (!inode)
			continue;
		spin_unlock(&realm->inodes_with_caps_lock);
		iput(lastinode);
		lastinode = inode;
		ceph_queue_cap_snap(ci);
		spin_lock(&realm->inodes_with_caps_lock);
	}
	spin_unlock(&realm->inodes_with_caps_lock);
	iput(lastinode);

	list_for_each_entry(child, &realm->children, child_item) {
		dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
		     realm, realm->ino, child, child->ino);
		list_del_init(&child->dirty_item);
		list_add(&child->dirty_item, &realm->dirty_item);
	}

	list_del_init(&realm->dirty_item);
	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
}

/*
 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
 * the snap realm parameters from a given realm and all of its ancestors,
 * up to the root.
 *
 * Caller must hold snap_rwsem for write.
 */
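
/*
 * The trace layout, as decoded below (a sketch of the encoding, not a
 * formal spec): one record per realm, starting from the given realm
 * and ending at the root, each record consisting of a struct
 * ceph_mds_snap_realm header followed by num_snaps snapids and then
 * num_prior_parent_snaps snapids, all little-endian u64s; records
 * repeat until the end of the buffer.
 */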
int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
			   void *p, void *e, bool deletion,
			   struct ceph_snap_realm **realm_ret)
{
	struct ceph_mds_snap_realm *ri;    /* encoded */
	__le64 *snaps;                     /* encoded */
	__le64 *prior_parent_snaps;        /* encoded */
	struct ceph_snap_realm *realm = NULL;
	struct ceph_snap_realm *first_realm = NULL;
	int invalidate = 0;
	int err = -ENOMEM;
	LIST_HEAD(dirty_realms);

	dout("update_snap_trace deletion=%d\n", deletion);
more:
	ceph_decode_need(&p, e, sizeof(*ri), bad);
	ri = p;
	p += sizeof(*ri);
	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
	snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
	prior_parent_snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);

	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
	if (!realm) {
		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
		if (IS_ERR(realm)) {
			err = PTR_ERR(realm);
			goto fail;
		}
	}

	/* ensure the parent is correct */
	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
	if (err < 0)
		goto fail;
	invalidate += err;

	if (le64_to_cpu(ri->seq) > realm->seq) {
		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
		/* update realm parameters, snap lists */
		realm->seq = le64_to_cpu(ri->seq);
		realm->created = le64_to_cpu(ri->created);
		realm->parent_since = le64_to_cpu(ri->parent_since);

		realm->num_snaps = le32_to_cpu(ri->num_snaps);
		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
		if (err < 0)
			goto fail;

		realm->num_prior_parent_snaps =
			le32_to_cpu(ri->num_prior_parent_snaps);
		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
				realm->num_prior_parent_snaps);
		if (err < 0)
			goto fail;

		/* queue realm for cap_snap creation */
		list_add(&realm->dirty_item, &dirty_realms);

		invalidate = 1;
	} else if (!realm->cached_context) {
		dout("update_snap_trace %llx %p seq %lld new\n",
		     realm->ino, realm, realm->seq);
		invalidate = 1;
	} else {
		dout("update_snap_trace %llx %p seq %lld unchanged\n",
		     realm->ino, realm, realm->seq);
	}

	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
	     realm, invalidate, p, e);

	/* invalidate when we reach the _end_ (root) of the trace */
	if (invalidate && p >= e)
		rebuild_snap_realms(realm);

	if (!first_realm)
		first_realm = realm;
	else
		ceph_put_snap_realm(mdsc, realm);

	if (p < e)
		goto more;

	/*
	 * queue cap snaps _after_ we've built the new snap contexts,
	 * so that i_head_snapc can be set appropriately.
	 */
	while (!list_empty(&dirty_realms)) {
		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
					 dirty_item);
		queue_realm_cap_snaps(realm);
	}

	if (realm_ret)
		*realm_ret = first_realm;
	else
		ceph_put_snap_realm(mdsc, first_realm);

	__cleanup_empty_realms(mdsc);
	return 0;

bad:
	err = -EINVAL;
fail:
	if (realm && !IS_ERR(realm))
		ceph_put_snap_realm(mdsc, realm);
	if (first_realm)
		ceph_put_snap_realm(mdsc, first_realm);
	pr_err("update_snap_trace error %d\n", err);
	return err;
}

/*
 * Send any cap_snaps that are queued for flush.  Try to carry
 * s_mutex across multiple snap flushes to avoid locking overhead.
 *
 * Caller holds no locks.
 */
static void flush_snaps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;
	struct ceph_mds_session *session = NULL;

	dout("flush_snaps\n");
	spin_lock(&mdsc->snap_flush_lock);
	while (!list_empty(&mdsc->snap_flush_list)) {
		ci = list_first_entry(&mdsc->snap_flush_list,
				struct ceph_inode_info, i_snap_flush_item);
		inode = &ci->vfs_inode;
		ihold(inode);
		spin_unlock(&mdsc->snap_flush_lock);
		spin_lock(&ci->i_ceph_lock);
		__ceph_flush_snaps(ci, &session, 0);
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		spin_lock(&mdsc->snap_flush_lock);
	}
	spin_unlock(&mdsc->snap_flush_lock);

	if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	dout("flush_snaps done\n");
}

/*
 * Handle a snap notification from the MDS.
 *
 * This can take two basic forms: the simplest is just a snap creation
 * or deletion notification on an existing realm.  This should update the
 * realm and its children.
 *
 * The more difficult case is realm creation, due to snap creation at a
 * new point in the file hierarchy, or due to a rename that moves a file or
 * directory into another realm.
 */
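
/*
 * For the split case, the message body decoded below looks roughly
 * like this (a sketch based on the decode sequence, not a formal
 * spec): a struct ceph_mds_snap_head, then num_split_inos inode
 * numbers and num_split_realms realm inos that move to the new
 * (split) realm, and finally the snap trace that is handed to
 * ceph_update_snap_trace().
 */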
void ceph_handle_snap(struct ceph_mds_client *mdsc,
		      struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	int mds = session->s_mds;
	u64 split;
	int op;
	int trace_len;
	struct ceph_snap_realm *realm = NULL;
	void *p = msg->front.iov_base;
	void *e = p + msg->front.iov_len;
	struct ceph_mds_snap_head *h;
	int num_split_inos, num_split_realms;
	__le64 *split_inos = NULL, *split_realms = NULL;
	int i;
	int locked_rwsem = 0;

	/* decode */
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = p;
	op = le32_to_cpu(h->op);
	split = le64_to_cpu(h->split);   /* non-zero if we are splitting an
					  * existing realm */
	num_split_inos = le32_to_cpu(h->num_split_inos);
	num_split_realms = le32_to_cpu(h->num_split_realms);
	trace_len = le32_to_cpu(h->trace_len);
	p += sizeof(*h);

	dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
	     ceph_snap_op_name(op), split, trace_len);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	mutex_unlock(&session->s_mutex);

	down_write(&mdsc->snap_rwsem);
	locked_rwsem = 1;

	if (op == CEPH_SNAP_OP_SPLIT) {
		struct ceph_mds_snap_realm *ri;

		/*
		 * A "split" breaks part of an existing realm off into
		 * a new realm.  The MDS provides a list of inodes
		 * (with caps) and child realms that belong to the new
		 * child.
		 */
		split_inos = p;
		p += sizeof(u64) * num_split_inos;
		split_realms = p;
		p += sizeof(u64) * num_split_realms;
		ceph_decode_need(&p, e, sizeof(*ri), bad);
		/* we will peek at realm info here, but will _not_
		 * advance p, as the realm update will occur below in
		 * ceph_update_snap_trace. */
		ri = p;

		realm = ceph_lookup_snap_realm(mdsc, split);
		if (!realm) {
			realm = ceph_create_snap_realm(mdsc, split);
			if (IS_ERR(realm))
				goto out;
		}

		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
		for (i = 0; i < num_split_inos; i++) {
			struct ceph_vino vino = {
				.ino = le64_to_cpu(split_inos[i]),
				.snap = CEPH_NOSNAP,
			};
			struct inode *inode = ceph_find_inode(sb, vino);
			struct ceph_inode_info *ci;
			struct ceph_snap_realm *oldrealm;

			if (!inode)
				continue;
			ci = ceph_inode(inode);

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_snap_realm)
				goto skip_inode;
			/*
			 * If this inode belongs to a realm that was
			 * created after our new realm, we experienced
			 * a race (due to another split notification
			 * arriving from a different MDS).  So skip
			 * this inode.
			 */
			if (ci->i_snap_realm->created >
			    le64_to_cpu(ri->created)) {
				dout(" leaving %p in newer realm %llx %p\n",
				     inode, ci->i_snap_realm->ino,
				     ci->i_snap_realm);
				goto skip_inode;
			}
			dout(" will move %p to split realm %llx %p\n",
			     inode, realm->ino, realm);
			/*
			 * Move the inode to the new realm
			 */
			spin_lock(&realm->inodes_with_caps_lock);
			list_del_init(&ci->i_snap_realm_item);
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			oldrealm = ci->i_snap_realm;
			ci->i_snap_realm = realm;
			spin_unlock(&realm->inodes_with_caps_lock);
			spin_unlock(&ci->i_ceph_lock);

			ceph_get_snap_realm(mdsc, realm);
			ceph_put_snap_realm(mdsc, oldrealm);

			iput(inode);
			continue;

skip_inode:
			spin_unlock(&ci->i_ceph_lock);
			iput(inode);
		}

		/* we may have taken some of the old realm's children. */
		for (i = 0; i < num_split_realms; i++) {
			struct ceph_snap_realm *child =
				__lookup_snap_realm(mdsc,
					   le64_to_cpu(split_realms[i]));
			if (!child)
				continue;
			adjust_snap_realm_parent(mdsc, child, realm->ino);
		}
	}

	/*
	 * update using the provided snap trace. if we are deleting a
	 * snap, we can avoid queueing cap_snaps.
	 */
	ceph_update_snap_trace(mdsc, p, e,
			       op == CEPH_SNAP_OP_DESTROY, NULL);

	if (op == CEPH_SNAP_OP_SPLIT)
		/* we took a reference when we created the realm, above */
		ceph_put_snap_realm(mdsc, realm);

	__cleanup_empty_realms(mdsc);

	up_write(&mdsc->snap_rwsem);

	flush_snaps(mdsc);
	return;

bad:
	pr_err("corrupt snap message from mds%d\n", mds);
	ceph_msg_dump(msg);
out:
	if (locked_rwsem)
		up_write(&mdsc->snap_rwsem);
	return;
}

int __init ceph_snap_init(void)
{
	empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
	if (!empty_snapc)
		return -ENOMEM;
	empty_snapc->seq = 1;
	return 0;
}

void ceph_snap_exit(void)
{
	ceph_put_snap_context(empty_snapc);
}