cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

snap.c (35663B)


      1// SPDX-License-Identifier: GPL-2.0
      2#include <linux/ceph/ceph_debug.h>
      3
      4#include <linux/sort.h>
      5#include <linux/slab.h>
      6#include <linux/iversion.h>
      7#include "super.h"
      8#include "mds_client.h"
      9#include <linux/ceph/decode.h>
     10
     11/* unused map expires after 5 minutes */
     12#define CEPH_SNAPID_MAP_TIMEOUT	(5 * 60 * HZ)
     13
     14/*
     15 * Snapshots in ceph are driven in large part by cooperation from the
     16 * client.  In contrast to local file systems or file servers that
     17 * implement snapshots at a single point in the system, ceph's
     18 * distributed access to storage requires clients to help decide
     19 * whether a write logically occurs before or after a recently created
     20 * snapshot.
     21 *
      22 * This provides a perfect, instantaneous client-wide snapshot.  Between
     23 * clients, however, snapshots may appear to be applied at slightly
     24 * different points in time, depending on delays in delivering the
     25 * snapshot notification.
     26 *
     27 * Snapshots are _not_ file system-wide.  Instead, each snapshot
     28 * applies to the subdirectory nested beneath some directory.  This
     29 * effectively divides the hierarchy into multiple "realms," where all
     30 * of the files contained by each realm share the same set of
     31 * snapshots.  An individual realm's snap set contains snapshots
     32 * explicitly created on that realm, as well as any snaps in its
      33 * parent's snap set _after_ the point at which the parent became its
     34 * parent (due to, say, a rename).  Similarly, snaps from prior parents
      35 * during the intervals in which they were the parent are included.
     36 *
      37 * The client is spared most of this detail, fortunately... it need only
      38 * maintain a hierarchy of realms reflecting the current parent/child
      39 * realm relationship, and for each realm keep an explicit list of snaps
     40 * inherited from prior parents.
     41 *
      42 * A snap_realm struct is maintained for every realm that contains an inode
     43 * with an open cap in the system.  (The needed snap realm information is
     44 * provided by the MDS whenever a cap is issued, i.e., on open.)  A 'seq'
     45 * version number is used to ensure that as realm parameters change (new
     46 * snapshot, new parent, etc.) the client's realm hierarchy is updated.
     47 *
     48 * The realm hierarchy drives the generation of a 'snap context' for each
     49 * realm, which simply lists the resulting set of snaps for the realm.  This
     50 * is attached to any writes sent to OSDs.
     51 */
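        /*
         * A rough illustration of the above (snap ids are made up): suppose
         * realm A has snaps {8}, and realm B became a child of A at
         * parent_since = 4, with its own snaps {6} and prior-parent snaps
         * {2}.  B's snap context is then A's snaps >= 4 merged with B's own
         * and prior-parent snaps, reverse sorted: {8, 6, 2}, with seq taken
         * as the max of A's and B's seq.  build_snap_context() below
         * implements this merge.
         */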
     52/*
     53 * Unfortunately error handling is a bit mixed here.  If we get a snap
     54 * update, but don't have enough memory to update our realm hierarchy,
     55 * it's not clear what we can do about it (besides complaining to the
     56 * console).
     57 */
     58
     59
     60/*
     61 * increase ref count for the realm
     62 *
     63 * caller must hold snap_rwsem.
     64 */
     65void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
     66			 struct ceph_snap_realm *realm)
     67{
     68	lockdep_assert_held(&mdsc->snap_rwsem);
     69
     70	/*
     71	 * The 0->1 and 1->0 transitions must take the snap_empty_lock
     72	 * atomically with the refcount change. Go ahead and bump the
     73	 * nref here, unless it's 0, in which case we take the spinlock
     74	 * and then do the increment and remove it from the list.
     75	 */
     76	if (atomic_inc_not_zero(&realm->nref))
     77		return;
     78
     79	spin_lock(&mdsc->snap_empty_lock);
     80	if (atomic_inc_return(&realm->nref) == 1)
     81		list_del_init(&realm->empty_item);
     82	spin_unlock(&mdsc->snap_empty_lock);
     83}
     84
     85static void __insert_snap_realm(struct rb_root *root,
     86				struct ceph_snap_realm *new)
     87{
     88	struct rb_node **p = &root->rb_node;
     89	struct rb_node *parent = NULL;
     90	struct ceph_snap_realm *r = NULL;
     91
     92	while (*p) {
     93		parent = *p;
     94		r = rb_entry(parent, struct ceph_snap_realm, node);
     95		if (new->ino < r->ino)
     96			p = &(*p)->rb_left;
     97		else if (new->ino > r->ino)
     98			p = &(*p)->rb_right;
     99		else
    100			BUG();
    101	}
    102
    103	rb_link_node(&new->node, parent, p);
    104	rb_insert_color(&new->node, root);
    105}
    106
    107/*
    108 * create and get the realm rooted at @ino and bump its ref count.
    109 *
    110 * caller must hold snap_rwsem for write.
    111 */
    112static struct ceph_snap_realm *ceph_create_snap_realm(
    113	struct ceph_mds_client *mdsc,
    114	u64 ino)
    115{
    116	struct ceph_snap_realm *realm;
    117
    118	lockdep_assert_held_write(&mdsc->snap_rwsem);
    119
    120	realm = kzalloc(sizeof(*realm), GFP_NOFS);
    121	if (!realm)
    122		return ERR_PTR(-ENOMEM);
    123
     124	/* Do not release the global dummy snaprealm until unmounting */
    125	if (ino == CEPH_INO_GLOBAL_SNAPREALM)
    126		atomic_set(&realm->nref, 2);
    127	else
    128		atomic_set(&realm->nref, 1);
    129	realm->ino = ino;
    130	INIT_LIST_HEAD(&realm->children);
    131	INIT_LIST_HEAD(&realm->child_item);
    132	INIT_LIST_HEAD(&realm->empty_item);
    133	INIT_LIST_HEAD(&realm->dirty_item);
    134	INIT_LIST_HEAD(&realm->rebuild_item);
    135	INIT_LIST_HEAD(&realm->inodes_with_caps);
    136	spin_lock_init(&realm->inodes_with_caps_lock);
    137	__insert_snap_realm(&mdsc->snap_realms, realm);
    138	mdsc->num_snap_realms++;
    139
    140	dout("%s %llx %p\n", __func__, realm->ino, realm);
    141	return realm;
    142}
    143
    144/*
    145 * lookup the realm rooted at @ino.
    146 *
    147 * caller must hold snap_rwsem.
    148 */
    149static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
    150						   u64 ino)
    151{
    152	struct rb_node *n = mdsc->snap_realms.rb_node;
    153	struct ceph_snap_realm *r;
    154
    155	lockdep_assert_held(&mdsc->snap_rwsem);
    156
    157	while (n) {
    158		r = rb_entry(n, struct ceph_snap_realm, node);
    159		if (ino < r->ino)
    160			n = n->rb_left;
    161		else if (ino > r->ino)
    162			n = n->rb_right;
    163		else {
    164			dout("%s %llx %p\n", __func__, r->ino, r);
    165			return r;
    166		}
    167	}
    168	return NULL;
    169}
    170
    171struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
    172					       u64 ino)
    173{
    174	struct ceph_snap_realm *r;
    175	r = __lookup_snap_realm(mdsc, ino);
    176	if (r)
    177		ceph_get_snap_realm(mdsc, r);
    178	return r;
    179}
    180
    181static void __put_snap_realm(struct ceph_mds_client *mdsc,
    182			     struct ceph_snap_realm *realm);
    183
    184/*
    185 * called with snap_rwsem (write)
    186 */
    187static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
    188				 struct ceph_snap_realm *realm)
    189{
    190	lockdep_assert_held_write(&mdsc->snap_rwsem);
    191
    192	dout("%s %p %llx\n", __func__, realm, realm->ino);
    193
    194	rb_erase(&realm->node, &mdsc->snap_realms);
    195	mdsc->num_snap_realms--;
    196
    197	if (realm->parent) {
    198		list_del_init(&realm->child_item);
    199		__put_snap_realm(mdsc, realm->parent);
    200	}
    201
    202	kfree(realm->prior_parent_snaps);
    203	kfree(realm->snaps);
    204	ceph_put_snap_context(realm->cached_context);
    205	kfree(realm);
    206}
    207
    208/*
    209 * caller holds snap_rwsem (write)
    210 */
    211static void __put_snap_realm(struct ceph_mds_client *mdsc,
    212			     struct ceph_snap_realm *realm)
    213{
    214	lockdep_assert_held_write(&mdsc->snap_rwsem);
    215
    216	/*
    217	 * We do not require the snap_empty_lock here, as any caller that
    218	 * increments the value must hold the snap_rwsem.
    219	 */
    220	if (atomic_dec_and_test(&realm->nref))
    221		__destroy_snap_realm(mdsc, realm);
    222}
    223
    224/*
    225 * See comments in ceph_get_snap_realm. Caller needn't hold any locks.
    226 */
    227void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
    228			 struct ceph_snap_realm *realm)
    229{
    230	if (!atomic_dec_and_lock(&realm->nref, &mdsc->snap_empty_lock))
    231		return;
    232
    233	if (down_write_trylock(&mdsc->snap_rwsem)) {
    234		spin_unlock(&mdsc->snap_empty_lock);
    235		__destroy_snap_realm(mdsc, realm);
    236		up_write(&mdsc->snap_rwsem);
    237	} else {
    238		list_add(&realm->empty_item, &mdsc->snap_empty);
    239		spin_unlock(&mdsc->snap_empty_lock);
    240	}
    241}
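        /*
         * Note on the down_write_trylock() above: a caller may already hold
         * snap_rwsem (ceph_put_snap_realm() is also called from paths such as
         * ceph_update_snap_trace() that run with it held), so blocking on the
         * write lock here could deadlock.  When the lock cannot be taken
         * immediately, the realm is parked on mdsc->snap_empty and reaped
         * later by __cleanup_empty_realms(), which runs with snap_rwsem held
         * for write.
         */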
    242
    243/*
    244 * Clean up any realms whose ref counts have dropped to zero.  Note
     245 * that this does not include realms that were created but not yet
    246 * used.
    247 *
    248 * Called under snap_rwsem (write)
    249 */
    250static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
    251{
    252	struct ceph_snap_realm *realm;
    253
    254	lockdep_assert_held_write(&mdsc->snap_rwsem);
    255
    256	spin_lock(&mdsc->snap_empty_lock);
    257	while (!list_empty(&mdsc->snap_empty)) {
    258		realm = list_first_entry(&mdsc->snap_empty,
    259				   struct ceph_snap_realm, empty_item);
    260		list_del(&realm->empty_item);
    261		spin_unlock(&mdsc->snap_empty_lock);
    262		__destroy_snap_realm(mdsc, realm);
    263		spin_lock(&mdsc->snap_empty_lock);
    264	}
    265	spin_unlock(&mdsc->snap_empty_lock);
    266}
    267
    268void ceph_cleanup_global_and_empty_realms(struct ceph_mds_client *mdsc)
    269{
    270	struct ceph_snap_realm *global_realm;
    271
    272	down_write(&mdsc->snap_rwsem);
    273	global_realm = __lookup_snap_realm(mdsc, CEPH_INO_GLOBAL_SNAPREALM);
    274	if (global_realm)
    275		ceph_put_snap_realm(mdsc, global_realm);
    276	__cleanup_empty_realms(mdsc);
    277	up_write(&mdsc->snap_rwsem);
    278}
    279
    280/*
     281 * adjust the parent realm of a given @realm.  adjust the child list, parent
    282 * pointers, and ref counts appropriately.
    283 *
     284 * return 1 if the parent was changed, 0 if unchanged, <0 on error.
    285 *
    286 * caller must hold snap_rwsem for write.
    287 */
    288static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
    289				    struct ceph_snap_realm *realm,
    290				    u64 parentino)
    291{
    292	struct ceph_snap_realm *parent;
    293
    294	lockdep_assert_held_write(&mdsc->snap_rwsem);
    295
    296	if (realm->parent_ino == parentino)
    297		return 0;
    298
    299	parent = ceph_lookup_snap_realm(mdsc, parentino);
    300	if (!parent) {
    301		parent = ceph_create_snap_realm(mdsc, parentino);
    302		if (IS_ERR(parent))
    303			return PTR_ERR(parent);
    304	}
    305	dout("%s %llx %p: %llx %p -> %llx %p\n", __func__, realm->ino,
    306	     realm, realm->parent_ino, realm->parent, parentino, parent);
    307	if (realm->parent) {
    308		list_del_init(&realm->child_item);
    309		ceph_put_snap_realm(mdsc, realm->parent);
    310	}
    311	realm->parent_ino = parentino;
    312	realm->parent = parent;
    313	list_add(&realm->child_item, &parent->children);
    314	return 1;
    315}
    316
    317
    318static int cmpu64_rev(const void *a, const void *b)
    319{
    320	if (*(u64 *)a < *(u64 *)b)
    321		return 1;
    322	if (*(u64 *)a > *(u64 *)b)
    323		return -1;
    324	return 0;
    325}
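        /*
         * cmpu64_rev() inverts the usual comparison so that sort() produces a
         * _descending_ order, which is how snap vectors are kept (newest snap
         * id first).  A small sketch of its effect:
         *
         *	u64 snaps[] = { 3, 10, 7 };
         *	sort(snaps, ARRAY_SIZE(snaps), sizeof(u64), cmpu64_rev, NULL);
         *	// snaps is now { 10, 7, 3 }
         */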
    326
    327
    328/*
    329 * build the snap context for a given realm.
    330 */
    331static int build_snap_context(struct ceph_snap_realm *realm,
    332			      struct list_head *realm_queue,
    333			      struct list_head *dirty_realms)
    334{
    335	struct ceph_snap_realm *parent = realm->parent;
    336	struct ceph_snap_context *snapc;
    337	int err = 0;
    338	u32 num = realm->num_prior_parent_snaps + realm->num_snaps;
    339
    340	/*
    341	 * build parent context, if it hasn't been built.
    342	 * conservatively estimate that all parent snaps might be
    343	 * included by us.
    344	 */
    345	if (parent) {
    346		if (!parent->cached_context) {
    347			/* add to the queue head */
    348			list_add(&parent->rebuild_item, realm_queue);
    349			return 1;
    350		}
    351		num += parent->cached_context->num_snaps;
    352	}
    353
    354	/* do i actually need to update?  not if my context seq
     355	   matches realm seq, and my parents' does too.  (this works
     356	   because rebuild_snap_realms() works _downward_ in the
    357	   hierarchy after each update.) */
    358	if (realm->cached_context &&
    359	    realm->cached_context->seq == realm->seq &&
    360	    (!parent ||
    361	     realm->cached_context->seq >= parent->cached_context->seq)) {
    362		dout("%s %llx %p: %p seq %lld (%u snaps) (unchanged)\n",
    363		     __func__, realm->ino, realm, realm->cached_context,
    364		     realm->cached_context->seq,
    365		     (unsigned int)realm->cached_context->num_snaps);
    366		return 0;
    367	}
    368
    369	/* alloc new snap context */
    370	err = -ENOMEM;
    371	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
    372		goto fail;
    373	snapc = ceph_create_snap_context(num, GFP_NOFS);
    374	if (!snapc)
    375		goto fail;
    376
    377	/* build (reverse sorted) snap vector */
    378	num = 0;
    379	snapc->seq = realm->seq;
    380	if (parent) {
    381		u32 i;
    382
    383		/* include any of parent's snaps occurring _after_ my
    384		   parent became my parent */
    385		for (i = 0; i < parent->cached_context->num_snaps; i++)
    386			if (parent->cached_context->snaps[i] >=
    387			    realm->parent_since)
    388				snapc->snaps[num++] =
    389					parent->cached_context->snaps[i];
    390		if (parent->cached_context->seq > snapc->seq)
    391			snapc->seq = parent->cached_context->seq;
    392	}
    393	memcpy(snapc->snaps + num, realm->snaps,
    394	       sizeof(u64)*realm->num_snaps);
    395	num += realm->num_snaps;
    396	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
    397	       sizeof(u64)*realm->num_prior_parent_snaps);
    398	num += realm->num_prior_parent_snaps;
    399
    400	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
    401	snapc->num_snaps = num;
    402	dout("%s %llx %p: %p seq %lld (%u snaps)\n", __func__, realm->ino,
    403	     realm, snapc, snapc->seq, (unsigned int) snapc->num_snaps);
    404
    405	ceph_put_snap_context(realm->cached_context);
    406	realm->cached_context = snapc;
    407	/* queue realm for cap_snap creation */
    408	list_add_tail(&realm->dirty_item, dirty_realms);
    409	return 0;
    410
    411fail:
    412	/*
    413	 * if we fail, clear old (incorrect) cached_context... hopefully
    414	 * we'll have better luck building it later
    415	 */
    416	if (realm->cached_context) {
    417		ceph_put_snap_context(realm->cached_context);
    418		realm->cached_context = NULL;
    419	}
    420	pr_err("%s %llx %p fail %d\n", __func__, realm->ino, realm, err);
    421	return err;
    422}
    423
    424/*
    425 * rebuild snap context for the given realm and all of its children.
    426 */
    427static void rebuild_snap_realms(struct ceph_snap_realm *realm,
    428				struct list_head *dirty_realms)
    429{
    430	LIST_HEAD(realm_queue);
    431	int last = 0;
    432	bool skip = false;
    433
    434	list_add_tail(&realm->rebuild_item, &realm_queue);
    435
    436	while (!list_empty(&realm_queue)) {
    437		struct ceph_snap_realm *_realm, *child;
    438
    439		_realm = list_first_entry(&realm_queue,
    440					  struct ceph_snap_realm,
    441					  rebuild_item);
    442
    443		/*
     444		 * If the last build failed due to a memory
     445		 * issue, just empty the realm_queue and return
     446		 * to avoid an infinite loop.
    447		 */
    448		if (last < 0) {
    449			list_del_init(&_realm->rebuild_item);
    450			continue;
    451		}
    452
    453		last = build_snap_context(_realm, &realm_queue, dirty_realms);
    454		dout("%s %llx %p, %s\n", __func__, _realm->ino, _realm,
    455		     last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
    456
     457		/* is any child already in the realm_queue? */
    458		list_for_each_entry(child, &_realm->children, child_item) {
    459			if (!list_empty(&child->rebuild_item)) {
    460				skip = true;
    461				break;
    462			}
    463		}
    464
    465		if (!skip) {
    466			list_for_each_entry(child, &_realm->children, child_item)
    467				list_add_tail(&child->rebuild_item, &realm_queue);
    468		}
    469
    470		/* last == 1 means need to build parent first */
    471		if (last <= 0)
    472			list_del_init(&_realm->rebuild_item);
    473	}
    474}
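        /*
         * A rough trace of the queue handling above, for hypothetical realms
         * R (child) and P (parent): if P has no cached context yet,
         * build_snap_context(R) returns 1 and pushes P onto the head of the
         * queue, so P is built first; R stays queued and is revisited on a
         * later pass, when it can merge P's freshly cached context.  Children
         * are appended to the tail, so contexts always propagate downward.
         */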
    475
    476
    477/*
    478 * helper to allocate and decode an array of snapids.  free prior
    479 * instance, if any.
    480 */
    481static int dup_array(u64 **dst, __le64 *src, u32 num)
    482{
    483	u32 i;
    484
    485	kfree(*dst);
    486	if (num) {
    487		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
    488		if (!*dst)
    489			return -ENOMEM;
    490		for (i = 0; i < num; i++)
    491			(*dst)[i] = get_unaligned_le64(src + i);
    492	} else {
    493		*dst = NULL;
    494	}
    495	return 0;
    496}
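        /*
         * dup_array() is used on the wire-encoded (little-endian) snapid
         * arrays of a snap trace, e.g. in ceph_update_snap_trace():
         *
         *	err = dup_array(&realm->snaps, snaps, realm->num_snaps);
         *
         * get_unaligned_le64() covers both the byte-order conversion and the
         * fact that the encoded array need not be 64-bit aligned within the
         * message buffer.
         */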
    497
    498static bool has_new_snaps(struct ceph_snap_context *o,
    499			  struct ceph_snap_context *n)
    500{
    501	if (n->num_snaps == 0)
    502		return false;
    503	/* snaps are in descending order */
    504	return n->snaps[0] > o->seq;
    505}
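        /*
         * For example, if the old context has seq 5 and the new context's
         * snaps are {9, 7, 3} (descending), then 9 > 5 and a snapshot exists
         * that the old context has not yet seen.
         */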
    506
    507/*
    508 * When a snapshot is applied, the size/mtime inode metadata is queued
    509 * in a ceph_cap_snap (one for each snapshot) until writeback
    510 * completes and the metadata can be flushed back to the MDS.
    511 *
    512 * However, if a (sync) write is currently in-progress when we apply
    513 * the snapshot, we have to wait until the write succeeds or fails
     514 * (and a final size/mtime is known).  In this case we set
     515 * cap_snap->writing = 1, and the cap_snap is said to be "pending".  When the
     516 * write finishes, we call __ceph_finish_cap_snap().
    517 *
    518 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
    519 * change).
    520 */
    521static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
    522				struct ceph_cap_snap **pcapsnap)
    523{
    524	struct inode *inode = &ci->netfs.inode;
    525	struct ceph_snap_context *old_snapc, *new_snapc;
    526	struct ceph_cap_snap *capsnap = *pcapsnap;
    527	struct ceph_buffer *old_blob = NULL;
    528	int used, dirty;
    529
    530	spin_lock(&ci->i_ceph_lock);
    531	used = __ceph_caps_used(ci);
    532	dirty = __ceph_caps_dirty(ci);
    533
    534	old_snapc = ci->i_head_snapc;
    535	new_snapc = ci->i_snap_realm->cached_context;
    536
    537	/*
    538	 * If there is a write in progress, treat that as a dirty Fw,
    539	 * even though it hasn't completed yet; by the time we finish
    540	 * up this capsnap it will be.
    541	 */
    542	if (used & CEPH_CAP_FILE_WR)
    543		dirty |= CEPH_CAP_FILE_WR;
    544
    545	if (__ceph_have_pending_cap_snap(ci)) {
    546		/* there is no point in queuing multiple "pending" cap_snaps,
    547		   as no new writes are allowed to start when pending, so any
    548		   writes in progress now were started before the previous
    549		   cap_snap.  lucky us. */
    550		dout("%s %p %llx.%llx already pending\n",
    551		     __func__, inode, ceph_vinop(inode));
    552		goto update_snapc;
    553	}
    554	if (ci->i_wrbuffer_ref_head == 0 &&
    555	    !(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
    556		dout("%s %p %llx.%llx nothing dirty|writing\n",
    557		     __func__, inode, ceph_vinop(inode));
    558		goto update_snapc;
    559	}
    560
    561	BUG_ON(!old_snapc);
    562
    563	/*
     564	 * There is no need to send a FLUSHSNAP message to the MDS if there is
     565	 * no new snapshot. But when there are dirty pages or on-going
     566	 * writes, we still need to create a cap_snap. The cap_snap is needed
     567	 * by the write path and the page writeback path.
    568	 *
    569	 * also see ceph_try_drop_cap_snap()
    570	 */
    571	if (has_new_snaps(old_snapc, new_snapc)) {
    572		if (dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))
    573			capsnap->need_flush = true;
    574	} else {
    575		if (!(used & CEPH_CAP_FILE_WR) &&
    576		    ci->i_wrbuffer_ref_head == 0) {
    577			dout("%s %p %llx.%llx no new_snap|dirty_page|writing\n",
    578			     __func__, inode, ceph_vinop(inode));
    579			goto update_snapc;
    580		}
    581	}
    582
    583	dout("%s %p %llx.%llx cap_snap %p queuing under %p %s %s\n",
    584	     __func__, inode, ceph_vinop(inode), capsnap, old_snapc,
    585	     ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
    586	ihold(inode);
    587
    588	capsnap->follows = old_snapc->seq;
    589	capsnap->issued = __ceph_caps_issued(ci, NULL);
    590	capsnap->dirty = dirty;
    591
    592	capsnap->mode = inode->i_mode;
    593	capsnap->uid = inode->i_uid;
    594	capsnap->gid = inode->i_gid;
    595
    596	if (dirty & CEPH_CAP_XATTR_EXCL) {
    597		old_blob = __ceph_build_xattrs_blob(ci);
    598		capsnap->xattr_blob =
    599			ceph_buffer_get(ci->i_xattrs.blob);
    600		capsnap->xattr_version = ci->i_xattrs.version;
    601	} else {
    602		capsnap->xattr_blob = NULL;
    603		capsnap->xattr_version = 0;
    604	}
    605
    606	capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
    607
    608	/* dirty page count moved from _head to this cap_snap;
     609	   all subsequent page dirties occur _after_ this
    610	   snapshot. */
    611	capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
    612	ci->i_wrbuffer_ref_head = 0;
    613	capsnap->context = old_snapc;
    614	list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
    615
    616	if (used & CEPH_CAP_FILE_WR) {
    617		dout("%s %p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
    618		     " now pending\n", __func__, inode, ceph_vinop(inode),
    619		     capsnap, old_snapc, old_snapc->seq);
    620		capsnap->writing = 1;
    621	} else {
    622		/* note mtime, size NOW. */
    623		__ceph_finish_cap_snap(ci, capsnap);
    624	}
    625	*pcapsnap = NULL;
    626	old_snapc = NULL;
    627
    628update_snapc:
    629	if (ci->i_wrbuffer_ref_head == 0 &&
    630	    ci->i_wr_ref == 0 &&
    631	    ci->i_dirty_caps == 0 &&
    632	    ci->i_flushing_caps == 0) {
    633		ci->i_head_snapc = NULL;
    634	} else {
    635		ci->i_head_snapc = ceph_get_snap_context(new_snapc);
    636		dout(" new snapc is %p\n", new_snapc);
    637	}
    638	spin_unlock(&ci->i_ceph_lock);
    639
    640	ceph_buffer_put(old_blob);
    641	ceph_put_snap_context(old_snapc);
    642}
    643
    644/*
     645 * Finalize the size and mtime for a cap_snap; that is, settle on the final values
    646 * to be used for the snapshot, to be flushed back to the mds.
    647 *
    648 * If capsnap can now be flushed, add to snap_flush list, and return 1.
    649 *
    650 * Caller must hold i_ceph_lock.
    651 */
    652int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
    653			    struct ceph_cap_snap *capsnap)
    654{
    655	struct inode *inode = &ci->netfs.inode;
    656	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
    657
    658	BUG_ON(capsnap->writing);
    659	capsnap->size = i_size_read(inode);
    660	capsnap->mtime = inode->i_mtime;
    661	capsnap->atime = inode->i_atime;
    662	capsnap->ctime = inode->i_ctime;
    663	capsnap->btime = ci->i_btime;
    664	capsnap->change_attr = inode_peek_iversion_raw(inode);
    665	capsnap->time_warp_seq = ci->i_time_warp_seq;
    666	capsnap->truncate_size = ci->i_truncate_size;
    667	capsnap->truncate_seq = ci->i_truncate_seq;
    668	if (capsnap->dirty_pages) {
    669		dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
    670		     "still has %d dirty pages\n", __func__, inode,
    671		     ceph_vinop(inode), capsnap, capsnap->context,
    672		     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
    673		     capsnap->size, capsnap->dirty_pages);
    674		return 0;
    675	}
    676
    677	/* Fb cap still in use, delay it */
    678	if (ci->i_wb_ref) {
    679		dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
    680		     "used WRBUFFER, delaying\n", __func__, inode,
    681		     ceph_vinop(inode), capsnap, capsnap->context,
    682		     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
    683		     capsnap->size);
    684		capsnap->writing = 1;
    685		return 0;
    686	}
    687
    688	ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
    689	dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
    690	     __func__, inode, ceph_vinop(inode), capsnap, capsnap->context,
    691	     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
    692	     capsnap->size);
    693
    694	spin_lock(&mdsc->snap_flush_lock);
    695	if (list_empty(&ci->i_snap_flush_item))
    696		list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
    697	spin_unlock(&mdsc->snap_flush_lock);
    698	return 1;  /* caller may want to ceph_flush_snaps */
    699}
    700
    701/*
    702 * Queue cap_snaps for snap writeback for this realm and its children.
    703 * Called under snap_rwsem, so realm topology won't change.
    704 */
    705static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
    706{
    707	struct ceph_inode_info *ci;
    708	struct inode *lastinode = NULL;
    709	struct ceph_cap_snap *capsnap = NULL;
    710
    711	dout("%s %p %llx inode\n", __func__, realm, realm->ino);
    712
    713	spin_lock(&realm->inodes_with_caps_lock);
    714	list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
    715		struct inode *inode = igrab(&ci->netfs.inode);
    716		if (!inode)
    717			continue;
    718		spin_unlock(&realm->inodes_with_caps_lock);
    719		iput(lastinode);
    720		lastinode = inode;
    721
    722		/*
    723		 * Allocate the capsnap memory outside of ceph_queue_cap_snap()
     724		 * to avoid frequent and unnecessary memory allocation and
     725		 * freeing in this loop.
    726		 */
    727		if (!capsnap) {
    728			capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
    729			if (!capsnap) {
    730				pr_err("ENOMEM allocating ceph_cap_snap on %p\n",
    731				       inode);
    732				return;
    733			}
    734		}
    735		capsnap->cap_flush.is_capsnap = true;
    736		refcount_set(&capsnap->nref, 1);
    737		INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
    738		INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
    739		INIT_LIST_HEAD(&capsnap->ci_item);
    740
    741		ceph_queue_cap_snap(ci, &capsnap);
    742		spin_lock(&realm->inodes_with_caps_lock);
    743	}
    744	spin_unlock(&realm->inodes_with_caps_lock);
    745	iput(lastinode);
    746
    747	if (capsnap)
    748		kmem_cache_free(ceph_cap_snap_cachep, capsnap);
    749	dout("%s %p %llx done\n", __func__, realm, realm->ino);
    750}
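        /*
         * Note the iteration pattern above: each inode is pinned with
         * igrab() before inodes_with_caps_lock is dropped, and the previous
         * inode is released only after the lock has been given up again,
         * since iput() may sleep (it can trigger inode eviction).
         */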
    751
    752/*
    753 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
    754 * the snap realm parameters from a given realm and all of its ancestors,
    755 * up to the root.
    756 *
    757 * Caller must hold snap_rwsem for write.
    758 */
    759int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
    760			   void *p, void *e, bool deletion,
    761			   struct ceph_snap_realm **realm_ret)
    762{
    763	struct ceph_mds_snap_realm *ri;    /* encoded */
    764	__le64 *snaps;                     /* encoded */
    765	__le64 *prior_parent_snaps;        /* encoded */
    766	struct ceph_snap_realm *realm = NULL;
    767	struct ceph_snap_realm *first_realm = NULL;
    768	struct ceph_snap_realm *realm_to_rebuild = NULL;
    769	int rebuild_snapcs;
    770	int err = -ENOMEM;
    771	LIST_HEAD(dirty_realms);
    772
    773	lockdep_assert_held_write(&mdsc->snap_rwsem);
    774
    775	dout("%s deletion=%d\n", __func__, deletion);
    776more:
    777	rebuild_snapcs = 0;
    778	ceph_decode_need(&p, e, sizeof(*ri), bad);
    779	ri = p;
    780	p += sizeof(*ri);
    781	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
    782			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
    783	snaps = p;
    784	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
    785	prior_parent_snaps = p;
    786	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);
    787
    788	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
    789	if (!realm) {
    790		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
    791		if (IS_ERR(realm)) {
    792			err = PTR_ERR(realm);
    793			goto fail;
    794		}
    795	}
    796
    797	/* ensure the parent is correct */
    798	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
    799	if (err < 0)
    800		goto fail;
    801	rebuild_snapcs += err;
    802
    803	if (le64_to_cpu(ri->seq) > realm->seq) {
    804		dout("%s updating %llx %p %lld -> %lld\n", __func__,
    805		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
    806		/* update realm parameters, snap lists */
    807		realm->seq = le64_to_cpu(ri->seq);
    808		realm->created = le64_to_cpu(ri->created);
    809		realm->parent_since = le64_to_cpu(ri->parent_since);
    810
    811		realm->num_snaps = le32_to_cpu(ri->num_snaps);
    812		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
    813		if (err < 0)
    814			goto fail;
    815
    816		realm->num_prior_parent_snaps =
    817			le32_to_cpu(ri->num_prior_parent_snaps);
    818		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
    819				realm->num_prior_parent_snaps);
    820		if (err < 0)
    821			goto fail;
    822
    823		if (realm->seq > mdsc->last_snap_seq)
    824			mdsc->last_snap_seq = realm->seq;
    825
    826		rebuild_snapcs = 1;
    827	} else if (!realm->cached_context) {
    828		dout("%s %llx %p seq %lld new\n", __func__,
    829		     realm->ino, realm, realm->seq);
    830		rebuild_snapcs = 1;
    831	} else {
    832		dout("%s %llx %p seq %lld unchanged\n", __func__,
    833		     realm->ino, realm, realm->seq);
    834	}
    835
    836	dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
    837	     realm, rebuild_snapcs, p, e);
    838
    839	/*
     840	 * this will always track the uppermost parent realm from which
     841	 * we need to rebuild the snapshot contexts _downward_ in the
    842	 * hierarchy.
    843	 */
    844	if (rebuild_snapcs)
    845		realm_to_rebuild = realm;
    846
    847	/* rebuild_snapcs when we reach the _end_ (root) of the trace */
    848	if (realm_to_rebuild && p >= e)
    849		rebuild_snap_realms(realm_to_rebuild, &dirty_realms);
    850
    851	if (!first_realm)
    852		first_realm = realm;
    853	else
    854		ceph_put_snap_realm(mdsc, realm);
    855
    856	if (p < e)
    857		goto more;
    858
    859	/*
    860	 * queue cap snaps _after_ we've built the new snap contexts,
    861	 * so that i_head_snapc can be set appropriately.
    862	 */
    863	while (!list_empty(&dirty_realms)) {
    864		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
    865					 dirty_item);
    866		list_del_init(&realm->dirty_item);
    867		queue_realm_cap_snaps(realm);
    868	}
    869
    870	if (realm_ret)
    871		*realm_ret = first_realm;
    872	else
    873		ceph_put_snap_realm(mdsc, first_realm);
    874
    875	__cleanup_empty_realms(mdsc);
    876	return 0;
    877
    878bad:
    879	err = -EIO;
    880fail:
    881	if (realm && !IS_ERR(realm))
    882		ceph_put_snap_realm(mdsc, realm);
    883	if (first_realm)
    884		ceph_put_snap_realm(mdsc, first_realm);
    885	pr_err("%s error %d\n", __func__, err);
    886	return err;
    887}
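        /*
         * The snap trace decoded above is, roughly, one record per realm from
         * the affected realm up to the root, each laid out as:
         *
         *	struct ceph_mds_snap_realm ri;
         *	__le64 snaps[ri.num_snaps];
         *	__le64 prior_parent_snaps[ri.num_prior_parent_snaps];
         *
         * The snap-context rebuild is deferred until the whole trace has been
         * applied (p reaches e), so that ancestor realms are already up to
         * date when contexts are rebuilt downward from realm_to_rebuild.
         */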
    888
    889
    890/*
    891 * Send any cap_snaps that are queued for flush.  Try to carry
    892 * s_mutex across multiple snap flushes to avoid locking overhead.
    893 *
    894 * Caller holds no locks.
    895 */
    896static void flush_snaps(struct ceph_mds_client *mdsc)
    897{
    898	struct ceph_inode_info *ci;
    899	struct inode *inode;
    900	struct ceph_mds_session *session = NULL;
    901
    902	dout("%s\n", __func__);
    903	spin_lock(&mdsc->snap_flush_lock);
    904	while (!list_empty(&mdsc->snap_flush_list)) {
    905		ci = list_first_entry(&mdsc->snap_flush_list,
    906				struct ceph_inode_info, i_snap_flush_item);
    907		inode = &ci->netfs.inode;
    908		ihold(inode);
    909		spin_unlock(&mdsc->snap_flush_lock);
    910		ceph_flush_snaps(ci, &session);
    911		iput(inode);
    912		spin_lock(&mdsc->snap_flush_lock);
    913	}
    914	spin_unlock(&mdsc->snap_flush_lock);
    915
    916	ceph_put_mds_session(session);
    917	dout("%s done\n", __func__);
    918}
    919
    920/**
    921 * ceph_change_snap_realm - change the snap_realm for an inode
    922 * @inode: inode to move to new snap realm
    923 * @realm: new realm to move inode into (may be NULL)
    924 *
    925 * Detach an inode from its old snaprealm (if any) and attach it to
    926 * the new snaprealm (if any). The old snap realm reference held by
    927 * the inode is put. If realm is non-NULL, then the caller's reference
    928 * to it is taken over by the inode.
    929 */
    930void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
    931{
    932	struct ceph_inode_info *ci = ceph_inode(inode);
    933	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
    934	struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
    935
    936	lockdep_assert_held(&ci->i_ceph_lock);
    937
    938	if (oldrealm) {
    939		spin_lock(&oldrealm->inodes_with_caps_lock);
    940		list_del_init(&ci->i_snap_realm_item);
    941		if (oldrealm->ino == ci->i_vino.ino)
    942			oldrealm->inode = NULL;
    943		spin_unlock(&oldrealm->inodes_with_caps_lock);
    944		ceph_put_snap_realm(mdsc, oldrealm);
    945	}
    946
    947	ci->i_snap_realm = realm;
    948
    949	if (realm) {
    950		spin_lock(&realm->inodes_with_caps_lock);
    951		list_add(&ci->i_snap_realm_item, &realm->inodes_with_caps);
    952		if (realm->ino == ci->i_vino.ino)
    953			realm->inode = inode;
    954		spin_unlock(&realm->inodes_with_caps_lock);
    955	}
    956}
    957
    958/*
    959 * Handle a snap notification from the MDS.
    960 *
    961 * This can take two basic forms: the simplest is just a snap creation
    962 * or deletion notification on an existing realm.  This should update the
    963 * realm and its children.
    964 *
    965 * The more difficult case is realm creation, due to snap creation at a
    966 * new point in the file hierarchy, or due to a rename that moves a file or
    967 * directory into another realm.
    968 */
    969void ceph_handle_snap(struct ceph_mds_client *mdsc,
    970		      struct ceph_mds_session *session,
    971		      struct ceph_msg *msg)
    972{
    973	struct super_block *sb = mdsc->fsc->sb;
    974	int mds = session->s_mds;
    975	u64 split;
    976	int op;
    977	int trace_len;
    978	struct ceph_snap_realm *realm = NULL;
    979	void *p = msg->front.iov_base;
    980	void *e = p + msg->front.iov_len;
    981	struct ceph_mds_snap_head *h;
    982	int num_split_inos, num_split_realms;
    983	__le64 *split_inos = NULL, *split_realms = NULL;
    984	int i;
    985	int locked_rwsem = 0;
    986
    987	/* decode */
    988	if (msg->front.iov_len < sizeof(*h))
    989		goto bad;
    990	h = p;
    991	op = le32_to_cpu(h->op);
    992	split = le64_to_cpu(h->split);   /* non-zero if we are splitting an
    993					  * existing realm */
    994	num_split_inos = le32_to_cpu(h->num_split_inos);
    995	num_split_realms = le32_to_cpu(h->num_split_realms);
    996	trace_len = le32_to_cpu(h->trace_len);
    997	p += sizeof(*h);
    998
    999	dout("%s from mds%d op %s split %llx tracelen %d\n", __func__,
   1000	     mds, ceph_snap_op_name(op), split, trace_len);
   1001
   1002	mutex_lock(&session->s_mutex);
   1003	inc_session_sequence(session);
   1004	mutex_unlock(&session->s_mutex);
   1005
   1006	down_write(&mdsc->snap_rwsem);
   1007	locked_rwsem = 1;
   1008
   1009	if (op == CEPH_SNAP_OP_SPLIT) {
   1010		struct ceph_mds_snap_realm *ri;
   1011
   1012		/*
   1013		 * A "split" breaks part of an existing realm off into
   1014		 * a new realm.  The MDS provides a list of inodes
   1015		 * (with caps) and child realms that belong to the new
   1016		 * child.
   1017		 */
   1018		split_inos = p;
   1019		p += sizeof(u64) * num_split_inos;
   1020		split_realms = p;
   1021		p += sizeof(u64) * num_split_realms;
   1022		ceph_decode_need(&p, e, sizeof(*ri), bad);
   1023		/* we will peek at realm info here, but will _not_
   1024		 * advance p, as the realm update will occur below in
   1025		 * ceph_update_snap_trace. */
   1026		ri = p;
   1027
   1028		realm = ceph_lookup_snap_realm(mdsc, split);
   1029		if (!realm) {
   1030			realm = ceph_create_snap_realm(mdsc, split);
   1031			if (IS_ERR(realm))
   1032				goto out;
   1033		}
   1034
   1035		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
   1036		for (i = 0; i < num_split_inos; i++) {
   1037			struct ceph_vino vino = {
   1038				.ino = le64_to_cpu(split_inos[i]),
   1039				.snap = CEPH_NOSNAP,
   1040			};
   1041			struct inode *inode = ceph_find_inode(sb, vino);
   1042			struct ceph_inode_info *ci;
   1043
   1044			if (!inode)
   1045				continue;
   1046			ci = ceph_inode(inode);
   1047
   1048			spin_lock(&ci->i_ceph_lock);
   1049			if (!ci->i_snap_realm)
   1050				goto skip_inode;
   1051			/*
   1052			 * If this inode belongs to a realm that was
   1053			 * created after our new realm, we experienced
    1054			 * a race (due to another split notification
   1055			 * arriving from a different MDS).  So skip
   1056			 * this inode.
   1057			 */
   1058			if (ci->i_snap_realm->created >
   1059			    le64_to_cpu(ri->created)) {
   1060				dout(" leaving %p %llx.%llx in newer realm %llx %p\n",
   1061				     inode, ceph_vinop(inode), ci->i_snap_realm->ino,
   1062				     ci->i_snap_realm);
   1063				goto skip_inode;
   1064			}
   1065			dout(" will move %p %llx.%llx to split realm %llx %p\n",
   1066			     inode, ceph_vinop(inode), realm->ino, realm);
   1067
   1068			ceph_get_snap_realm(mdsc, realm);
   1069			ceph_change_snap_realm(inode, realm);
   1070			spin_unlock(&ci->i_ceph_lock);
   1071			iput(inode);
   1072			continue;
   1073
   1074skip_inode:
   1075			spin_unlock(&ci->i_ceph_lock);
   1076			iput(inode);
   1077		}
   1078
   1079		/* we may have taken some of the old realm's children. */
   1080		for (i = 0; i < num_split_realms; i++) {
   1081			struct ceph_snap_realm *child =
   1082				__lookup_snap_realm(mdsc,
   1083					   le64_to_cpu(split_realms[i]));
   1084			if (!child)
   1085				continue;
   1086			adjust_snap_realm_parent(mdsc, child, realm->ino);
   1087		}
   1088	}
   1089
   1090	/*
   1091	 * update using the provided snap trace. if we are deleting a
   1092	 * snap, we can avoid queueing cap_snaps.
   1093	 */
   1094	ceph_update_snap_trace(mdsc, p, e,
   1095			       op == CEPH_SNAP_OP_DESTROY, NULL);
   1096
   1097	if (op == CEPH_SNAP_OP_SPLIT)
   1098		/* we took a reference when we created the realm, above */
   1099		ceph_put_snap_realm(mdsc, realm);
   1100
   1101	__cleanup_empty_realms(mdsc);
   1102
   1103	up_write(&mdsc->snap_rwsem);
   1104
   1105	flush_snaps(mdsc);
   1106	return;
   1107
   1108bad:
   1109	pr_err("%s corrupt snap message from mds%d\n", __func__, mds);
   1110	ceph_msg_dump(msg);
   1111out:
   1112	if (locked_rwsem)
   1113		up_write(&mdsc->snap_rwsem);
   1114	return;
   1115}
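        /*
         * For reference, a CEPH_SNAP_OP_SPLIT message body as decoded above
         * looks roughly like:
         *
         *	struct ceph_mds_snap_head h;
         *	__le64 split_inos[h.num_split_inos];
         *	__le64 split_realms[h.num_split_realms];
         *	<snap trace, handled by ceph_update_snap_trace()>
         *
         * For other ops only the head and the trailing snap trace are
         * consumed.
         */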
   1116
   1117struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
   1118					    u64 snap)
   1119{
   1120	struct ceph_snapid_map *sm, *exist;
   1121	struct rb_node **p, *parent;
   1122	int ret;
   1123
   1124	exist = NULL;
   1125	spin_lock(&mdsc->snapid_map_lock);
   1126	p = &mdsc->snapid_map_tree.rb_node;
   1127	while (*p) {
   1128		exist = rb_entry(*p, struct ceph_snapid_map, node);
   1129		if (snap > exist->snap) {
   1130			p = &(*p)->rb_left;
   1131		} else if (snap < exist->snap) {
   1132			p = &(*p)->rb_right;
   1133		} else {
   1134			if (atomic_inc_return(&exist->ref) == 1)
   1135				list_del_init(&exist->lru);
   1136			break;
   1137		}
   1138		exist = NULL;
   1139	}
   1140	spin_unlock(&mdsc->snapid_map_lock);
   1141	if (exist) {
   1142		dout("%s found snapid map %llx -> %x\n", __func__,
   1143		     exist->snap, exist->dev);
   1144		return exist;
   1145	}
   1146
   1147	sm = kmalloc(sizeof(*sm), GFP_NOFS);
   1148	if (!sm)
   1149		return NULL;
   1150
   1151	ret = get_anon_bdev(&sm->dev);
   1152	if (ret < 0) {
   1153		kfree(sm);
   1154		return NULL;
   1155	}
   1156
   1157	INIT_LIST_HEAD(&sm->lru);
   1158	atomic_set(&sm->ref, 1);
   1159	sm->snap = snap;
   1160
   1161	exist = NULL;
   1162	parent = NULL;
   1163	p = &mdsc->snapid_map_tree.rb_node;
   1164	spin_lock(&mdsc->snapid_map_lock);
   1165	while (*p) {
   1166		parent = *p;
   1167		exist = rb_entry(*p, struct ceph_snapid_map, node);
   1168		if (snap > exist->snap)
   1169			p = &(*p)->rb_left;
   1170		else if (snap < exist->snap)
   1171			p = &(*p)->rb_right;
   1172		else
   1173			break;
   1174		exist = NULL;
   1175	}
   1176	if (exist) {
   1177		if (atomic_inc_return(&exist->ref) == 1)
   1178			list_del_init(&exist->lru);
   1179	} else {
   1180		rb_link_node(&sm->node, parent, p);
   1181		rb_insert_color(&sm->node, &mdsc->snapid_map_tree);
   1182	}
   1183	spin_unlock(&mdsc->snapid_map_lock);
   1184	if (exist) {
   1185		free_anon_bdev(sm->dev);
   1186		kfree(sm);
   1187		dout("%s found snapid map %llx -> %x\n", __func__,
   1188		     exist->snap, exist->dev);
   1189		return exist;
   1190	}
   1191
   1192	dout("%s create snapid map %llx -> %x\n", __func__,
   1193	     sm->snap, sm->dev);
   1194	return sm;
   1195}
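        /*
         * The double lookup above is the usual allocate-outside-the-lock
         * pattern: the first pass under snapid_map_lock avoids allocating in
         * the common (already mapped) case; the anon bdev and map entry are
         * then set up without the lock held, and the second pass catches a
         * racing insert of the same snap id, in which case the freshly
         * allocated entry is thrown away.
         */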
   1196
   1197void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
   1198			 struct ceph_snapid_map *sm)
   1199{
   1200	if (!sm)
   1201		return;
   1202	if (atomic_dec_and_lock(&sm->ref, &mdsc->snapid_map_lock)) {
   1203		if (!RB_EMPTY_NODE(&sm->node)) {
   1204			sm->last_used = jiffies;
   1205			list_add_tail(&sm->lru, &mdsc->snapid_map_lru);
   1206			spin_unlock(&mdsc->snapid_map_lock);
   1207		} else {
   1208			/* already cleaned up by
   1209			 * ceph_cleanup_snapid_map() */
   1210			spin_unlock(&mdsc->snapid_map_lock);
   1211			kfree(sm);
   1212		}
   1213	}
   1214}
   1215
   1216void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
   1217{
   1218	struct ceph_snapid_map *sm;
   1219	unsigned long now;
   1220	LIST_HEAD(to_free);
   1221
   1222	spin_lock(&mdsc->snapid_map_lock);
   1223	now = jiffies;
   1224
   1225	while (!list_empty(&mdsc->snapid_map_lru)) {
   1226		sm = list_first_entry(&mdsc->snapid_map_lru,
   1227				      struct ceph_snapid_map, lru);
   1228		if (time_after(sm->last_used + CEPH_SNAPID_MAP_TIMEOUT, now))
   1229			break;
   1230
   1231		rb_erase(&sm->node, &mdsc->snapid_map_tree);
   1232		list_move(&sm->lru, &to_free);
   1233	}
   1234	spin_unlock(&mdsc->snapid_map_lock);
   1235
   1236	while (!list_empty(&to_free)) {
   1237		sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
   1238		list_del(&sm->lru);
   1239		dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
   1240		free_anon_bdev(sm->dev);
   1241		kfree(sm);
   1242	}
   1243}
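        /*
         * Trimming walks the LRU oldest-first: entries are appended to
         * snapid_map_lru when their last reference is dropped, so the loop
         * above can stop at the first entry that is still within the
         * CEPH_SNAPID_MAP_TIMEOUT window (5 minutes, see the #define at the
         * top of this file).
         */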
   1244
   1245void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
   1246{
   1247	struct ceph_snapid_map *sm;
   1248	struct rb_node *p;
   1249	LIST_HEAD(to_free);
   1250
   1251	spin_lock(&mdsc->snapid_map_lock);
   1252	while ((p = rb_first(&mdsc->snapid_map_tree))) {
   1253		sm = rb_entry(p, struct ceph_snapid_map, node);
   1254		rb_erase(p, &mdsc->snapid_map_tree);
   1255		RB_CLEAR_NODE(p);
   1256		list_move(&sm->lru, &to_free);
   1257	}
   1258	spin_unlock(&mdsc->snapid_map_lock);
   1259
   1260	while (!list_empty(&to_free)) {
   1261		sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
   1262		list_del(&sm->lru);
   1263		free_anon_bdev(sm->dev);
   1264		if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
   1265			pr_err("snapid map %llx -> %x still in use\n",
   1266			       sm->snap, sm->dev);
   1267		}
   1268		kfree(sm);
   1269	}
   1270}