cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

run.c (22280B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 *
      4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
      5 *
      6 * TODO: try to use extents tree (instead of array)
      7 */
      8
      9#include <linux/blkdev.h>
     10#include <linux/fs.h>
     11#include <linux/log2.h>
     12
     13#include "debug.h"
     14#include "ntfs.h"
     15#include "ntfs_fs.h"
     16
      17/* runs_tree is a contiguous array in memory. Try to keep it small. */
     18#define NTFS3_RUN_MAX_BYTES 0x10000
     19
     20struct ntfs_run {
     21	CLST vcn; /* Virtual cluster number. */
     22	CLST len; /* Length in clusters. */
     23	CLST lcn; /* Logical cluster number. */
     24};
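        /*
         * The runs are kept in a single array sorted by vcn, so lookups are
         * binary searches and insertions may memmove the tail of the array.
         * A run whose lcn equals SPARSE_LCN describes a hole that has no
         * clusters allocated on the volume.
         */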
     25
     26/*
      27 * run_lookup - Lookup the index of the MCB entry that contains @vcn.
      28 *
      29 * On success, return true and set the @index parameter
      30 * to the index of the entry found.
      31 * If no entry contains @vcn, set @index to the insertion
      32 * position for the entry in question and return false.
     33 */
     34bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
     35{
     36	size_t min_idx, max_idx, mid_idx;
     37	struct ntfs_run *r;
     38
     39	if (!run->count) {
     40		*index = 0;
     41		return false;
     42	}
     43
     44	min_idx = 0;
     45	max_idx = run->count - 1;
     46
      47	/* Check the boundary cases first, since they cover the most frequent requests. */
     48	r = run->runs;
     49	if (vcn < r->vcn) {
     50		*index = 0;
     51		return false;
     52	}
     53
     54	if (vcn < r->vcn + r->len) {
     55		*index = 0;
     56		return true;
     57	}
     58
     59	r += max_idx;
     60	if (vcn >= r->vcn + r->len) {
     61		*index = run->count;
     62		return false;
     63	}
     64
     65	if (vcn >= r->vcn) {
     66		*index = max_idx;
     67		return true;
     68	}
     69
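        	/*
        	 * Binary search over [min_idx, max_idx]; the boundary runs were
        	 * checked above, so vcn is known to lie after the first run and
        	 * before the last one.
        	 */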
     70	do {
     71		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
     72		r = run->runs + mid_idx;
     73
     74		if (vcn < r->vcn) {
     75			max_idx = mid_idx - 1;
     76			if (!mid_idx)
     77				break;
     78		} else if (vcn >= r->vcn + r->len) {
     79			min_idx = mid_idx + 1;
     80		} else {
     81			*index = mid_idx;
     82			return true;
     83		}
     84	} while (min_idx <= max_idx);
     85
     86	*index = max_idx + 1;
     87	return false;
     88}
     89
     90/*
     91 * run_consolidate - Consolidate runs starting from a given one.
     92 */
     93static void run_consolidate(struct runs_tree *run, size_t index)
     94{
     95	size_t i;
     96	struct ntfs_run *r = run->runs + index;
     97
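        	/*
        	 * Walk forward from @index, trimming overlaps and merging runs
        	 * that are vcn-contiguous and either both sparse or
        	 * lcn-contiguous as well.
        	 */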
     98	while (index + 1 < run->count) {
     99		/*
     100		 * Merge the current run with the next one when the next
     101		 * run starts inside or immediately after the current one.
    102		 */
    103		struct ntfs_run *n = r + 1;
    104		CLST end = r->vcn + r->len;
    105		CLST dl;
    106
     107		/* Stop if there is a gap between the runs. */
    108		if (n->vcn > end)
    109			break;
    110
    111		dl = end - n->vcn;
    112
    113		/*
     114		 * If the range at @index overlaps with the next one,
     115		 * either adjust the start of the next range or,
     116		 * if it is completely covered, remove it from the list.
    117		 */
    118		if (dl > 0) {
    119			if (n->len <= dl)
    120				goto remove_next_range;
    121
    122			n->len -= dl;
    123			n->vcn += dl;
    124			if (n->lcn != SPARSE_LCN)
    125				n->lcn += dl;
    126			dl = 0;
    127		}
    128
    129		/*
     130		 * Skip the merge if the sparse mode of the
     131		 * current and next runs does not match.
    132		 */
    133		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
    134			index += 1;
    135			r = n;
    136			continue;
    137		}
    138
    139		/*
     140		 * Stop if the first volume block (lcn) of the
     141		 * next run does not immediately follow the last
     142		 * volume block of the current run.
    143		 */
    144		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
    145			break;
    146
    147		/*
     148		 * The next and current runs are contiguous.
     149		 * Join them.
    150		 */
    151		r->len += n->len - dl;
    152
    153remove_next_range:
    154		i = run->count - (index + 1);
    155		if (i > 1)
    156			memmove(n, n + 1, sizeof(*n) * (i - 1));
    157
    158		run->count -= 1;
    159	}
    160}
    161
    162/*
    163 * run_is_mapped_full
    164 *
     165 * Return: True if the range [svcn, evcn] is fully mapped.
    166 */
    167bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
    168{
    169	size_t i;
    170	const struct ntfs_run *r, *end;
    171	CLST next_vcn;
    172
    173	if (!run_lookup(run, svcn, &i))
    174		return false;
    175
    176	end = run->runs + run->count;
    177	r = run->runs + i;
    178
    179	for (;;) {
    180		next_vcn = r->vcn + r->len;
    181		if (next_vcn > evcn)
    182			return true;
    183
    184		if (++r >= end)
    185			return false;
    186
    187		if (r->vcn != next_vcn)
    188			return false;
    189	}
    190}
    191
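        /*
         * run_lookup_entry - Find the run containing @vcn.
         *
         * On success, return true and set *lcn to the cluster that maps @vcn
         * (or SPARSE_LCN), *len (if requested) to the number of clusters left
         * in the run starting at @vcn, and *index (if requested) to the run's
         * position in the tree.
         */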
    192bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
    193		      CLST *len, size_t *index)
    194{
    195	size_t idx;
    196	CLST gap;
    197	struct ntfs_run *r;
    198
     199	/* Fail immediately if the run has not been touched yet. */
    200	if (!run->runs)
    201		return false;
    202
    203	if (!run_lookup(run, vcn, &idx))
    204		return false;
    205
    206	r = run->runs + idx;
    207
    208	if (vcn >= r->vcn + r->len)
    209		return false;
    210
    211	gap = vcn - r->vcn;
    212	if (r->len <= gap)
    213		return false;
    214
    215	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);
    216
    217	if (len)
    218		*len = r->len - gap;
    219	if (index)
    220		*index = idx;
    221
    222	return true;
    223}
    224
    225/*
    226 * run_truncate_head - Decommit the range before vcn.
    227 */
    228void run_truncate_head(struct runs_tree *run, CLST vcn)
    229{
    230	size_t index;
    231	struct ntfs_run *r;
    232
    233	if (run_lookup(run, vcn, &index)) {
    234		r = run->runs + index;
    235
    236		if (vcn > r->vcn) {
    237			CLST dlen = vcn - r->vcn;
    238
    239			r->vcn = vcn;
    240			r->len -= dlen;
    241			if (r->lcn != SPARSE_LCN)
    242				r->lcn += dlen;
    243		}
    244
    245		if (!index)
    246			return;
    247	}
    248	r = run->runs;
    249	memmove(r, r + index, sizeof(*r) * (run->count - index));
    250
    251	run->count -= index;
    252
    253	if (!run->count) {
    254		kvfree(run->runs);
    255		run->runs = NULL;
    256		run->allocated = 0;
    257	}
    258}
    259
    260/*
    261 * run_truncate - Decommit the range after vcn.
    262 */
    263void run_truncate(struct runs_tree *run, CLST vcn)
    264{
    265	size_t index;
    266
    267	/*
     268	 * If vcn hits an existing range,
     269	 * that range has to be truncated.
     270	 * If the truncated range becomes empty,
     271	 * it is removed entirely.
    272	 */
    273	if (run_lookup(run, vcn, &index)) {
    274		struct ntfs_run *r = run->runs + index;
    275
    276		r->len = vcn - r->vcn;
    277
    278		if (r->len > 0)
    279			index += 1;
    280	}
    281
    282	/*
     283	 * At this point 'index' is the first position that
     284	 * should be thrown away (including index itself).
     285	 * Simply set the new limit.
    286	 */
    287	run->count = index;
    288
    289	/* Do not reallocate array 'runs'. Only free if possible. */
    290	if (!index) {
    291		kvfree(run->runs);
    292		run->runs = NULL;
    293		run->allocated = 0;
    294	}
    295}
    296
    297/*
    298 * run_truncate_around - Trim head and tail if necessary.
    299 */
    300void run_truncate_around(struct runs_tree *run, CLST vcn)
    301{
    302	run_truncate_head(run, vcn);
    303
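        	/*
        	 * If the tree still holds at least half of NTFS3_RUN_MAX_BYTES
        	 * worth of entries, also drop the upper half of the runs to keep
        	 * the array bounded.
        	 */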
    304	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
    305		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
    306}
    307
    308/*
    309 * run_add_entry
    310 *
     311 * Set the location to a known state.
     312 * The run to be added may overlap with an existing location.
     313 *
     314 * Return: false if out of memory.
    315 */
    316bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
    317		   bool is_mft)
    318{
    319	size_t used, index;
    320	struct ntfs_run *r;
    321	bool inrange;
    322	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
    323	bool should_add_tail = false;
    324
    325	/*
     326	 * Look up the insertion point.
     327	 *
     328	 * Binary search for the entry containing
     329	 * the start position in question.
    330	 */
    331	inrange = run_lookup(run, vcn, &index);
    332
    333	/*
     334	 * Shortcut: the range was not found, but the run
     335	 * being added is a direct continuation of the
     336	 * previous run.
     337	 * In this case the existing range can be used
     338	 * directly as the start point.
    339	 */
    340	if (!inrange && index > 0) {
    341		struct ntfs_run *t = run->runs + index - 1;
    342
    343		if (t->vcn + t->len == vcn &&
    344		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
    345		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
    346			inrange = true;
    347			index -= 1;
    348		}
    349	}
    350
    351	/*
     352	 * At this point 'index' either points to the range
     353	 * containing the start position or to the insertion
     354	 * position for a new range.
     355	 * So first check whether the probed range is already here.
    356	 */
    357	if (!inrange) {
    358requires_new_range:
    359		/*
    360		 * Range was not found.
    361		 * Insert at position 'index'
    362		 */
    363		used = run->count * sizeof(struct ntfs_run);
    364
    365		/*
     366		 * Check the allocated space.
     367		 * If it is not enough to hold one more entry,
     368		 * the array is reallocated.
    369		 */
    370		if (run->allocated < used + sizeof(struct ntfs_run)) {
    371			size_t bytes;
    372			struct ntfs_run *new_ptr;
    373
    374			/* Use power of 2 for 'bytes'. */
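        			/*
        			 * Growth policy: start at 64 bytes, roughly double
        			 * while the array is small, then grow linearly by
        			 * 16 pages at a time.
        			 */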
    375			if (!used) {
    376				bytes = 64;
    377			} else if (used <= 16 * PAGE_SIZE) {
    378				if (is_power_of_2(run->allocated))
    379					bytes = run->allocated << 1;
    380				else
    381					bytes = (size_t)1
    382						<< (2 + blksize_bits(used));
    383			} else {
    384				bytes = run->allocated + (16 * PAGE_SIZE);
    385			}
    386
    387			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);
    388
    389			new_ptr = kvmalloc(bytes, GFP_KERNEL);
    390
    391			if (!new_ptr)
    392				return false;
    393
    394			r = new_ptr + index;
    395			memcpy(new_ptr, run->runs,
    396			       index * sizeof(struct ntfs_run));
    397			memcpy(r + 1, run->runs + index,
    398			       sizeof(struct ntfs_run) * (run->count - index));
    399
    400			kvfree(run->runs);
    401			run->runs = new_ptr;
    402			run->allocated = bytes;
    403
    404		} else {
    405			size_t i = run->count - index;
    406
    407			r = run->runs + index;
    408
     409			/* memmove appears to be a bottleneck here... */
    410			if (i > 0)
    411				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
    412		}
    413
    414		r->vcn = vcn;
    415		r->lcn = lcn;
    416		r->len = len;
    417		run->count += 1;
    418	} else {
    419		r = run->runs + index;
    420
    421		/*
     422		 * If one of the ranges was not allocated, we
     423		 * have to split the location we just matched
     424		 * and insert the current one.
     425		 * In the common case this requires the tail to be
     426		 * reinserted by a recursive call.
    427		 */
    428		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
    429		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
    430			CLST to_eat = vcn - r->vcn;
    431			CLST Tovcn = to_eat + len;
    432
    433			should_add_tail = Tovcn < r->len;
    434
    435			if (should_add_tail) {
    436				tail_lcn = r->lcn == SPARSE_LCN
    437						   ? SPARSE_LCN
    438						   : (r->lcn + Tovcn);
    439				tail_vcn = r->vcn + Tovcn;
    440				tail_len = r->len - Tovcn;
    441			}
    442
    443			if (to_eat > 0) {
    444				r->len = to_eat;
    445				inrange = false;
    446				index += 1;
    447				goto requires_new_range;
    448			}
    449
     450			/* lcn should match the one we are going to add. */
    451			r->lcn = lcn;
    452		}
    453
    454		/*
     455		 * If the existing range fits, we are done.
     456		 * Otherwise extend the found range; the consolidation below merges any overlap.
    457		 */
    458		if (r->vcn + r->len < vcn + len)
    459			r->len += len - ((r->vcn + r->len) - vcn);
    460	}
    461
    462	/*
     463	 * Normalize the tree starting from the insertion point.
     464	 * It is possible that no insertion was needed, if the
     465	 * start point lies within the range of the entry
     466	 * that 'index' points to.
    467	 */
    468	if (inrange && index > 0)
    469		index -= 1;
    470	run_consolidate(run, index);
    471	run_consolidate(run, index + 1);
    472
    473	/*
     474	 * A special case:
     475	 * we have to add an extra range for the tail.
    476	 */
    477	if (should_add_tail &&
    478	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
    479		return false;
    480
    481	return true;
    482}
    483
    484/* run_collapse_range
    485 *
    486 * Helper for attr_collapse_range(),
     487 * which is a helper for fallocate(collapse_range).
    488 */
    489bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
    490{
    491	size_t index, eat;
    492	struct ntfs_run *r, *e, *eat_start, *eat_end;
    493	CLST end;
    494
    495	if (WARN_ON(!run_lookup(run, vcn, &index)))
    496		return true; /* Should never be here. */
    497
    498	e = run->runs + run->count;
    499	r = run->runs + index;
    500	end = vcn + len;
    501
    502	if (vcn > r->vcn) {
    503		if (r->vcn + r->len <= end) {
     504			/* Collapse the tail of the run. */
    505			r->len = vcn - r->vcn;
    506		} else if (r->lcn == SPARSE_LCN) {
     507			/* Collapse a middle part of a sparse run. */
    508			r->len -= len;
    509		} else {
    510			/* Collapse a middle part of normal run, split. */
    511			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
    512				return false;
    513			return run_collapse_range(run, vcn, len);
    514		}
    515
    516		r += 1;
    517	}
    518
    519	eat_start = r;
    520	eat_end = r;
    521
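        	/*
        	 * Walk the remaining runs: runs entirely past the collapsed
        	 * window shift down by @len, runs entirely inside it are
        	 * removed, and a run straddling its end loses the overlapping
        	 * part and is shifted.
        	 */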
    522	for (; r < e; r++) {
    523		CLST d;
    524
    525		if (r->vcn >= end) {
    526			r->vcn -= len;
    527			continue;
    528		}
    529
    530		if (r->vcn + r->len <= end) {
    531			/* Eat this run. */
    532			eat_end = r + 1;
    533			continue;
    534		}
    535
    536		d = end - r->vcn;
    537		if (r->lcn != SPARSE_LCN)
    538			r->lcn += d;
    539		r->len -= d;
    540		r->vcn -= len - d;
    541	}
    542
    543	eat = eat_end - eat_start;
    544	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
    545	run->count -= eat;
    546
    547	return true;
    548}
    549
    550/*
    551 * run_get_entry - Return index-th mapped region.
    552 */
    553bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
    554		   CLST *lcn, CLST *len)
    555{
    556	const struct ntfs_run *r;
    557
    558	if (index >= run->count)
    559		return false;
    560
    561	r = run->runs + index;
    562
    563	if (!r->len)
    564		return false;
    565
    566	if (vcn)
    567		*vcn = r->vcn;
    568	if (lcn)
    569		*lcn = r->lcn;
    570	if (len)
    571		*len = r->len;
    572	return true;
    573}
    574
    575/*
    576 * run_packed_size - Calculate the size of packed int64.
    577 */
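        /*
         * The result is the minimal number of low-order bytes from which the
         * value can be restored by sign extension; one extra byte is used when
         * the top bit of the highest kept byte would otherwise flip the sign.
         */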
    578#ifdef __BIG_ENDIAN
    579static inline int run_packed_size(const s64 n)
    580{
    581	const u8 *p = (const u8 *)&n + sizeof(n) - 1;
    582
    583	if (n >= 0) {
    584		if (p[-7] || p[-6] || p[-5] || p[-4])
    585			p -= 4;
    586		if (p[-3] || p[-2])
    587			p -= 2;
    588		if (p[-1])
    589			p -= 1;
    590		if (p[0] & 0x80)
    591			p -= 1;
    592	} else {
    593		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
    594		    p[-4] != 0xff)
    595			p -= 4;
    596		if (p[-3] != 0xff || p[-2] != 0xff)
    597			p -= 2;
    598		if (p[-1] != 0xff)
    599			p -= 1;
    600		if (!(p[0] & 0x80))
    601			p -= 1;
    602	}
    603	return (const u8 *)&n + sizeof(n) - p;
    604}
    605
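        /*
         * On big-endian hosts the bytes are stored in reverse order so that
         * the on-disk mapping-pairs data stays little-endian.
         */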
     606/* Fully trusted function. It does not check 'size' for errors. */
    607static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
    608{
    609	const u8 *p = (u8 *)&v;
    610
    611	switch (size) {
    612	case 8:
    613		run_buf[7] = p[0];
    614		fallthrough;
    615	case 7:
    616		run_buf[6] = p[1];
    617		fallthrough;
    618	case 6:
    619		run_buf[5] = p[2];
    620		fallthrough;
    621	case 5:
    622		run_buf[4] = p[3];
    623		fallthrough;
    624	case 4:
    625		run_buf[3] = p[4];
    626		fallthrough;
    627	case 3:
    628		run_buf[2] = p[5];
    629		fallthrough;
    630	case 2:
    631		run_buf[1] = p[6];
    632		fallthrough;
    633	case 1:
    634		run_buf[0] = p[7];
    635	}
    636}
    637
     638/* Fully trusted function. It does not check 'size' for errors. */
    639static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
    640{
    641	u8 *p = (u8 *)&v;
    642
    643	switch (size) {
    644	case 8:
    645		p[0] = run_buf[7];
    646		fallthrough;
    647	case 7:
    648		p[1] = run_buf[6];
    649		fallthrough;
    650	case 6:
    651		p[2] = run_buf[5];
    652		fallthrough;
    653	case 5:
    654		p[3] = run_buf[4];
    655		fallthrough;
    656	case 4:
    657		p[4] = run_buf[3];
    658		fallthrough;
    659	case 3:
    660		p[5] = run_buf[2];
    661		fallthrough;
    662	case 2:
    663		p[6] = run_buf[1];
    664		fallthrough;
    665	case 1:
    666		p[7] = run_buf[0];
    667	}
    668	return v;
    669}
    670
    671#else
    672
    673static inline int run_packed_size(const s64 n)
    674{
    675	const u8 *p = (const u8 *)&n;
    676
    677	if (n >= 0) {
    678		if (p[7] || p[6] || p[5] || p[4])
    679			p += 4;
    680		if (p[3] || p[2])
    681			p += 2;
    682		if (p[1])
    683			p += 1;
    684		if (p[0] & 0x80)
    685			p += 1;
    686	} else {
    687		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
    688		    p[4] != 0xff)
    689			p += 4;
    690		if (p[3] != 0xff || p[2] != 0xff)
    691			p += 2;
    692		if (p[1] != 0xff)
    693			p += 1;
    694		if (!(p[0] & 0x80))
    695			p += 1;
    696	}
    697
    698	return 1 + p - (const u8 *)&n;
    699}
    700
     701/* Fully trusted function. It does not check 'size' for errors. */
    702static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
    703{
    704	const u8 *p = (u8 *)&v;
    705
    706	/* memcpy( run_buf, &v, size); Is it faster? */
    707	switch (size) {
    708	case 8:
    709		run_buf[7] = p[7];
    710		fallthrough;
    711	case 7:
    712		run_buf[6] = p[6];
    713		fallthrough;
    714	case 6:
    715		run_buf[5] = p[5];
    716		fallthrough;
    717	case 5:
    718		run_buf[4] = p[4];
    719		fallthrough;
    720	case 4:
    721		run_buf[3] = p[3];
    722		fallthrough;
    723	case 3:
    724		run_buf[2] = p[2];
    725		fallthrough;
    726	case 2:
    727		run_buf[1] = p[1];
    728		fallthrough;
    729	case 1:
    730		run_buf[0] = p[0];
    731	}
    732}
    733
     734/* Fully trusted function. It does not check 'size' for errors. */
    735static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
    736{
    737	u8 *p = (u8 *)&v;
    738
    739	/* memcpy( &v, run_buf, size); Is it faster? */
    740	switch (size) {
    741	case 8:
    742		p[7] = run_buf[7];
    743		fallthrough;
    744	case 7:
    745		p[6] = run_buf[6];
    746		fallthrough;
    747	case 6:
    748		p[5] = run_buf[5];
    749		fallthrough;
    750	case 5:
    751		p[4] = run_buf[4];
    752		fallthrough;
    753	case 4:
    754		p[3] = run_buf[3];
    755		fallthrough;
    756	case 3:
    757		p[2] = run_buf[2];
    758		fallthrough;
    759	case 2:
    760		p[1] = run_buf[1];
    761		fallthrough;
    762	case 1:
    763		p[0] = run_buf[0];
    764	}
    765	return v;
    766}
    767#endif
    768
    769/*
    770 * run_pack - Pack runs into buffer.
    771 *
     772 * packed_vcns - How many clusters (VCNs) have been packed.
     773 * packed_size - How many bytes of run_buf have been used.
    774 */
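        /*
         * Each packed entry is a header byte followed by the run length and
         * the lcn delta: the low nibble of the header is the byte count of the
         * length, the high nibble is the byte count of the signed delta from
         * the previous lcn (zero for a sparse run). For example, a header of
         * 0x21 means a 1-byte length followed by a 2-byte delta. A zero header
         * byte terminates the list.
         */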
    775int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
    776	     u32 run_buf_size, CLST *packed_vcns)
    777{
    778	CLST next_vcn, vcn, lcn;
    779	CLST prev_lcn = 0;
    780	CLST evcn1 = svcn + len;
    781	int packed_size = 0;
    782	size_t i;
    783	bool ok;
    784	s64 dlcn;
    785	int offset_size, size_size, tmp;
    786
    787	next_vcn = vcn = svcn;
    788
    789	*packed_vcns = 0;
    790
    791	if (!len)
    792		goto out;
    793
    794	ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
    795
    796	if (!ok)
    797		goto error;
    798
    799	if (next_vcn != vcn)
    800		goto error;
    801
    802	for (;;) {
    803		next_vcn = vcn + len;
    804		if (next_vcn > evcn1)
    805			len = evcn1 - vcn;
    806
     807		/* How many bytes are required to pack len. */
    808		size_size = run_packed_size(len);
    809
     810		/* offset_size - How many bytes are needed to pack dlcn. */
    811		if (lcn == SPARSE_LCN) {
    812			offset_size = 0;
    813			dlcn = 0;
    814		} else {
    815			/* NOTE: lcn can be less than prev_lcn! */
    816			dlcn = (s64)lcn - prev_lcn;
    817			offset_size = run_packed_size(dlcn);
    818			prev_lcn = lcn;
    819		}
    820
    821		tmp = run_buf_size - packed_size - 2 - offset_size;
    822		if (tmp <= 0)
    823			goto out;
    824
     825		/* Can we store this entire run? */
    826		if (tmp < size_size)
    827			goto out;
    828
    829		if (run_buf) {
    830			/* Pack run header. */
    831			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
    832			run_buf += 1;
    833
    834			/* Pack the length of run. */
    835			run_pack_s64(run_buf, size_size, len);
    836
    837			run_buf += size_size;
    838			/* Pack the offset from previous LCN. */
    839			run_pack_s64(run_buf, offset_size, dlcn);
    840			run_buf += offset_size;
    841		}
    842
    843		packed_size += 1 + offset_size + size_size;
    844		*packed_vcns += len;
    845
    846		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
    847			goto out;
    848
    849		ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
    850		if (!ok)
    851			goto error;
    852
    853		if (next_vcn != vcn)
    854			goto error;
    855	}
    856
    857out:
     858	/* Store the terminating zero byte. */
    859	if (run_buf)
    860		run_buf[0] = 0;
    861
    862	return packed_size + 1;
    863
    864error:
    865	return -EOPNOTSUPP;
    866}
    867
    868/*
    869 * run_unpack - Unpack packed runs from @run_buf.
    870 *
     871 * Return: A negative value on error, otherwise the number of bytes used.
    872 */
    873int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
    874	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
    875	       u32 run_buf_size)
    876{
    877	u64 prev_lcn, vcn64, lcn, next_vcn;
    878	const u8 *run_last, *run_0;
    879	bool is_mft = ino == MFT_REC_MFT;
    880
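        	/*
        	 * @run may be NULL (validate only), RUN_DEALLOCATE (free the
        	 * clusters instead of storing them) or a tree to be filled in.
        	 */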
    881	/* Check for empty. */
    882	if (evcn + 1 == svcn)
    883		return 0;
    884
    885	if (evcn < svcn)
    886		return -EINVAL;
    887
    888	run_0 = run_buf;
    889	run_last = run_buf + run_buf_size;
    890	prev_lcn = 0;
    891	vcn64 = svcn;
    892
     893	/* Read all runs in the chain. */
     894	/* Each run starts with a header byte; a zero header terminates the list. */
    895	while (run_buf < run_last) {
     896		/* size_size - How many bytes hold the packed len. */
    897		u8 size_size = *run_buf & 0xF;
     898		/* offset_size - How many bytes hold the packed dlcn. */
    899		u8 offset_size = *run_buf++ >> 4;
    900		u64 len;
    901
    902		if (!size_size)
    903			break;
    904
    905		/*
    906		 * Unpack runs.
     907		 * NOTE: Runs are stored in little-endian order.
     908		 * "len" is an unsigned value, "dlcn" is signed.
     909		 * A large positive number may require 5 bytes to store,
     910		 * e.g.: 05 FF 7E FF FF 00 00 00
    911		 */
    912		if (size_size > 8)
    913			return -EINVAL;
    914
    915		len = run_unpack_s64(run_buf, size_size, 0);
    916		/* Skip size_size. */
    917		run_buf += size_size;
    918
    919		if (!len)
    920			return -EINVAL;
    921
    922		if (!offset_size)
    923			lcn = SPARSE_LCN64;
    924		else if (offset_size <= 8) {
    925			s64 dlcn;
    926
    927			/* Initial value of dlcn is -1 or 0. */
    928			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
    929			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
    930			/* Skip offset_size. */
    931			run_buf += offset_size;
    932
    933			if (!dlcn)
    934				return -EINVAL;
    935			lcn = prev_lcn + dlcn;
    936			prev_lcn = lcn;
    937		} else
    938			return -EINVAL;
    939
    940		next_vcn = vcn64 + len;
    941		/* Check boundary. */
    942		if (next_vcn > evcn + 1)
    943			return -EINVAL;
    944
    945#ifndef CONFIG_NTFS3_64BIT_CLUSTER
    946		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
    947			ntfs_err(
    948				sbi->sb,
    949				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
    950				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
    951				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
    952				vcn64, lcn, len);
    953			return -EOPNOTSUPP;
    954		}
    955#endif
    956		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
    957			/* LCN range is out of volume. */
    958			return -EINVAL;
    959		}
    960
    961		if (!run)
    962			; /* Called from check_attr(fslog.c) to check run. */
    963		else if (run == RUN_DEALLOCATE) {
    964			/*
    965			 * Called from ni_delete_all to free clusters
    966			 * without storing in run.
    967			 */
    968			if (lcn != SPARSE_LCN64)
    969				mark_as_free_ex(sbi, lcn, len, true);
    970		} else if (vcn64 >= vcn) {
    971			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
    972				return -ENOMEM;
    973		} else if (next_vcn > vcn) {
    974			u64 dlen = vcn - vcn64;
    975
    976			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
    977					   is_mft))
    978				return -ENOMEM;
    979		}
    980
    981		vcn64 = next_vcn;
    982	}
    983
    984	if (vcn64 != evcn + 1) {
     985		/* Unexpected length of unpacked runs. */
    986		return -EINVAL;
    987	}
    988
    989	return run_buf - run_0;
    990}
    991
    992#ifdef NTFS3_CHECK_FREE_CLST
    993/*
    994 * run_unpack_ex - Unpack packed runs from "run_buf".
    995 *
     996 * Check that the unpacked runs are marked as used in the bitmap.
    997 *
     998 * Return: A negative value on error, otherwise the number of bytes used.
    999 */
   1000int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
   1001		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
   1002		  u32 run_buf_size)
   1003{
   1004	int ret, err;
   1005	CLST next_vcn, lcn, len;
   1006	size_t index;
   1007	bool ok;
   1008	struct wnd_bitmap *wnd;
   1009
   1010	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
   1011	if (ret <= 0)
   1012		return ret;
   1013
   1014	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
   1015		return ret;
   1016
   1017	if (ino == MFT_REC_BADCLUST)
   1018		return ret;
   1019
   1020	next_vcn = vcn = svcn;
   1021	wnd = &sbi->used.bitmap;
   1022
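        	/*
        	 * Walk every unpacked run and verify that its clusters are
        	 * marked as used in the volume bitmap; if they are not, mark the
        	 * volume dirty and set the missing bits.
        	 */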
   1023	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
   1024	     next_vcn <= evcn;
   1025	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
   1026		if (!ok || next_vcn != vcn)
   1027			return -EINVAL;
   1028
   1029		next_vcn = vcn + len;
   1030
   1031		if (lcn == SPARSE_LCN)
   1032			continue;
   1033
   1034		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
   1035			continue;
   1036
   1037		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
    1038		/* Check that the clusters are marked as used. */
   1039		ok = wnd_is_used(wnd, lcn, len);
   1040		up_read(&wnd->rw_lock);
   1041		if (ok)
   1042			continue;
   1043
   1044		/* Looks like volume is corrupted. */
   1045		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
   1046
   1047		if (down_write_trylock(&wnd->rw_lock)) {
   1048			/* Mark all zero bits as used in range [lcn, lcn+len). */
   1049			CLST i, lcn_f = 0, len_f = 0;
   1050
   1051			err = 0;
   1052			for (i = 0; i < len; i++) {
   1053				if (wnd_is_free(wnd, lcn + i, 1)) {
   1054					if (!len_f)
   1055						lcn_f = lcn + i;
   1056					len_f += 1;
   1057				} else if (len_f) {
   1058					err = wnd_set_used(wnd, lcn_f, len_f);
   1059					len_f = 0;
   1060					if (err)
   1061						break;
   1062				}
   1063			}
   1064
   1065			if (len_f)
   1066				err = wnd_set_used(wnd, lcn_f, len_f);
   1067
   1068			up_write(&wnd->rw_lock);
   1069			if (err)
   1070				return err;
   1071		}
   1072	}
   1073
   1074	return ret;
   1075}
   1076#endif
   1077
   1078/*
   1079 * run_get_highest_vcn
   1080 *
    1081 * Return the highest vcn from a mapping pairs array.
    1082 * It is used while replaying the log file.
   1083 */
   1084int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
   1085{
   1086	u64 vcn64 = vcn;
   1087	u8 size_size;
   1088
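        	/*
        	 * Walk the packed mapping pairs, summing the run lengths; the
        	 * lcn deltas are skipped since only the vcn extent matters here.
        	 */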
   1089	while ((size_size = *run_buf & 0xF)) {
   1090		u8 offset_size = *run_buf++ >> 4;
   1091		u64 len;
   1092
   1093		if (size_size > 8 || offset_size > 8)
   1094			return -EINVAL;
   1095
   1096		len = run_unpack_s64(run_buf, size_size, 0);
   1097		if (!len)
   1098			return -EINVAL;
   1099
   1100		run_buf += size_size + offset_size;
   1101		vcn64 += len;
   1102
   1103#ifndef CONFIG_NTFS3_64BIT_CLUSTER
   1104		if (vcn64 > 0x100000000ull)
   1105			return -EINVAL;
   1106#endif
   1107	}
   1108
   1109	*highest_vcn = vcn64 - 1;
   1110	return 0;
   1111}