cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

zstd.c (18821B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
 */

#include <linux/bio.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include "misc.h"
#include "compression.h"
#include "ctree.h"

#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)

static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
						 size_t src_len)
{
	zstd_parameters params = zstd_get_params(level, src_len);

	if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
		params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
	WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
	return params;
}
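
/*
 * Illustrative sketch: capping windowLog at ZSTD_BTRFS_MAX_WINDOWLOG bounds
 * the match window at 1 << 17 = 128KiB, which in turn bounds the memory a
 * decompressor must provision.  A hypothetical invariant check:
 *
 *	zstd_parameters p = zstd_get_btrfs_parameters(15, SZ_128K);
 *
 *	WARN_ON(p.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG);
 */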

struct workspace {
	void *mem;
	size_t size;
	char *buf;
	unsigned int level;
	unsigned int req_level;
	unsigned long last_used; /* jiffies */
	struct list_head list;
	struct list_head lru_list;
	zstd_in_buffer in_buf;
	zstd_out_buffer out_buf;
};

/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru.  Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace scans up from the requested level, using the bitmap to
 * identify the levels that have available workspaces.  This lets us recycle
 * higher level workspaces because of the monotonic memory guarantee.  A
 * workspace's last_used is only updated if it is being used at its
 * corresponding level (see the sketch below the manager declarations).
 * Putting a workspace involves adding it back to the appropriate places
 * and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES.  This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit, which is 2 (percpu
 * limit).
 */

struct zstd_workspace_manager {
	const struct btrfs_compress_op *ops;
	spinlock_t lock;
	struct list_head lru_list;
	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
	unsigned long active_map;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static struct zstd_workspace_manager wsm;

static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
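
/*
 * Illustrative sketch (hypothetical helper, not part of the btrfs API):
 * level N workspaces sit on idle_ws[N - 1], and bit N - 1 of active_map
 * mirrors whether that list is non-empty:
 *
 *	static bool level_has_idle_ws(unsigned int level)
 *	{
 *		return test_bit(level - 1, &wsm.active_map);
 *	}
 */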

static inline struct workspace *list_to_workspace(struct list_head *list)
{
	return container_of(list, struct workspace, list);
}

void zstd_free_workspace(struct list_head *ws);
struct list_head *zstd_alloc_workspace(unsigned int level);

/**
 * zstd_reclaim_timer_fn - timer callback to free unused workspaces
 * @timer: timer
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 *
 * The context is softirq and does not need the _bh locking primitives.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
	struct list_head *pos, *next;

	spin_lock(&wsm.lock);

	if (list_empty(&wsm.lru_list)) {
		spin_unlock(&wsm.lock);
		return;
	}

	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
		struct workspace *victim = container_of(pos, struct workspace,
							lru_list);
		unsigned int level;

		if (time_after(victim->last_used, reclaim_threshold))
			break;

		/* workspace is in use */
		if (victim->req_level)
			continue;

		level = victim->level;
		list_del(&victim->lru_list);
		list_del(&victim->list);
		zstd_free_workspace(&victim->list);

		if (list_empty(&wsm.idle_ws[level - 1]))
			clear_bit(level - 1, &wsm.active_map);
	}

	if (!list_empty(&wsm.lru_list))
		mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

	spin_unlock(&wsm.lock);
}
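
/*
 * Illustrative sketch: time_after() compares jiffies values safely across
 * counter wraparound, which a plain "<" would not.  Because the lru_list is
 * scanned oldest-first, the first victim that is too recent ends the walk.
 * Reclaim eligibility alone could be written as:
 *
 *	bool reclaimable =
 *		!time_after(victim->last_used,
 *			    jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES);
 */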

/*
 * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
 *
 * It is possible based on the level configurations that a higher level
 * workspace uses less memory than a lower level workspace.  In order to reuse
 * workspaces, this must be made a monotonic relationship.  This precomputes
 * the required memory for each level and enforces the monotonicity between
 * level and memory required.
 */
static void zstd_calc_ws_mem_sizes(void)
{
	size_t max_size = 0;
	unsigned int level;

	for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
		zstd_parameters params =
			zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
		size_t level_size =
			max_t(size_t,
			      zstd_cstream_workspace_bound(&params.cParams),
			      zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));

		max_size = max_t(size_t, max_size, level_size);
		zstd_ws_mem_sizes[level - 1] = max_size;
	}
}
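
/*
 * Worked example with made-up numbers: if the raw per-level bounds came out
 * as { 1M, 3M, 2M, 4M }, the running maximum stored in zstd_ws_mem_sizes
 * would be { 1M, 3M, 3M, 4M }.  Level 3's entry is padded up to 3M, so any
 * workspace allocated for level N is guaranteed to satisfy requests at all
 * levels <= N.
 */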

void zstd_init_workspace_manager(void)
{
	struct list_head *ws;
	int i;

	zstd_calc_ws_mem_sizes();

	wsm.ops = &btrfs_zstd_compress;
	spin_lock_init(&wsm.lock);
	init_waitqueue_head(&wsm.wait);
	timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);

	INIT_LIST_HEAD(&wsm.lru_list);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&wsm.idle_ws[i]);

	ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
	if (IS_ERR(ws)) {
		pr_warn(
		"BTRFS: cannot preallocate zstd compression workspace\n");
	} else {
		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
		list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
	}
}

void zstd_cleanup_workspace_manager(void)
{
	struct workspace *workspace;
	int i;

	spin_lock_bh(&wsm.lock);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
		while (!list_empty(&wsm.idle_ws[i])) {
			workspace = container_of(wsm.idle_ws[i].next,
						 struct workspace, list);
			list_del(&workspace->list);
			list_del(&workspace->lru_list);
			zstd_free_workspace(&workspace->list);
		}
	}
	spin_unlock_bh(&wsm.lock);

	del_timer_sync(&wsm.timer);
}

/*
 * zstd_find_workspace - find workspace
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level.  This lets us utilize already allocated workspaces before
 * allocating a new one.  If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated.  This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 */
static struct list_head *zstd_find_workspace(unsigned int level)
{
	struct list_head *ws;
	struct workspace *workspace;
	int i = level - 1;

	spin_lock_bh(&wsm.lock);
	for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
		if (!list_empty(&wsm.idle_ws[i])) {
			ws = wsm.idle_ws[i].next;
			workspace = list_to_workspace(ws);
			list_del_init(ws);
			/* keep its lru place if a lower level is using it */
			workspace->req_level = level;
			if (level == workspace->level)
				list_del(&workspace->lru_list);
			if (list_empty(&wsm.idle_ws[i]))
				clear_bit(i, &wsm.active_map);
			spin_unlock_bh(&wsm.lock);
			return ws;
		}
	}
	spin_unlock_bh(&wsm.lock);

	return NULL;
}
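
/*
 * Illustrative sketch: for_each_set_bit_from() starts at the requested
 * level's bit and visits only set bits at or above it.  For a level 2
 * request with active_map == 0b10010 (levels 2 and 5 populated), the loop
 * visits i == 1 and then i == 4, so an idle level 5 workspace can serve the
 * level 2 request when nothing smaller is available.
 */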

/*
 * zstd_get_workspace - zstd's get_workspace
 * @level: compression level
 *
 * If @level is 0, then any compression level can be used.  Therefore, we begin
 * scanning from 1.  We first scan through the existing workspaces and only
 * then attempt to allocate a new one.  If the allocation fails due to memory
 * pressure, go to sleep waiting for the max level workspace to free up.
 */
struct list_head *zstd_get_workspace(unsigned int level)
{
	struct list_head *ws;
	unsigned int nofs_flag;

	/* level == 0 means we can use any workspace */
	if (!level)
		level = 1;

again:
	ws = zstd_find_workspace(level);
	if (ws)
		return ws;

	nofs_flag = memalloc_nofs_save();
	ws = zstd_alloc_workspace(level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(ws)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&wsm.wait, &wait);

		goto again;
	}

	return ws;
}
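
/*
 * Illustrative sketch: the memalloc_nofs_save()/memalloc_nofs_restore()
 * pair above marks a scope in which allocations implicitly behave as
 * GFP_NOFS, so direct reclaim cannot recurse back into the filesystem:
 *
 *	nofs_flag = memalloc_nofs_save();
 *	mem = kvmalloc(size, GFP_KERNEL);	(treated as GFP_NOFS here)
 *	memalloc_nofs_restore(nofs_flag);
 */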

/*
 * zstd_put_workspace - zstd put_workspace
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if the
 * workspace was used at its own compression level.  Here is where we continue
 * to protect the max level workspace or update last_used accordingly.  If the
 * reclaim timer isn't set, it is also set here.  Only putting back the max
 * level workspace wakes up waiters.
 */
void zstd_put_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_to_workspace(ws);

	spin_lock_bh(&wsm.lock);

	/* A node is only taken off the lru if we are the corresponding level */
	if (workspace->req_level == workspace->level) {
		/* Hide a max level workspace from reclaim */
		if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
			INIT_LIST_HEAD(&workspace->lru_list);
		} else {
			workspace->last_used = jiffies;
			list_add(&workspace->lru_list, &wsm.lru_list);
			if (!timer_pending(&wsm.timer))
				mod_timer(&wsm.timer,
					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
		}
	}

	set_bit(workspace->level - 1, &wsm.active_map);
	list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
	workspace->req_level = 0;

	spin_unlock_bh(&wsm.lock);

	if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
		cond_wake_up(&wsm.wait);
}
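
/*
 * Illustrative sketch of the sleep/wake pairing: cond_wake_up() (a btrfs
 * helper) only issues a wakeup when the waitqueue actually has waiters:
 *
 *	waiter (zstd_get_workspace)	waker (zstd_put_workspace)
 *	---------------------------	--------------------------
 *	prepare_to_wait(&wsm.wait, ...)
 *	schedule()			put back max level workspace
 *					cond_wake_up(&wsm.wait)
 *	finish_wait(&wsm.wait, ...)
 *	goto again (retry allocation)
 */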

void zstd_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->mem);
	kfree(workspace->buf);
	kfree(workspace);
}

struct list_head *zstd_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->size = zstd_ws_mem_sizes[level - 1];
	workspace->level = level;
	workspace->req_level = level;
	workspace->last_used = jiffies;
	workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);
	INIT_LIST_HEAD(&workspace->lru_list);

	return &workspace->list;
fail:
	zstd_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	zstd_cstream *stream;
	int ret = 0;
	int nr_pages = 0;
	struct page *in_page = NULL;  /* The current page to read */
	struct page *out_page = NULL; /* The current page to write to */
	unsigned long tot_in = 0;
	unsigned long tot_out = 0;
	unsigned long len = *total_out;
	const unsigned long nr_dest_pages = *out_pages;
	unsigned long max_out = nr_dest_pages * PAGE_SIZE;
	zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
							   len);

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/* Initialize the stream */
	stream = zstd_init_cstream(&params, len, workspace->mem,
			workspace->size);
	if (!stream) {
		pr_warn("BTRFS: zstd_init_cstream failed\n");
		ret = -EIO;
		goto out;
	}

	/* Map in the first page of input data */
	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	workspace->in_buf.src = kmap(in_page);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);

	/* Allocate and map in the output buffer */
	out_page = alloc_page(GFP_NOFS);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pages[nr_pages++] = out_page;
	workspace->out_buf.dst = kmap(out_page);
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

	while (1) {
		size_t ret2;

		ret2 = zstd_compress_stream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_compress_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto out;
		}

		/* Check to see if we are making it bigger */
		if (tot_in + workspace->in_buf.pos > 8192 &&
				tot_in + workspace->in_buf.pos <
				tot_out + workspace->out_buf.pos) {
			ret = -E2BIG;
			goto out;
		}

		/* We've reached the end of our output range */
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		/* Check if we need more output space */
		if (workspace->out_buf.pos == workspace->out_buf.size) {
			tot_out += PAGE_SIZE;
			max_out -= PAGE_SIZE;
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			pages[nr_pages++] = out_page;
			workspace->out_buf.dst = kmap(out_page);
			workspace->out_buf.pos = 0;
			workspace->out_buf.size = min_t(size_t, max_out,
							PAGE_SIZE);
		}

		/* We've reached the end of the input */
		if (workspace->in_buf.pos >= len) {
			tot_in += workspace->in_buf.pos;
			break;
		}

		/* Check if we need more input */
		if (workspace->in_buf.pos == workspace->in_buf.size) {
			tot_in += PAGE_SIZE;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			len -= PAGE_SIZE;
			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
			workspace->in_buf.src = kmap(in_page);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
		}
	}
	while (1) {
		size_t ret2;

		ret2 = zstd_end_stream(stream, &workspace->out_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_end_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto out;
		}
		if (ret2 == 0) {
			tot_out += workspace->out_buf.pos;
			break;
		}
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		tot_out += PAGE_SIZE;
		max_out -= PAGE_SIZE;
		kunmap(out_page);
		if (nr_pages == nr_dest_pages) {
			out_page = NULL;
			ret = -E2BIG;
			goto out;
		}
		out_page = alloc_page(GFP_NOFS);
		if (out_page == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		pages[nr_pages++] = out_page;
		workspace->out_buf.dst = kmap(out_page);
		workspace->out_buf.pos = 0;
		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
	}

	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_in = tot_in;
	*total_out = tot_out;
out:
	*out_pages = nr_pages;
	/* Cleanup */
	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	if (out_page)
		kunmap(out_page);
	return ret;
}
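
/*
 * Illustrative sketch of the parameter convention above (variable names are
 * hypothetical): on entry *total_out carries the input length and *out_pages
 * the capacity of the pages array; on success they are rewritten with the
 * compressed size and the number of pages actually filled:
 *
 *	unsigned long nr = capacity, in = 0, out = src_len;
 *	int err = zstd_compress_pages(ws, mapping, start, pages,
 *				      &nr, &in, &out);
 *
 * A zero return guarantees out < in, i.e. compression actually saved space.
 */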

int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct page **pages_in = cb->compressed_pages;
	size_t srclen = cb->compressed_len;
	zstd_dstream *stream;
	int ret = 0;
	unsigned long page_in_index = 0;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	unsigned long total_out = 0;

	stream = zstd_init_dstream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_debug("BTRFS: zstd_init_dstream failed\n");
		ret = -EIO;
		goto done;
	}

	workspace->in_buf.src = kmap(pages_in[page_in_index]);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;

	while (1) {
		size_t ret2;

		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto done;
		}
		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
				total_out - buf_start, cb, buf_start);
		if (ret == 0)
			break;

		if (workspace->in_buf.pos >= srclen)
			break;

		/* Check if we've hit the end of a frame */
		if (ret2 == 0)
			break;

		if (workspace->in_buf.pos == workspace->in_buf.size) {
			kunmap(pages_in[page_in_index++]);
			if (page_in_index >= total_pages_in) {
				workspace->in_buf.src = NULL;
				ret = -EIO;
				goto done;
			}
			srclen -= PAGE_SIZE;
			workspace->in_buf.src = kmap(pages_in[page_in_index]);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
		}
	}
	ret = 0;
	zero_fill_bio(cb->orig_bio);
done:
	if (workspace->in_buf.src)
		kunmap(pages_in[page_in_index]);
	return ret;
}

int zstd_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	zstd_dstream *stream;
	int ret = 0;
	size_t ret2;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;

	stream = zstd_init_dstream(
			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
	if (!stream) {
		pr_warn("BTRFS: zstd_init_dstream failed\n");
		ret = -EIO;
		goto finish;
	}

	destlen = min_t(size_t, destlen, PAGE_SIZE);

	workspace->in_buf.src = data_in;
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = srclen;

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;

	ret2 = 1;
	while (pg_offset < destlen
	       && workspace->in_buf.pos < workspace->in_buf.size) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		/* Check if the frame is over and we still need more input */
		if (ret2 == 0) {
			pr_debug("BTRFS: zstd_decompress_stream ended early\n");
			ret = -EIO;
			goto finish;
		}
		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto finish;
		}

		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		if (total_out <= start_byte)
			continue;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min_t(unsigned long, destlen - pg_offset,
				workspace->out_buf.size - buf_offset);

		memcpy_to_page(dest_page, pg_offset,
			       workspace->out_buf.dst + buf_offset, bytes);

		pg_offset += bytes;
	}
	ret = 0;
finish:
	if (pg_offset < destlen)
		memzero_page(dest_page, pg_offset, destlen - pg_offset);
	return ret;
}

const struct btrfs_compress_op btrfs_zstd_compress = {
	/* ZSTD uses its own workspace manager */
	.workspace_manager = NULL,
	.max_level	= ZSTD_BTRFS_MAX_LEVEL,
	.default_level	= ZSTD_BTRFS_DEFAULT_LEVEL,
};