cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lz4_decompress.c (20949B)


/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 *	Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */

/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>

/*-*****************************
 *	Decompression functions
 *******************************/

#define DEBUGLOG(l, ...) {}	/* disabled */

#ifndef assert
#define assert(condition) ((void)0)
#endif

/*
 * LZ4_decompress_generic() :
 * This generic decompression function covers all use cases.
 * It shall be instantiated several times, using different sets of directives.
 * Note that it is important for performance that this function really get inlined,
 * in order to remove useless branches during compilation optimization.
 */
static FORCE_INLINE int LZ4_decompress_generic(
	 const char * const src,
	 char * const dst,
	 int srcSize,
		/*
		 * If endOnInput == endOnInputSize,
		 * this value is `dstCapacity`
		 */
	 int outputSize,
	 /* endOnOutputSize, endOnInputSize */
	 endCondition_directive endOnInput,
	 /* full, partial */
	 earlyEnd_directive partialDecoding,
	 /* noDict, withPrefix64k, usingExtDict */
	 dict_directive dict,
	 /* always <= dst, == dst when no prefix */
	 const BYTE * const lowPrefix,
	 /* only if dict == usingExtDict */
	 const BYTE * const dictStart,
	 /* note : = 0 if noDict */
	 const size_t dictSize
	 )
{
	const BYTE *ip = (const BYTE *) src;
	const BYTE * const iend = ip + srcSize;

	BYTE *op = (BYTE *) dst;
	BYTE * const oend = op + outputSize;
	BYTE *cpy;

	const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
	static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
	static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};

	const int safeDecode = (endOnInput == endOnInputSize);
	const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));

	/* Set up the "end" pointers for the shortcut. */
	const BYTE *const shortiend = iend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
	const BYTE *const shortoend = oend -
		(endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;

	DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
		 srcSize, outputSize);

	/* Special cases */
	assert(lowPrefix <= op);
	assert(src != NULL);

	/* Empty output buffer */
	if ((endOnInput) && (unlikely(outputSize == 0)))
		return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;

	if ((!endOnInput) && (unlikely(outputSize == 0)))
		return (*ip == 0 ? 1 : -1);

	if ((endOnInput) && unlikely(srcSize == 0))
		return -1;

	/* Main Loop : decode sequences */
	while (1) {
		size_t length;
		const BYTE *match;
		size_t offset;

		/* get literal length */
		unsigned int const token = *ip++;
		length = token>>ML_BITS;

		/* ip < iend before the increment */
		assert(!endOnInput || ip <= iend);

		/*
		 * A two-stage shortcut for the most common case:
		 * 1) If the literal length is 0..14, and there is enough
		 * space, enter the shortcut and copy 16 bytes on behalf
		 * of the literals (in the fast mode, only 8 bytes can be
		 * safely copied this way).
		 * 2) Further if the match length is 4..18, copy 18 bytes
		 * in a similar manner; but we ensure that there's enough
		 * space in the output for those 18 bytes earlier, upon
		 * entering the shortcut (in other words, there is a
		 * combined check for both stages).
		 *
		 * The & in the likely() below is intentionally not && so that
		 * some compilers can produce better parallelized runtime code
		 */
		if ((endOnInput ? length != RUN_MASK : length <= 8)
		   /*
		    * strictly "less than" on input, to re-enter
		    * the loop with at least one byte
		    */
		   && likely((endOnInput ? ip < shortiend : 1) &
			     (op <= shortoend))) {
			/* Copy the literals */
			LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
			op += length; ip += length;

			/*
			 * The second stage:
			 * prepare for match copying, decode full info.
			 * If it doesn't work out, the info won't be wasted.
			 */
			length = token & ML_MASK; /* match length */
			offset = LZ4_readLE16(ip);
			ip += 2;
			match = op - offset;
			assert(match <= op); /* check overflow */

			/* Do not deal with overlapping matches. */
			if ((length != ML_MASK) &&
			    (offset >= 8) &&
			    (dict == withPrefix64k || match >= lowPrefix)) {
				/* Copy the match. */
				LZ4_memcpy(op + 0, match + 0, 8);
				LZ4_memcpy(op + 8, match + 8, 8);
				LZ4_memcpy(op + 16, match + 16, 2);
				op += length + MINMATCH;
				/* Both stages worked, load the next token. */
				continue;
			}

			/*
			 * The second stage didn't work out, but the info
			 * is ready. Propel it right to the point of match
			 * copying.
			 */
			goto _copy_match;
		}

		/* decode literal length */
		if (length == RUN_MASK) {
			unsigned int s;

			if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
				/* overflow detection */
				goto _output_error;
			}
			do {
				s = *ip++;
				length += s;
			} while (likely(endOnInput
				? ip < iend - RUN_MASK
				: 1) & (s == 255));

			if ((safeDecode)
			    && unlikely((uptrval)(op) +
					length < (uptrval)(op))) {
				/* overflow detection */
				goto _output_error;
			}
			if ((safeDecode)
			    && unlikely((uptrval)(ip) +
					length < (uptrval)(ip))) {
				/* overflow detection */
				goto _output_error;
			}
		}

		/* copy literals */
		cpy = op + length;
		LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);

		if (((endOnInput) && ((cpy > oend - MFLIMIT)
			|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
			|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
			if (partialDecoding) {
				if (cpy > oend) {
					/*
					 * Partial decoding :
					 * stop in the middle of literal segment
					 */
					cpy = oend;
					length = oend - op;
				}
				if ((endOnInput)
					&& (ip + length > iend)) {
					/*
					 * Error :
					 * read attempt beyond
					 * end of input buffer
					 */
					goto _output_error;
				}
			} else {
				if ((!endOnInput)
					&& (cpy != oend)) {
					/*
					 * Error :
					 * block decoding must
					 * stop exactly there
					 */
					goto _output_error;
				}
				if ((endOnInput)
					&& ((ip + length != iend)
					|| (cpy > oend))) {
					/*
					 * Error :
					 * input must be consumed
					 */
					goto _output_error;
				}
			}

			/*
			 * supports overlapping memory regions; only matters
			 * for in-place decompression scenarios
			 */
			LZ4_memmove(op, ip, length);
			ip += length;
			op += length;

			/* Necessarily EOF when !partialDecoding.
			 * When partialDecoding, it is EOF if we've either
			 * filled the output buffer or
			 * can't proceed with reading an offset for following match.
			 */
			if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
				break;
		} else {
			/* may overwrite up to WILDCOPYLENGTH beyond cpy */
			LZ4_wildCopy(op, ip, cpy);
			ip += length;
			op = cpy;
		}

		/* get offset */
		offset = LZ4_readLE16(ip);
		ip += 2;
		match = op - offset;

		/* get matchlength */
		length = token & ML_MASK;

_copy_match:
		if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
			/* Error : offset outside buffers */
			goto _output_error;
		}

		/* costs ~1%; silence an msan warning when offset == 0 */
		/*
		 * note : when partialDecoding, there is no guarantee that
		 * at least 4 bytes remain available in output buffer
		 */
		if (!partialDecoding) {
			assert(oend > op);
			assert(oend - op >= 4);

			LZ4_write32(op, (U32)offset);
		}

		if (length == ML_MASK) {
			unsigned int s;

			do {
				s = *ip++;

				if ((endOnInput) && (ip > iend - LASTLITERALS))
					goto _output_error;

				length += s;
			} while (s == 255);

			if ((safeDecode)
				&& unlikely(
					(uptrval)(op) + length < (uptrval)op)) {
				/* overflow detection */
				goto _output_error;
			}
		}

		length += MINMATCH;

		/* match starting within external dictionary */
		if ((dict == usingExtDict) && (match < lowPrefix)) {
			if (unlikely(op + length > oend - LASTLITERALS)) {
				/* doesn't respect parsing restriction */
				if (!partialDecoding)
					goto _output_error;
				length = min(length, (size_t)(oend - op));
			}

			if (length <= (size_t)(lowPrefix - match)) {
				/*
				 * match fits entirely within external
				 * dictionary : just copy
				 */
				memmove(op, dictEnd - (lowPrefix - match),
					length);
				op += length;
			} else {
				/*
				 * match stretches into both external
				 * dictionary and current block
				 */
				size_t const copySize = (size_t)(lowPrefix - match);
				size_t const restSize = length - copySize;

				LZ4_memcpy(op, dictEnd - copySize, copySize);
				op += copySize;
				if (restSize > (size_t)(op - lowPrefix)) {
					/* overlap copy */
					BYTE * const endOfMatch = op + restSize;
					const BYTE *copyFrom = lowPrefix;

					while (op < endOfMatch)
						*op++ = *copyFrom++;
				} else {
					LZ4_memcpy(op, lowPrefix, restSize);
					op += restSize;
				}
			}
			continue;
		}

		/* copy match within block */
		cpy = op + length;

		/*
		 * partialDecoding :
		 * may not respect endBlock parsing restrictions
		 */
		assert(op <= oend);
		if (partialDecoding &&
		    (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			size_t const mlen = min(length, (size_t)(oend - op));
			const BYTE * const matchEnd = match + mlen;
			BYTE * const copyEnd = op + mlen;

			if (matchEnd > op) {
				/* overlap copy */
				while (op < copyEnd)
					*op++ = *match++;
			} else {
				LZ4_memcpy(op, match, mlen);
			}
			op = copyEnd;
			if (op == oend)
				break;
			continue;
		}

		if (unlikely(offset < 8)) {
			op[0] = match[0];
			op[1] = match[1];
			op[2] = match[2];
			op[3] = match[3];
			match += inc32table[offset];
			LZ4_memcpy(op + 4, match, 4);
			match -= dec64table[offset];
		} else {
			LZ4_copy8(op, match);
			match += 8;
		}

		op += 8;

		if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
			BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);

			if (cpy > oend - LASTLITERALS) {
				/*
				 * Error : last LASTLITERALS bytes
				 * must be literals (uncompressed)
				 */
				goto _output_error;
			}

			if (op < oCopyLimit) {
				LZ4_wildCopy(op, match, oCopyLimit);
				match += oCopyLimit - op;
				op = oCopyLimit;
			}
			while (op < cpy)
				*op++ = *match++;
		} else {
			LZ4_copy8(op, match);
			if (length > 16)
				LZ4_wildCopy(op + 8, match + 8, cpy);
		}
		op = cpy; /* wildcopy correction */
	}

	/* end of decoding */
	if (endOnInput) {
		/* Nb of output bytes decoded */
		return (int) (((char *)op) - dst);
	} else {
		/* Nb of input bytes read */
		return (int) (((const char *)ip) - src);
	}

	/* Overflow error detected */
_output_error:
	return (int) (-(((const char *)ip) - src)) - 1;
}

int LZ4_decompress_safe(const char *source, char *dest,
	int compressedSize, int maxDecompressedSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxDecompressedSize,
				      endOnInputSize, decode_full_block,
				      noDict, (BYTE *)dest, NULL, 0);
}
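
/*
 * Illustrative sketch only (not part of the original file): one way a kernel
 * caller might drive LZ4_decompress_safe() above. The function and buffer
 * names are made up for the example; only the LZ4_decompress_safe() signature
 * comes from this file. Kept under #if 0 so it does not affect the build.
 */
#if 0
static int example_decompress_block(const char *comp, int comp_len,
				    char *out, int out_cap)
{
	/* negative return means corrupted input or output too small */
	int n = LZ4_decompress_safe(comp, out, comp_len, out_cap);

	if (n < 0)
		return -1;
	return n;	/* number of bytes written to 'out' */
}
#endif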

int LZ4_decompress_safe_partial(const char *src, char *dst,
	int compressedSize, int targetOutputSize, int dstCapacity)
{
	dstCapacity = min(targetOutputSize, dstCapacity);
	return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
				      endOnInputSize, partial_decode,
				      noDict, (BYTE *)dst, NULL, 0);
}

int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
	return LZ4_decompress_generic(source, dest, 0, originalSize,
				      endOnOutputSize, decode_full_block,
				      withPrefix64k,
				      (BYTE *)dest - 64 * KB, NULL, 0);
}

/* ===== Instantiate a few more decoding cases, used more than once. ===== */

static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
				      int compressedSize, int maxOutputSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      withPrefix64k,
				      (BYTE *)dest - 64 * KB, NULL, 0);
}

static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
					       int compressedSize,
					       int maxOutputSize,
					       size_t prefixSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      noDict,
				      (BYTE *)dest - prefixSize, NULL, 0);
}

int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
				     int compressedSize, int maxOutputSize,
				     const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest,
				      (const BYTE *)dictStart, dictSize);
}

static int LZ4_decompress_fast_extDict(const char *source, char *dest,
				       int originalSize,
				       const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest,
				      (const BYTE *)dictStart, dictSize);
}

/*
 * The "double dictionary" mode, for use with e.g. ring buffers: the first part
 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
 * These routines are used only once, in LZ4_decompress_*_continue().
 */
static FORCE_INLINE
int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
				   int compressedSize, int maxOutputSize,
				   size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      compressedSize, maxOutputSize,
				      endOnInputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest - prefixSize,
				      (const BYTE *)dictStart, dictSize);
}

static FORCE_INLINE
int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
				   int originalSize, size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	return LZ4_decompress_generic(source, dest,
				      0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest - prefixSize,
				      (const BYTE *)dictStart, dictSize);
}

/* ===== streaming decompression functions ===== */

int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *dictionary, int dictSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;

	lz4sd->prefixSize = (size_t) dictSize;
	lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
	lz4sd->externalDict = NULL;
	lz4sd->extDictSize	= 0;
	return 1;
}

/*
 * *_continue() :
 * These decoding functions allow decompression of multiple blocks
 * in "streaming" mode.
 * Previously decoded blocks must still be available at the memory
 * position where they were decoded.
 * If it's not possible, save the relevant part of
 * decoded data into a safe buffer,
 * and indicate where it stands using LZ4_setStreamDecode()
 */
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *source, char *dest, int compressedSize, int maxOutputSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;
	int result;

	if (lz4sd->prefixSize == 0) {
		/* The first call, no dictionary yet. */
		assert(lz4sd->extDictSize == 0);
		result = LZ4_decompress_safe(source, dest,
			compressedSize, maxOutputSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd = (BYTE *)dest + result;
	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
		/* They're rolling the current segment. */
		if (lz4sd->prefixSize >= 64 * KB - 1)
			result = LZ4_decompress_safe_withPrefix64k(source, dest,
				compressedSize, maxOutputSize);
		else if (lz4sd->extDictSize == 0)
			result = LZ4_decompress_safe_withSmallPrefix(source,
				dest, compressedSize, maxOutputSize,
				lz4sd->prefixSize);
		else
			result = LZ4_decompress_safe_doubleDict(source, dest,
				compressedSize, maxOutputSize,
				lz4sd->prefixSize,
				lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize += result;
		lz4sd->prefixEnd  += result;
	} else {
		/*
		 * The buffer wraps around, or they're
		 * switching to another buffer.
		 */
		lz4sd->extDictSize = lz4sd->prefixSize;
		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
		result = LZ4_decompress_safe_forceExtDict(source, dest,
			compressedSize, maxOutputSize,
			lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd  = (BYTE *)dest + result;
	}

	return result;
}
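
/*
 * Illustrative sketch only (not part of the original file): streaming use of
 * LZ4_setStreamDecode()/LZ4_decompress_safe_continue() for a sequence of
 * dependent blocks. The block array and helper names are made up for the
 * example; the key constraint is the one stated above: already-decoded data
 * must stay where it was decoded (here it stays contiguous in 'dst').
 * Kept under #if 0 so it does not affect the build.
 */
#if 0
static int example_decompress_stream(const char * const blocks[],
				     const int sizes[], int nr_blocks,
				     char *dst, int dst_cap)
{
	LZ4_streamDecode_t sd;
	char *out = dst;
	int i, n;

	LZ4_setStreamDecode(&sd, NULL, 0);	/* start with no dictionary */

	for (i = 0; i < nr_blocks; i++) {
		n = LZ4_decompress_safe_continue(&sd, blocks[i], out,
						 sizes[i],
						 dst_cap - (int)(out - dst));
		if (n < 0)
			return -1;	/* corrupted block */
		out += n;	/* history stays in place before 'out' */
	}
	return (int)(out - dst);
}
#endif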
    639
    640int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
    641	const char *source, char *dest, int originalSize)
    642{
    643	LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
    644	int result;
    645
    646	if (lz4sd->prefixSize == 0) {
    647		assert(lz4sd->extDictSize == 0);
    648		result = LZ4_decompress_fast(source, dest, originalSize);
    649		if (result <= 0)
    650			return result;
    651		lz4sd->prefixSize = originalSize;
    652		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
    653	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
    654		if (lz4sd->prefixSize >= 64 * KB - 1 ||
    655		    lz4sd->extDictSize == 0)
    656			result = LZ4_decompress_fast(source, dest,
    657						     originalSize);
    658		else
    659			result = LZ4_decompress_fast_doubleDict(source, dest,
    660				originalSize, lz4sd->prefixSize,
    661				lz4sd->externalDict, lz4sd->extDictSize);
    662		if (result <= 0)
    663			return result;
    664		lz4sd->prefixSize += originalSize;
    665		lz4sd->prefixEnd  += originalSize;
    666	} else {
    667		lz4sd->extDictSize = lz4sd->prefixSize;
    668		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
    669		result = LZ4_decompress_fast_extDict(source, dest,
    670			originalSize, lz4sd->externalDict, lz4sd->extDictSize);
    671		if (result <= 0)
    672			return result;
    673		lz4sd->prefixSize = originalSize;
    674		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
    675	}
    676	return result;
    677}
    678
    679int LZ4_decompress_safe_usingDict(const char *source, char *dest,
    680				  int compressedSize, int maxOutputSize,
    681				  const char *dictStart, int dictSize)
    682{
    683	if (dictSize == 0)
    684		return LZ4_decompress_safe(source, dest,
    685					   compressedSize, maxOutputSize);
    686	if (dictStart+dictSize == dest) {
    687		if (dictSize >= 64 * KB - 1)
    688			return LZ4_decompress_safe_withPrefix64k(source, dest,
    689				compressedSize, maxOutputSize);
    690		return LZ4_decompress_safe_withSmallPrefix(source, dest,
    691			compressedSize, maxOutputSize, dictSize);
    692	}
    693	return LZ4_decompress_safe_forceExtDict(source, dest,
    694		compressedSize, maxOutputSize, dictStart, dictSize);
    695}
    696
    697int LZ4_decompress_fast_usingDict(const char *source, char *dest,
    698				  int originalSize,
    699				  const char *dictStart, int dictSize)
    700{
    701	if (dictSize == 0 || dictStart + dictSize == dest)
    702		return LZ4_decompress_fast(source, dest, originalSize);
    703
    704	return LZ4_decompress_fast_extDict(source, dest, originalSize,
    705		dictStart, dictSize);
    706}
    707
    708#ifndef STATIC
    709EXPORT_SYMBOL(LZ4_decompress_safe);
    710EXPORT_SYMBOL(LZ4_decompress_safe_partial);
    711EXPORT_SYMBOL(LZ4_decompress_fast);
    712EXPORT_SYMBOL(LZ4_setStreamDecode);
    713EXPORT_SYMBOL(LZ4_decompress_safe_continue);
    714EXPORT_SYMBOL(LZ4_decompress_fast_continue);
    715EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
    716EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
    717
    718MODULE_LICENSE("Dual BSD/GPL");
    719MODULE_DESCRIPTION("LZ4 decompressor");
    720#endif