zstd_compress_superblock.c (43548B)
/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_compress_superblock.h"

#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
#include "hist.h"                     /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"

/*-*************************************
*  Superblock entropy buffer structs
***************************************/
/* ZSTD_hufCTablesMetadata_t :
 *  Stores Literals Block Type for a super-block in hType, and
 *  huffman tree description in hufDesBuffer.
 *  hufDesSize refers to the size of the huffman tree description in bytes.
 *  This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
typedef struct {
    symbolEncodingType_e hType;
    BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
    size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;

/* ZSTD_fseCTablesMetadata_t :
 *  Stores symbol compression modes for a super-block in {ll, of, ml}Type, and
 *  fse tables in fseTablesBuffer.
 *  fseTablesSize refers to the size of the fse tables in bytes.
 *  This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
typedef struct {
    symbolEncodingType_e llType;
    symbolEncodingType_e ofType;
    symbolEncodingType_e mlType;
    BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
    size_t fseTablesSize;
    size_t lastCountSize; /* This is to account for the bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
} ZSTD_fseCTablesMetadata_t;

typedef struct {
    ZSTD_hufCTablesMetadata_t hufMetadata;
    ZSTD_fseCTablesMetadata_t fseMetadata;
} ZSTD_entropyCTablesMetadata_t;

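/* How this metadata is used (summary of the flow implemented below) :
 * ZSTD_buildSuperBlockEntropy() fills one ZSTD_entropyCTablesMetadata_t per
 * super-block. ZSTD_compressSubBlock_multi() then shares it across all
 * sub-blocks : the huffman description and fse tables are emitted only with
 * the first sub-block that uses them, and later sub-blocks fall back to
 * repeat / treeless modes referencing those same tables. */
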
/* ZSTD_buildSuperBlockEntropy_literal() :
 *  Builds entropy for the super-block literals.
 *  Stores literals block type (raw, rle, compressed, repeat) and
 *  huffman description table to hufMetadata.
 *  @return : size of huffman description table or error code */
static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
                                            const ZSTD_hufCTables_t* prevHuf,
                                                  ZSTD_hufCTables_t* nextHuf,
                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
                                            const int disableLiteralsCompression,
                                                  void* workspace, size_t wkspSize)
{
    BYTE* const wkspStart = (BYTE*)workspace;
    BYTE* const wkspEnd = wkspStart + wkspSize;
    BYTE* const countWkspStart = wkspStart;
    unsigned* const countWksp = (unsigned*)workspace;
    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
    BYTE* const nodeWksp = countWkspStart + countWkspSize;
    const size_t nodeWkspSize = wkspEnd - nodeWksp;
    unsigned maxSymbolValue = 255;
    unsigned huffLog = HUF_TABLELOG_DEFAULT;
    HUF_repeat repeat = prevHuf->repeatMode;

    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralsCompression) {
        DEBUGLOG(5, "set_basic - disabled");
        hufMetadata->hType = set_basic;
        return 0;
    }

    /* small ? don't even attempt compression (speed opt) */
#   define COMPRESS_LITERALS_SIZE_MIN 63
    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
        if (srcSize <= minLitSize) {
            DEBUGLOG(5, "set_basic - too small");
            hufMetadata->hType = set_basic;
            return 0;
        }
    }

    /* Scan input and build symbol stats */
    {   size_t const largest = HIST_count_wksp(countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
        FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
        if (largest == srcSize) {
            DEBUGLOG(5, "set_rle");
            hufMetadata->hType = set_rle;
            return 0;
        }
        if (largest <= (srcSize >> 7) + 4) {
            DEBUGLOG(5, "set_basic - no gain");
            hufMetadata->hType = set_basic;
            return 0;
        }
    }

    /* Validate the previous Huffman table */
    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
        repeat = HUF_repeat_none;
    }

    /* Build Huffman Tree */
    ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
                                                    maxSymbolValue, huffLog,
                                                    nodeWksp, nodeWkspSize);
        FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
        huffLog = (U32)maxBits;
        {   /* Build and write the CTable */
            size_t const newCSize = HUF_estimateCompressedSize(
                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
            size_t const hSize = HUF_writeCTable_wksp(
                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
                    nodeWksp, nodeWkspSize);
            /* Check against repeating the previous CTable */
            if (repeat != HUF_repeat_none) {
                size_t const oldCSize = HUF_estimateCompressedSize(
                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
                    DEBUGLOG(5, "set_repeat - smaller");
                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
                    hufMetadata->hType = set_repeat;
                    return 0;
                }
            }
            if (newCSize + hSize >= srcSize) {
                DEBUGLOG(5, "set_basic - no gains");
                ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
                hufMetadata->hType = set_basic;
                return 0;
            }
            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
            hufMetadata->hType = set_compressed;
            nextHuf->repeatMode = HUF_repeat_check;
            return hSize;
    }   }
}

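/* Note on the workspace :
 * both entropy builders carve the caller-provided workspace the same way :
 * the leading bytes hold the symbol histogram (countWksp : (HUF_SYMBOLVALUE_MAX+1)
 * counters above, (MaxSeq+1) counters below), and the remainder serves as scratch
 * space for table construction (huffman nodes above, FSE CTable building below). */
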
/* ZSTD_buildSuperBlockEntropy_sequences() :
 *  Builds entropy for the super-block sequences.
 *  Stores symbol compression modes and fse tables to fseMetadata.
 *  @return : size of fse tables or error code */
static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
                                              const ZSTD_fseCTables_t* prevEntropy,
                                                    ZSTD_fseCTables_t* nextEntropy,
                                              const ZSTD_CCtx_params* cctxParams,
                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                    void* workspace, size_t wkspSize)
{
    BYTE* const wkspStart = (BYTE*)workspace;
    BYTE* const wkspEnd = wkspStart + wkspSize;
    BYTE* const countWkspStart = wkspStart;
    unsigned* const countWksp = (unsigned*)workspace;
    const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
    BYTE* const cTableWksp = countWkspStart + countWkspSize;
    const size_t cTableWkspSize = wkspEnd - cTableWksp;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    BYTE* const ostart = fseMetadata->fseTablesBuffer;
    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
    BYTE* op = ostart;

    assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
    ZSTD_memset(workspace, 0, wkspSize);

    fseMetadata->lastCountSize = 0;
    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    /* build CTable for Literal Lengths */
    {   U32 LLtype;
        unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
                                         countWksp, max, mostFrequent, nbSeq,
                                         LLFSELog, prevEntropy->litlengthCTable,
                                         LL_defaultNorm, LL_defaultNormLog,
                                         ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                                                      countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                      prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
                                                      cTableWksp, cTableWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
            if (LLtype == set_compressed)
                fseMetadata->lastCountSize = countSize;
            op += countSize;
            fseMetadata->llType = (symbolEncodingType_e) LLtype;
    }   }
    /* build CTable for Offsets */
    {   U32 Offtype;
        unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
                                          countWksp, max, mostFrequent, nbSeq,
                                          OffFSELog, prevEntropy->offcodeCTable,
                                          OF_defaultNorm, OF_defaultNormLog,
                                          defaultPolicy, strategy);
        assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                                                      countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                      prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
                                                      cTableWksp, cTableWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
            if (Offtype == set_compressed)
                fseMetadata->lastCountSize = countSize;
            op += countSize;
            fseMetadata->ofType = (symbolEncodingType_e) Offtype;
    }   }
    /* build CTable for MatchLengths */
    {   U32 MLtype;
        unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
                                         countWksp, max, mostFrequent, nbSeq,
                                         MLFSELog, prevEntropy->matchlengthCTable,
                                         ML_defaultNorm, ML_defaultNormLog,
                                         ZSTD_defaultAllowed, strategy);
        assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                                                      countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                      prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
                                                      cTableWksp, cTableWkspSize);
            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
            if (MLtype == set_compressed)
                fseMetadata->lastCountSize = countSize;
            op += countSize;
            fseMetadata->mlType = (symbolEncodingType_e) MLtype;
    }   }
    assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
    return op-ostart;
}

/* ZSTD_buildSuperBlockEntropy() :
 *  Builds entropy for the super-block.
 *  @return : 0 on success or error code */
static size_t
ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
                      const ZSTD_entropyCTables_t* prevEntropy,
                            ZSTD_entropyCTables_t* nextEntropy,
                      const ZSTD_CCtx_params* cctxParams,
                            ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                            void* workspace, size_t wkspSize)
{
    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
    entropyMetadata->hufMetadata.hufDesSize =
        ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
                                            &prevEntropy->huf, &nextEntropy->huf,
                                            &entropyMetadata->hufMetadata,
                                            ZSTD_disableLiteralsCompression(cctxParams),
                                            workspace, wkspSize);
    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
    entropyMetadata->fseMetadata.fseTablesSize =
        ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
                                              &prevEntropy->fse, &nextEntropy->fse,
                                              cctxParams,
                                              &entropyMetadata->fseMetadata,
                                              workspace, wkspSize);
    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
    return 0;
}

/* ZSTD_compressSubBlock_literal() :
 *  Compresses the literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks' literals sections will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks' literals sections will be RLE_Literals_Block.
 *      If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block
 *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
 *      If it is set_repeat, all sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of the literals section of a sub-block
 *            Or 0 if it is unable to compress.
 *            Or error code */
static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                    const BYTE* literals, size_t litSize,
                                    void* dst, size_t dstSize,
                                    const int bmi2, int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    (void)bmi2; /* TODO bmi2... */

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    /* TODO bmi2 */
    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size. */
        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
            assert(cLitSize > litSize);
            DEBUGLOG(5, "Literals expanded beyond allowed header size");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default: /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    *entropyWritten = 1;
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
    return op-ostart;
}

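/* Illustrative example of the literals section header built above :
 * with lhSize==3 (single stream), the 24-bit header packs, from bit 0 upward,
 *   hType (2 bits) | sizeFormat (2 bits) | regenerated size (10 bits) | compressed size (10 bits).
 * e.g. hType==set_compressed (2), litSize==1000, cLitSize==600 :
 *   lhc = 2 + (0<<2) + (1000<<4) + (600<<14) = 9846402 = 0x963E82,
 * written little-endian as the bytes 82 3E 96. */
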
static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
    const seqDef* const sstart = sequences;
    const seqDef* const send = sequences + nbSeq;
    const seqDef* sp = sstart;
    size_t matchLengthSum = 0;
    size_t litLengthSum = 0;
    /* Only used by assert(), suppress unused variable warnings in production. */
    (void)litLengthSum;
    while (send-sp > 0) {
        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
        litLengthSum += seqLen.litLength;
        matchLengthSum += seqLen.matchLength;
        sp++;
    }
    assert(litLengthSum <= litSize);
    if (!lastSequence) {
        assert(litLengthSum == litSize);
    }
    return matchLengthSum + litSize;
}

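/* Illustrative example of the sequences section header written below :
 * the number of sequences is encoded in 1, 2 or 3 bytes (LONGNBSEQ == 0x7F00) :
 *   nbSeq = 100                : 1 byte  : 0x64
 *   nbSeq = 1000 (< LONGNBSEQ) : 2 bytes : 0x83 0xE8  ((nbSeq>>8)+0x80, nbSeq&0xFF)
 *   nbSeq = 40000              : 3 bytes : 0xFF, then LE16(40000 - 0x7F00)
 * followed by the 1-byte seqHead holding the three symbol compression modes. */
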
/* ZSTD_compressSubBlock_sequences() :
 *  Compresses the sequences section for a sub-block.
 *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
 *  symbol compression modes for the super-block.
 *  The first successfully compressed block will have these in its header.
 *  We set entropyWritten=1 when we succeed in compressing the sequences.
 *  The following sub-blocks will always have repeat mode.
 *  @return : compressed size of the sequences section of a sub-block
 *            Or 0 if it is unable to compress
 *            Or error code. */
static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                              const seqDef* sequences, size_t nbSeq,
                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                              const ZSTD_CCtx_params* cctxParams,
                                              void* dst, size_t dstCapacity,
                                              const int bmi2, int writeEntropy, int* entropyWritten)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    BYTE* seqHead;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);

    *entropyWritten = 0;
    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "");
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));

    if (writeEntropy) {
        const U32 LLtype = fseMetadata->llType;
        const U32 Offtype = fseMetadata->ofType;
        const U32 MLtype = fseMetadata->mlType;
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
        op += fseMetadata->fseTablesSize;
    } else {
        const U32 repeat = set_repeat;
        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        fseTables->matchlengthCTable, mlCode,
                                        fseTables->offcodeCTable, ofCode,
                                        fseTables->litlengthCTable, llCode,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
#endif
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
    }

    /* zstd versions <= 1.4.0 mistakenly report an error when the
     * sequences section body size is less than 3 bytes.
     * Fixed by https://github.com/facebook/zstd/pull/1664.
     * This can happen when the previous sequences section block is compressed
     * with rle mode and the current block's sequences section is compressed
     * with repeat mode, where the sequences section body size can be 1 byte.
     */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (op-seqHead < 4) {
        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
                    "an uncompressed block when sequences are < 4 bytes");
        return 0;
    }
#endif

    *entropyWritten = 1;
    return op - ostart;
}

/* ZSTD_compressSubBlock() :
 *  Compresses a single sub-block.
 *  @return : compressed size of the sub-block
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                    const seqDef* sequences, size_t nbSeq,
                                    const BYTE* literals, size_t litSize,
                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                    const ZSTD_CCtx_params* cctxParams,
                                    void* dst, size_t dstCapacity,
                                    const int bmi2,
                                    int writeLitEntropy, int writeSeqEntropy,
                                    int* litEntropyWritten, int* seqEntropyWritten,
                                    U32 lastBlock)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart + ZSTD_blockHeaderSize;
    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
                                                        &entropyMetadata->hufMetadata, literals, litSize,
                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
        if (cLitSize == 0) return 0;
        op += cLitSize;
    }
    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
                                                          &entropyMetadata->fseMetadata,
                                                          sequences, nbSeq,
                                                          llCode, mlCode, ofCode,
                                                          cctxParams,
                                                          op, oend-op,
                                                          bmi2, writeSeqEntropy, seqEntropyWritten);
        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
        if (cSeqSize == 0) return 0;
        op += cSeqSize;
    }
    /* Write block header */
    {   size_t cSize = (op-ostart) - ZSTD_blockHeaderSize;
        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
        MEM_writeLE24(ostart, cBlockHeader24);
    }
    return op-ostart;
}

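/* Illustrative example of the 3-byte block header written above :
 * Block_Header = lastBlock (1 bit) + blockType (2 bits) + blockSize (21 bits).
 * e.g. a non-last compressed sub-block (bt_compressed == 2) of 100 content bytes :
 *   0 + (2<<1) + (100<<3) = 804 = 0x000324, written little-endian as 24 03 00. */
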
static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
                                                const ZSTD_hufCTables_t* huf,
                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                                void* workspace, size_t wkspSize,
                                                int writeEntropy)
{
    unsigned* const countWksp = (unsigned*)workspace;
    unsigned maxSymbolValue = 255;
    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */

    if (hufMetadata->hType == set_basic) return litSize;
    else if (hufMetadata->hType == set_rle) return 1;
    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
        size_t const largest = HIST_count_wksp(countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
        if (ZSTD_isError(largest)) return litSize;
        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
            return cLitSizeEstimate + literalSectionHeaderSize;
    }   }
    assert(0); /* impossible */
    return 0;
}

static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
                        const BYTE* codeTable, unsigned maxCode,
                        size_t nbSeq, const FSE_CTable* fseCTable,
                        const U32* additionalBits,
                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                        void* workspace, size_t wkspSize)
{
    unsigned* const countWksp = (unsigned*)workspace;
    const BYTE* ctp = codeTable;
    const BYTE* const ctStart = ctp;
    const BYTE* const ctEnd = ctStart + nbSeq;
    size_t cSymbolTypeSizeEstimateInBits = 0;
    unsigned max = maxCode;

    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
    if (type == set_basic) {
        /* We selected this encoding type, so it must be valid. */
        assert(max <= defaultMax);
        cSymbolTypeSizeEstimateInBits = max <= defaultMax
                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
                : ERROR(GENERIC);
    } else if (type == set_rle) {
        cSymbolTypeSizeEstimateInBits = 0;
    } else if (type == set_compressed || type == set_repeat) {
        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
    }
    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
    while (ctp < ctEnd) {
        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
        ctp++;
    }
    return cSymbolTypeSizeEstimateInBits / 8;
}

static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
                                                  const BYTE* llCodeTable,
                                                  const BYTE* mlCodeTable,
                                                  size_t nbSeq,
                                                  const ZSTD_fseCTables_t* fseTables,
                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                  void* workspace, size_t wkspSize,
                                                  int writeEntropy)
{
    size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
    size_t cSeqSizeEstimate = 0;
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
                                         nbSeq, fseTables->offcodeCTable, NULL,
                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
                                         nbSeq, fseTables->litlengthCTable, LL_bits,
                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
                                         workspace, wkspSize);
    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
    return cSeqSizeEstimate + sequencesSectionHeaderSize;
}

static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
                                        const BYTE* ofCodeTable,
                                        const BYTE* llCodeTable,
                                        const BYTE* mlCodeTable,
                                        size_t nbSeq,
                                        const ZSTD_entropyCTables_t* entropy,
                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                        void* workspace, size_t wkspSize,
                                        int writeLitEntropy, int writeSeqEntropy) {
    size_t cSizeEstimate = 0;
    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
                                                       &entropy->huf, &entropyMetadata->hufMetadata,
                                                       workspace, wkspSize, writeLitEntropy);
    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
                                                         workspace, wkspSize, writeSeqEntropy);
    return cSizeEstimate + ZSTD_blockHeaderSize;
}

static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
        return 1;
    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
        return 1;
    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
        return 1;
    return 0;
}

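/* How the estimates drive splitting (illustrative numbers) :
 * ZSTD_compressSubBlock_multi() below accumulates sequences one at a time and
 * recomputes ZSTD_estimateSubBlockSize() after each; once the estimate exceeds
 * cctxParams->targetCBlockSize (or the last sequence is reached), the
 * accumulated sequences and literals are flushed as one sub-block. With a
 * target of, say, 1500 bytes, a super-block whose estimate passes 1500 after
 * 40 sequences emits a sub-block for those 40 sequences and starts over. */
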
/* ZSTD_compressSubBlock_multi() :
 *  Breaks a super-block into multiple sub-blocks and compresses them.
 *  Entropy will be written to the first block.
 *  The following blocks will use repeat mode to compress.
 *  All sub-blocks are compressed blocks (no raw or rle blocks).
 *  @return : compressed size of the super block (which is multiple ZSTD blocks)
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                            const ZSTD_compressedBlockState_t* prevCBlock,
                                  ZSTD_compressedBlockState_t* nextCBlock,
                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                            const ZSTD_CCtx_params* cctxParams,
                                  void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const int bmi2, U32 lastBlock,
                                  void* workspace, size_t wkspSize)
{
    const seqDef* const sstart = seqStorePtr->sequencesStart;
    const seqDef* const send = seqStorePtr->sequences;
    const seqDef* sp = sstart;
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;
    BYTE const* ip = (BYTE const*)src;
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t targetCBlockSize = cctxParams->targetCBlockSize;
    size_t litSize, seqCount;
    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
    int writeSeqEntropy = 1;
    int lastSequence = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
                (unsigned)(lend-lp), (unsigned)(send-sstart));

    litSize = 0;
    seqCount = 0;
    do {
        size_t cBlockSizeEstimate = 0;
        if (sstart == send) {
            lastSequence = 1;
        } else {
            const seqDef* const sequence = sp + seqCount;
            lastSequence = sequence == send - 1;
            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
            seqCount++;
        }
        if (lastSequence) {
            assert(lp <= lend);
            assert(litSize <= (size_t)(lend - lp));
            litSize = (size_t)(lend - lp);
        }
        /* I think there is an optimization opportunity here.
         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
         * since it recalculates the estimate from scratch.
         * For example, it would recount the literal distribution and symbol codes every time.
         */
        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
                                                       &nextCBlock->entropy, entropyMetadata,
                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
            int litEntropyWritten = 0;
            int seqEntropyWritten = 0;
            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                       sp, seqCount,
                                                       lp, litSize,
                                                       llCodePtr, mlCodePtr, ofCodePtr,
                                                       cctxParams,
                                                       op, oend-op,
                                                       bmi2, writeLitEntropy, writeSeqEntropy,
                                                       &litEntropyWritten, &seqEntropyWritten,
                                                       lastBlock && lastSequence);
            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
            if (cSize > 0 && cSize < decompressedSize) {
                DEBUGLOG(5, "Committed the sub-block");
                assert(ip + decompressedSize <= iend);
                ip += decompressedSize;
                sp += seqCount;
                lp += litSize;
                op += cSize;
                llCodePtr += seqCount;
                mlCodePtr += seqCount;
                ofCodePtr += seqCount;
                litSize = 0;
                seqCount = 0;
                /* Entropy only needs to be written once */
                if (litEntropyWritten) {
                    writeLitEntropy = 0;
                }
                if (seqEntropyWritten) {
                    writeSeqEntropy = 0;
                }
            }
        }
    } while (!lastSequence);
    if (writeLitEntropy) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
         */
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
        return 0;
    }
    if (ip < iend) {
        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
        assert(cSize != 0);
        op += cSize;
        /* We have to regenerate the repcodes because we've skipped some sequences */
        if (sp < send) {
            seqDef const* seq;
            repcodes_t rep;
            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
            for (seq = sstart; seq < sp; ++seq) {
                rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            }
            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
        }
    }
    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
    return op-ostart;
}

size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock) {
    ZSTD_entropyCTablesMetadata_t entropyMetadata;

    FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
          &zc->blockState.prevCBlock->entropy,
          &zc->blockState.nextCBlock->entropy,
          &zc->appliedParams,
          &entropyMetadata,
          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");

    return ZSTD_compressSubBlock_multi(&zc->seqStore,
            zc->blockState.prevCBlock,
            zc->blockState.nextCBlock,
            &entropyMetadata,
            &zc->appliedParams,
            dst, dstCapacity,
            src, srcSize,
            zc->bmi2, lastBlock,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}
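
/* Usage note : ZSTD_compressSuperBlock() is intended to be invoked by the block
 * compressor when a target compressed block size has been requested
 * (ZSTD_c_targetCBlockSize), so that each block is re-emitted as sub-blocks
 * whose estimated compressed size stays close to cctxParams->targetCBlockSize. */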