zstd_cwksp.h
/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
* Dependencies
***************************************/
#include "../common/zstd_internal.h"


/*-*************************************
* Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/*-*************************************
* Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/*
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/*
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate the process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created.
 * It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                           ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned
 * 4. Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;

/*-*************************************
* Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/*
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
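
/*
 * Worked example (illustrative note, not part of the zstd sources): rounding
 * a 13-byte request up to an 8-byte boundary with the mask trick above:
 *
 *     align = 8   =>  mask = 0x7
 *     (13 + 0x7) & ~0x7  =  20 & ~0x7  =  16
 *
 * A size that is already a multiple of the alignment is unchanged:
 * (16 + 0x7) & ~0x7 == 16.
 */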

/*
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
    return size;
}

MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* If unaligned allocations down from a too-large top have left us
             * unaligned, we need to realign our alloc ptr. Technically, this
             * can consume space that is unaccounted for in the neededSpace
             * calculation. However, I believe this can only happen when the
             * workspace is too large, and specifically when it is too large
             * by a larger margin than the space that will be consumed. */
            /* TODO: cleaner, compiler warning friendly way to do this??? */
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/*
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/*
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;

    if (bytes == 0)
        return NULL;

    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;

    return alloc;
}

/*
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/*
 * Reserves and returns memory sized and aligned on sizeof(unsigned).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}
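
/*
 * Illustrative sketch (not part of the zstd implementation): how a caller
 * might budget and then carve an unaligned scratch buffer plus a U32-aligned
 * array out of an already-initialized workspace. The function name and the
 * sizes below are hypothetical; only the call pattern is meant to be shown.
 */
MEM_STATIC int ZSTD_cwksp_exampleReserve_sketch(ZSTD_cwksp* ws) {
    size_t const bufferSize  = (size_t)1 << 12;    /* hypothetical scratch buffer */
    size_t const alignedSize = 64 * sizeof(U32);   /* hypothetical U32 array */
    /* When budgeting, each allocation is counted via ZSTD_cwksp_alloc_size();
     * the reserve calls below are then made with the raw sizes. */
    size_t const neededSpace = ZSTD_cwksp_alloc_size(bufferSize)
                             + ZSTD_cwksp_alloc_size(alignedSize);
    BYTE* buffer;
    U32*  aligned;
    if (ZSTD_cwksp_available_space(ws) < neededSpace) return 0;
    /* Unaligned buffers must be reserved before any aligned reservation ... */
    buffer  = ZSTD_cwksp_reserve_buffer(ws, bufferSize);
    /* ... and aligned reservations must be a multiple of sizeof(U32). */
    aligned = (U32*)ZSTD_cwksp_reserve_aligned(ws, alignedSize);
    /* Either call returns NULL (and sets ws->allocFailed) on failure. */
    return (buffer != NULL) && (aligned != NULL);
}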

/*
 * Aligned on sizeof(unsigned). These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

    return alloc;
}

/*
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/*
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/*
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}
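
/*
 * Illustrative sketch (not part of the zstd implementation): given a freshly
 * initialized workspace, the four reservation types must be requested in
 * phase order: objects first, then buffers, then aligned allocations, then
 * tables. The function name, variable names, and sizes are hypothetical.
 */
MEM_STATIC int ZSTD_cwksp_exampleAllocOrder_sketch(ZSTD_cwksp* ws) {
    /* 1. Objects: sizeof(void*)-aligned, carved from the front of the arena. */
    void* const entropyWksp = ZSTD_cwksp_reserve_object(ws, 512);
    /* 2. Buffers: unaligned, carved downward from the back of the arena. */
    BYTE* const inBuff      = ZSTD_cwksp_reserve_buffer(ws, (size_t)1 << 12);
    /* 3. Aligned: sizeof(U32)-aligned, also carved downward from the back. */
    U32*  const seqScratch  = (U32*)ZSTD_cwksp_reserve_aligned(ws, 128 * sizeof(U32));
    /* 4. Tables: grow upward from the object region toward the free space. */
    U32*  const hashTable   = (U32*)ZSTD_cwksp_reserve_table(ws, ((size_t)1 << 10) * sizeof(U32));
    /* Requesting another object at this point would fail, since the phase
     * has already advanced past ZSTD_cwksp_alloc_objects. */
    return (entropyWksp != NULL) && (inBuff != NULL)
        && (seqScratch != NULL) && (hashTable != NULL);
}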

/*
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/*
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
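
/*
 * Illustrative sketch (not part of the zstd implementation): the lifecycle of
 * a dynamically allocated workspace. A workspace created with
 * ZSTD_cwksp_create() must be released with ZSTD_cwksp_free(); a
 * caller-provided buffer would instead be handed over via ZSTD_cwksp_init()
 * with ZSTD_cwksp_static_alloc and never freed by the cwksp. The function
 * name and the 64 KB size are made up for the example.
 */
MEM_STATIC size_t ZSTD_cwksp_exampleLifecycle_sketch(ZSTD_customMem customMem) {
    ZSTD_cwksp ws;
    size_t const err = ZSTD_cwksp_create(&ws, (size_t)1 << 16, customMem);
    if (ERR_isError(err)) return err;   /* underlying allocation failed */
    {   /* The enclosing context object, if any, must be the first reservation. */
        void* const obj = ZSTD_cwksp_reserve_object(&ws, 64);
        (void)obj;
    }
    ZSTD_cwksp_free(&ws, customMem);    /* releases the single OS allocation */
    return 0;
}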

/*
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
* Functions Checking Free Space
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}


#endif /* ZSTD_CWKSP_H */