/**
 * \file zstd.c
 * Single-file Zstandard library.
 *
 * Generate using:
 * \code
 *   combine.sh -r ../../lib -o zstd.c zstd-in.c
 * \endcode
 */
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
/*
 * Settings to bake for the single library file.
 *
 * Note: It's important that none of these affects 'zstd.h' (only the
 * implementation files we're amalgamating).
 *
 * Note: MEM_MODULE stops xxhash redefining BYTE, U16, etc., which are also
 * defined in mem.h (breaking C99 compatibility).
 *
 * Note: the undefs for xxHash allow Zstd's implementation to coincide with
 * standalone xxHash usage (with global defines).
 *
 * Note: multithreading is enabled for all platforms apart from Emscripten.
 */
#define DEBUGLEVEL 0
#define MEM_MODULE
#undef  XXH_NAMESPACE
#define XXH_NAMESPACE ZSTD_
#undef  XXH_PRIVATE_API
#define XXH_PRIVATE_API
#undef  XXH_INLINE_ALL
#define XXH_INLINE_ALL
#define ZSTD_LEGACY_SUPPORT 0
#ifndef __EMSCRIPTEN__
#define ZSTD_MULTITHREAD
#endif
#define ZSTD_TRACE 0

/* Include zstd_deps.h first with all the options we need enabled. */
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64

/**** start inlining common/zstd_deps.h ****/
/*
 * Copyright (c) 2016-2021, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This file provides common libc dependencies that zstd requires.
 * The purpose is to allow replacing this file with a custom implementation
 * to compile zstd without libc support.
 */

/* Need:
 * NULL
 * INT_MAX
 * UINT_MAX
 * ZSTD_memcpy()
 * ZSTD_memset()
 * ZSTD_memmove()
 */
#ifndef ZSTD_DEPS_COMMON
#define ZSTD_DEPS_COMMON

#include <limits.h>
#include <stddef.h>
#include <string.h>

#if defined(__GNUC__) && __GNUC__ >= 4
# define ZSTD_memcpy(d,s,l) __builtin_memcpy((d),(s),(l))
# define ZSTD_memmove(d,s,l) __builtin_memmove((d),(s),(l))
# define ZSTD_memset(p,v,l) __builtin_memset((p),(v),(l))
#else
# define ZSTD_memcpy(d,s,l) memcpy((d),(s),(l))
# define ZSTD_memmove(d,s,l) memmove((d),(s),(l))
# define ZSTD_memset(p,v,l) memset((p),(v),(l))
#endif

#endif /* ZSTD_DEPS_COMMON */

/* Need:
 * ZSTD_malloc()
 * ZSTD_free()
 * ZSTD_calloc()
 */
#ifdef ZSTD_DEPS_NEED_MALLOC
#ifndef ZSTD_DEPS_MALLOC
#define ZSTD_DEPS_MALLOC

#include <stdlib.h>

#define ZSTD_malloc(s) malloc(s)
#define ZSTD_calloc(n,s) calloc((n), (s))
#define ZSTD_free(p) free((p))

#endif /* ZSTD_DEPS_MALLOC */
#endif /* ZSTD_DEPS_NEED_MALLOC */

/*
 * Provides 64-bit math support.
 * Need:
 * U64 ZSTD_div64(U64 dividend, U32 divisor)
 */
#ifdef ZSTD_DEPS_NEED_MATH64
#ifndef ZSTD_DEPS_MATH64
#define ZSTD_DEPS_MATH64

#define ZSTD_div64(dividend, divisor) ((dividend) / (divisor))

#endif /* ZSTD_DEPS_MATH64 */
#endif /* ZSTD_DEPS_NEED_MATH64 */

/* Need:
 * assert()
 */
#ifdef ZSTD_DEPS_NEED_ASSERT
#ifndef ZSTD_DEPS_ASSERT
#define ZSTD_DEPS_ASSERT

#include <assert.h>

#endif /* ZSTD_DEPS_ASSERT */
#endif /* ZSTD_DEPS_NEED_ASSERT */
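/* Illustrative sketch (not part of the library): a libc-free build would
 * replace zstd_deps.h with one defining the same macro surface, e.g. routing
 * allocation to a custom arena. The my_arena_* names are hypothetical.
 */
#if 0
#define ZSTD_malloc(s)    my_arena_alloc(s)
#define ZSTD_calloc(n,s)  my_arena_calloc((n), (s))
#define ZSTD_free(p)      my_arena_free(p)
#endif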
/* Need:
 * ZSTD_DEBUG_PRINT()
 */
#ifdef ZSTD_DEPS_NEED_IO
#ifndef ZSTD_DEPS_IO
#define ZSTD_DEPS_IO

#include <stdio.h>
#define ZSTD_DEBUG_PRINT(...) fprintf(stderr, __VA_ARGS__)

#endif /* ZSTD_DEPS_IO */
#endif /* ZSTD_DEPS_NEED_IO */

/* Only requested when <stdint.h> is known to be present.
 * Need:
 * intptr_t
 */
#ifdef ZSTD_DEPS_NEED_STDINT
#ifndef ZSTD_DEPS_STDINT
#define ZSTD_DEPS_STDINT

#include <stdint.h>

#endif /* ZSTD_DEPS_STDINT */
#endif /* ZSTD_DEPS_NEED_STDINT */
/**** ended inlining common/zstd_deps.h ****/

/**** start inlining common/debug.c ****/
/* ******************************************************************
 * debug
 * Part of FSE library
 * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/*
 * This module only hosts one global variable
 * which can be used to dynamically influence the verbosity of traces,
 * such as DEBUGLOG and RAWLOG
 */

/**** start inlining debug.h ****/
/* ******************************************************************
 * debug
 * Part of FSE library
 * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/*
 * The purpose of this header is to enable debug functions.
 * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
 * and DEBUG_STATIC_ASSERT() for compile-time.
 *
 * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
 *
 * Level 1 enables assert() only.
 * Starting level 2, traces can be generated and pushed to stderr.
 * The higher the level, the more verbose the traces.
 *
 * It's possible to dynamically adjust level using variable g_debug_level,
 * which is only declared if DEBUGLEVEL>=2,
 * and is a global variable, not multi-thread protected (use with care)
 */

#ifndef DEBUG_H_12987983217
#define DEBUG_H_12987983217

#if defined (__cplusplus)
extern "C" {
#endif


/* static assert is triggered at compile time, leaving no runtime artefact.
 * static assert only works with compile-time constants.
 * Also, this variant can only be used inside a function. */
#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])


/* DEBUGLEVEL is expected to be defined externally,
 * typically through compiler command line.
 * Value must be a number. */
#ifndef DEBUGLEVEL
#  define DEBUGLEVEL 0
#endif


/* recommended values for DEBUGLEVEL :
 * 0 : release mode, no debug, all run-time checks disabled
 * 1 : enables assert() only, no display
 * 2 : reserved, for currently active debug path
 * 3 : events once per object lifetime (CCtx, CDict, etc.)
 * 4 : events once per frame
 * 5 : events once per block
 * 6 : events once per sequence (verbose)
 * 7+: events at every position (*very* verbose)
 *
 * It's generally inconvenient to output traces > 5.
 * In which case, it's possible to selectively trigger high verbosity levels
 * by modifying g_debug_level.
 */
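/* Illustrative usage sketch (not part of the library): rebuild with
 * -DDEBUGLEVEL=5 or higher, then a trace such as
 *     DEBUGLOG(5, "compressing block of %u bytes", (unsigned)blockSize);
 * prints to stderr. To restrict very verbose levels to a region of
 * interest, raise g_debuglevel at run-time around the hot spot, e.g.
 *     g_debuglevel = 7;  ...hot spot...  g_debuglevel = DEBUGLEVEL;
 * DEBUG_STATIC_ASSERT(sizeof(int) >= 4) is a compile-time check usable
 * inside any function body.
 */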
#if (DEBUGLEVEL>=1)
#  define ZSTD_DEPS_NEED_ASSERT
/**** skipping file: zstd_deps.h ****/
#else
#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
#    define assert(condition) ((void)0)   /* disable assert (default) */
#  endif
#endif

#if (DEBUGLEVEL>=2)
#  define ZSTD_DEPS_NEED_IO
/**** skipping file: zstd_deps.h ****/
extern int g_debuglevel; /* the variable is only declared,
                            it actually lives in debug.c,
                            and is shared by the whole process.
                            It's not thread-safe.
                            It's useful when enabling very verbose levels
                            on selective conditions (such as position in src) */

#  define RAWLOG(l, ...) {                                       \
                if (l<=g_debuglevel) {                           \
                    ZSTD_DEBUG_PRINT(__VA_ARGS__);               \
            }   }
#  define DEBUGLOG(l, ...) {                                     \
                if (l<=g_debuglevel) {                           \
                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
                    ZSTD_DEBUG_PRINT(" \n");                     \
            }   }
#else
#  define RAWLOG(l, ...)      {}    /* disabled */
#  define DEBUGLOG(l, ...)    {}    /* disabled */
#endif


#if defined (__cplusplus)
}
#endif

#endif /* DEBUG_H_12987983217 */
/**** ended inlining debug.h ****/

int g_debuglevel = DEBUGLEVEL;
/**** ended inlining common/debug.c ****/

/**** start inlining common/entropy_common.c ****/
/* ******************************************************************
 * Common functions of New Generation Entropy library
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 *
 *  You can contact the author at :
 *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* *************************************
*  Dependencies
***************************************/
/**** start inlining mem.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>  /* size_t, ptrdiff_t */
/**** start inlining compiler.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

/*-*******************************************************
*  Compiler specifics
*********************************************************/
/* force inlining */

#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__) || defined(__ICCARM__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#else

#define INLINE_KEYWORD
#define FORCE_INLINE_ATTR

#endif

/**
  On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC).
  This explicitly marks such functions as __cdecl so that the code will still compile
  if a CC other than __cdecl has been made the default.
*/
#if defined(_MSC_VER)
#  define WIN_CDECL __cdecl
#else
#  define WIN_CDECL
#endif

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR

/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compilers
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif

/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#if defined(__GNUC__)
#  define UNUSED_ATTR __attribute__((unused))
#else
#  define UNUSED_ATTR
#endif

/* force no inlining */
#ifdef _MSC_VER
#  define FORCE_NOINLINE static __declspec(noinline)
#else
#  if defined(__GNUC__) || defined(__ICCARM__)
#    define FORCE_NOINLINE static __attribute__((__noinline__))
#  else
#    define FORCE_NOINLINE static
#  endif
#endif


/* target attribute */
#ifndef __has_attribute
  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
#endif
#if defined(__GNUC__) || defined(__ICCARM__)
#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
#  define TARGET_ATTRIBUTE(target)
#endif
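/* Illustrative sketch (hypothetical function, not library code): a C
 * "template" built with FORCE_INLINE_TEMPLATE takes a constant parameter;
 * once force-inlined into each wrapper below, the compiler folds `wide`
 * and deletes the dead branch.
 */
#if 0
FORCE_INLINE_TEMPLATE
unsigned sum_template(const unsigned* src, size_t n, int wide)
{
    unsigned total = 0;
    size_t i;
    for (i = 0; i < n; i++) {
        if (wide) total += src[i] * 2;   /* kept only in sum_wide */
        else      total += src[i];       /* kept only in sum_narrow */
    }
    return total;
}
static unsigned sum_narrow(const unsigned* src, size_t n) { return sum_template(src, n, 0); }
static unsigned sum_wide  (const unsigned* src, size_t n) { return sum_template(src, n, 1); }
#endif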
/* Enable runtime BMI2 dispatch based on the CPU.
 * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
 */
#ifndef DYNAMIC_BMI2
  #if ((defined(__clang__) && __has_attribute(__target__)) \
      || (defined(__GNUC__) \
          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
      && (defined(__x86_64__) || defined(_M_X86)) \
      && !defined(__BMI2__)
  #  define DYNAMIC_BMI2 1
  #else
  #  define DYNAMIC_BMI2 0
  #endif
#endif

/* prefetch
 * can be disabled, by declaring NO_PREFETCH build macro */
#if defined(NO_PREFETCH)
#  define PREFETCH_L1(ptr)  (void)(ptr)  /* disabled */
#  define PREFETCH_L2(ptr)  (void)(ptr)  /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#    define PREFETCH_L2(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#    define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
#  elif defined(__aarch64__)
#    define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
#    define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
#  else
#    define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
#    define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
#  endif
#endif  /* NO_PREFETCH */

#define CACHELINE_SIZE 64

#define PREFETCH_AREA(p, s)  {                              \
    const char* const _ptr = (const char*)(p);              \
    size_t const _size = (size_t)(s);                       \
    size_t _pos;                                            \
    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {        \
        PREFETCH_L2(_ptr + _pos);                           \
    }                                                       \
}

/* vectorization
 * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
#  else
#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
#  endif
#else
#  define DONT_VECTORIZE
#endif

/* Tell the compiler that a branch is likely or unlikely.
 * Only use these macros if it causes the compiler to generate better code.
 * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
 * and clang, please do. */
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif

/* disable warnings */
#ifdef _MSC_VER    /* Visual Studio */
#  include <intrin.h>             /* For Visual 2005 */
#  pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
#  pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#  pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif

/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
#ifndef STATIC_BMI2
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
#    ifdef __AVX2__  //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2
#       define STATIC_BMI2 1
#    endif
#  endif
#endif

#ifndef STATIC_BMI2
    #define STATIC_BMI2 0
#endif

/* compat. with non-clang compilers */
#ifndef __has_builtin
#  define __has_builtin(x) 0
#endif
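/* Illustrative usage sketch (hypothetical function, not library code):
 * hint the error path as rare, and warm the L2 cache before a table scan.
 */
#if 0
static int scan_table(const char* table, size_t tableSize)
{
    if (UNLIKELY(table == NULL)) return -1;  /* rarely-taken branch */
    PREFETCH_AREA(table, tableSize);         /* one PREFETCH_L2 per cache line */
    return 0;
}
#endif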
/* compat. with non-clang compilers */
#ifndef __has_feature
#  define __has_feature(x) 0
#endif

/* detects whether we are being compiled under msan */
#ifndef ZSTD_MEMORY_SANITIZER
#  if __has_feature(memory_sanitizer)
#    define ZSTD_MEMORY_SANITIZER 1
#  else
#    define ZSTD_MEMORY_SANITIZER 0
#  endif
#endif

#if ZSTD_MEMORY_SANITIZER
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h>  /* size_t */
#define ZSTD_DEPS_NEED_STDINT
/**** skipping file: zstd_deps.h ****/

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif

/* detects whether we are being compiled under asan */
#ifndef ZSTD_ADDRESS_SANITIZER
#  if __has_feature(address_sanitizer)
#    define ZSTD_ADDRESS_SANITIZER 1
#  elif defined(__SANITIZE_ADDRESS__)
#    define ZSTD_ADDRESS_SANITIZER 1
#  else
#    define ZSTD_ADDRESS_SANITIZER 0
#  endif
#endif

#if ZSTD_ADDRESS_SANITIZER
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h>  /* size_t */

/**
 * Marks a memory region ([addr, addr+size)) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of [addr, addr+size) due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region.
 */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region ([addr, addr+size)) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of [addr, addr+size) due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region.
 */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif

#endif /* ZSTD_COMPILER_H */
/**** ended inlining compiler.h ****/
/**** skipping file: debug.h ****/
/**** skipping file: zstd_deps.h ****/


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

/*-**************************************************************
*  Basic Types
*****************************************************************/
#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# if defined(_AIX)
#  include <inttypes.h>
# else
#  include <stdint.h> /* intptr_t */
# endif
  typedef   uint8_t BYTE;
  typedef  uint16_t U16;
  typedef   int16_t S16;
  typedef  uint32_t U32;
  typedef   int32_t S32;
  typedef  uint64_t U64;
  typedef   int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8-bit type"
#endif
  typedef unsigned char   BYTE;
#if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16-bit type"
#endif
  typedef unsigned short  U16;
  typedef   signed short  S16;
#if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32-bit type"
#endif
  typedef unsigned int    U32;
  typedef   signed int    S32;
/* note : there are no limits defined for long long type in C90.
 * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long  U64;
  typedef   signed long long  S64;
#endif


/*-**************************************************************
*  Memory I/O API
*****************************************************************/
/*=== Static platform detection ===*/
MEM_STATIC unsigned MEM_32bits(void);
MEM_STATIC unsigned MEM_64bits(void);
MEM_STATIC unsigned MEM_isLittleEndian(void);

/*=== Native unaligned read/write ===*/
MEM_STATIC U16 MEM_read16(const void* memPtr);
MEM_STATIC U32 MEM_read32(const void* memPtr);
MEM_STATIC U64 MEM_read64(const void* memPtr);
MEM_STATIC size_t MEM_readST(const void* memPtr);

MEM_STATIC void MEM_write16(void* memPtr, U16 value);
MEM_STATIC void MEM_write32(void* memPtr, U32 value);
MEM_STATIC void MEM_write64(void* memPtr, U64 value);

/*=== Little endian unaligned read/write ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr);
MEM_STATIC U32 MEM_readLE24(const void* memPtr);
MEM_STATIC U32 MEM_readLE32(const void* memPtr);
MEM_STATIC U64 MEM_readLE64(const void* memPtr);
MEM_STATIC size_t MEM_readLEST(const void* memPtr);

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);

/*=== Big endian unaligned read/write ===*/
MEM_STATIC U32 MEM_readBE32(const void* memPtr);
MEM_STATIC U64 MEM_readBE64(const void* memPtr);
MEM_STATIC size_t MEM_readBEST(const void* memPtr);

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);

/*=== Byteswap ===*/
MEM_STATIC U32 MEM_swap32(U32 in);
MEM_STATIC U64 MEM_swap64(U64 in);
MEM_STATIC size_t MEM_swapST(size_t in);


/*-**************************************************************
*  Memory I/O Implementation
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The below switch allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
 *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
    return one.c[0];
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates C standard, by lying on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in << 8)  & 0x000000ff00000000ULL) |
            ((in >> 8)  & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}
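/* Illustrative round-trip sketch (hypothetical values, not library code):
 * the LE helpers give fixed-endian I/O regardless of host byte order,
 * e.g. a 4-byte magic number and a 3-byte field.
 */
#if 0
static void le_roundtrip(BYTE* buf)
{
    MEM_writeLE32(buf, 0xFD2FB528U);            /* stored as 28 B5 2F FD */
    assert(MEM_readLE32(buf) == 0xFD2FB528U);   /* true on any host */
    MEM_writeLE24(buf, 0x123456U);              /* low 3 bytes only */
    assert(MEM_readLE24(buf) == 0x123456U);
}
#endif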
/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}

/* code only tested on 32 and 64 bits systems */
MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */
/**** ended inlining mem.h ****/
/**** start inlining error_private.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Note : this module is expected to remain private, do not expose it */

#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/* ****************************************
*  Dependencies
******************************************/
/**** skipping file: zstd_deps.h ****/
/**** start inlining zstd_errors.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_ERRORS_H_398273423
#define ZSTD_ERRORS_H_398273423

#if defined (__cplusplus)
extern "C" {
#endif

/*===== dependency =====*/
#include <stddef.h>   /* size_t */


/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
#ifndef ZSTDERRORLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define ZSTDERRORLIB_VISIBILITY
#  endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
#endif

/*-*********************************************
 *  Error codes list
 *-*********************************************
 *  Error codes _values_ are pinned down since v1.3.1 only.
 *  Therefore, don't rely on values if you may link to any version < v1.3.1.
 *
 *  Only values < 100 are considered stable.
 *
 *  note 1 : this API shall be used with static linking only.
 *           dynamic linking is not yet officially supported.
 *  note 2 : Prefer relying on the enum rather than on its value whenever possible.
 *           This is the only supported way to use the error list < v1.3.1
 *  note 3 : ZSTD_isError() is always correct, whatever the library version.
 **********************************************/
typedef enum {
  ZSTD_error_no_error = 0,
  ZSTD_error_GENERIC  = 1,
  ZSTD_error_prefix_unknown                = 10,
  ZSTD_error_version_unsupported           = 12,
  ZSTD_error_frameParameter_unsupported    = 14,
  ZSTD_error_frameParameter_windowTooLarge = 16,
  ZSTD_error_corruption_detected = 20,
  ZSTD_error_checksum_wrong      = 22,
  ZSTD_error_dictionary_corrupted      = 30,
  ZSTD_error_dictionary_wrong          = 32,
  ZSTD_error_dictionaryCreation_failed = 34,
  ZSTD_error_parameter_unsupported   = 40,
  ZSTD_error_parameter_outOfBound    = 42,
  ZSTD_error_tableLog_tooLarge       = 44,
  ZSTD_error_maxSymbolValue_tooLarge = 46,
  ZSTD_error_maxSymbolValue_tooSmall = 48,
  ZSTD_error_stage_wrong       = 60,
  ZSTD_error_init_missing      = 62,
  ZSTD_error_memory_allocation = 64,
  ZSTD_error_workSpace_tooSmall= 66,
  ZSTD_error_dstSize_tooSmall = 70,
  ZSTD_error_srcSize_wrong    = 72,
  ZSTD_error_dstBuffer_null   = 74,
  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
  ZSTD_error_frameIndex_tooLarge = 100,
  ZSTD_error_seekableIO          = 102,
  ZSTD_error_dstBuffer_wrong     = 104,
  ZSTD_error_srcBuffer_wrong     = 105,
  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;

/*! ZSTD_getErrorCode() :
    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
    which can be used to compare with enum list published above */
ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_ERRORS_H_398273423 */
/**** ended inlining zstd_errors.h ****/


/* ****************************************
*  Compiler-specific
******************************************/
#if defined(__GNUC__)
#  define ERR_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define ERR_STATIC static inline
#elif defined(_MSC_VER)
#  define ERR_STATIC static __inline
#else
#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/*-****************************************
*  Customization (error_public.h)
******************************************/
typedef ZSTD_ErrorCode ERR_enum;
#define PREFIX(name) ZSTD_error_##name


/*-****************************************
*  Error codes handling
******************************************/
#undef ERROR   /* already defined on Visual Studio */
#define ERROR(name) ZSTD_ERROR(name)
#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))

ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }

ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }

/* check and forward error code */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
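/* Illustrative sketch (not library code): error codes travel as a size_t
 * whose value is (size_t)-ZSTD_error_xxx, i.e. a huge number above
 * ERROR(maxCode); ERR_isError() and ERR_getErrorCode() undo the mapping.
 */
#if 0
static void err_roundtrip(void)
{
    size_t const r = ERROR(srcSize_wrong);   /* == (size_t)-72 */
    assert(ERR_isError(r));
    assert(ERR_getErrorCode(r) == ZSTD_error_srcSize_wrong);
}
#endif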
/*-****************************************
*  Error Strings
******************************************/

const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */

ERR_STATIC const char* ERR_getErrorName(size_t code)
{
    return ERR_getErrorString(ERR_getErrorCode(code));
}

#if defined (__cplusplus)
}
#endif

#endif /* ERROR_H_MODULE */
/**** ended inlining error_private.h ****/
#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
/**** start inlining fse.h ****/
/* ******************************************************************
 * FSE : Finite State Entropy codec
 * Public Prototypes declaration
 * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef FSE_H
#define FSE_H


/*-*****************************************
*  Dependencies
******************************************/
/**** skipping file: zstd_deps.h ****/


/*-*****************************************
*  FSE_PUBLIC_API : control library symbols visibility
******************************************/
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
#  define FSE_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
#  define FSE_PUBLIC_API
#endif

/*------   Version   ------*/
#define FSE_VERSION_MAJOR   0
#define FSE_VERSION_MINOR   9
#define FSE_VERSION_RELEASE 0

#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
#define FSE_QUOTE(str) #str
#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)

#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */


/*-****************************************
*  FSE simple functions
******************************************/
/*! FSE_compress() :
    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
    @return : size of compressed data (<= dstCapacity).
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
*/
FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize);

/*! FSE_decompress() :
    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
    into already allocated destination buffer 'dst', of size 'dstCapacity'.
    @return : size of regenerated data (<= dstCapacity),
              or an error code, which can be tested using FSE_isError() .

    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
    Why ? : making this distinction requires a header.
    Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
                               const void* cSrc, size_t cSrcSize);
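/* Illustrative sketch (hypothetical caller, not library code): the two
 * special return values must be handled before treating the output as an
 * FSE-compressed block.
 */
#if 0
static size_t store_block(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    size_t const r = FSE_compress(dst, dstCapacity, src, srcSize);
    if (FSE_isError(r)) return r;   /* real failure */
    if (r == 0) { /* not compressible : caller should store src raw */ }
    if (r == 1) { /* single repeated byte : caller should use RLE */ }
    return r;                       /* otherwise : compressed size */
}
#endif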
/*-*****************************************
*  Tool functions
******************************************/
FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */

/* Error Management */
FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */


/*-*****************************************
*  FSE advanced functions
******************************************/
/*! FSE_compress2() :
    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
    Both parameters can be defined as '0' to mean : use default value
    @return : size of compressed data
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
                     if FSE_isError(return), it's an error code.
*/
FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);


/*-*****************************************
*  FSE detailed API
******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrence from source[] into table count[] (see hist.h)
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'

FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using external method.
*/

/* *** COMPRESSION *** */

/*! FSE_optimalTableLog() :
    dynamically downsize 'tableLog' when conditions are met.
    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
    @return : recommended tableLog (necessarily <= 'maxTableLog') */
FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_normalizeCount() :
    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
    useLowProbCount is a boolean parameter which trades off compressed size for
    faster header decoding. When it is set to 1, the compressed data will be slightly
    smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
    faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
    is a good default, since header deserialization makes a big speed difference.
    Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
    @return : tableLog,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
/*! FSE_NCountWriteBound() :
    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
    Typically useful for allocation purpose. */
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_writeNCount() :
    Compactly save 'normalizedCounter' into 'buffer'.
    @return : size of the compressed table,
              or an errorCode, which can be tested using FSE_isError(). */
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
                                 const short* normalizedCounter,
                                 unsigned maxSymbolValue, unsigned tableLog);

/*! Constructor and Destructor of FSE_CTable.
    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);

/*! FSE_buildCTable() :
    Builds `ct`, which must be already allocated, using FSE_createCTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_compress_usingCTable() :
    Compress `src` using `ct` into `dst` which must be already allocated.
    @return : size of compressed data (<= `dstCapacity`),
              or 0 if compressed data could not fit into `dst`,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);

/*!
Tutorial :
----------
The first step is to count all symbols. FSE_count() does this job very fast.
Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
FSE_count() will return the number of occurrences of the most frequent symbol.
This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

The next step is to normalize the frequencies.
FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
You can use 'tableLog'==0 to mean "use default tableLog value".
If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").

The result of FSE_normalizeCount() will be saved into a table,
called 'normalizedCounter', which is a table of signed short.
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
The return value is tableLog if everything proceeded as expected.
It is 0 if there is a single symbol within distribution.
If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).

'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
'buffer' must be already allocated.
For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
The result of the function is the number of bytes written into 'buffer'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).

'normalizedCounter' can then be used to create the compression table 'CTable'.
The space required by 'CTable' must be already allocated, using FSE_createCTable().
You can then use FSE_buildCTable() to fill 'CTable'.
If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).

'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
If it returns '0', compressed data could not fit into 'dst'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
*/
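/* Illustrative sketch of the compression flow above (hypothetical caller,
 * not library code); assumes count[] was already filled, e.g. by FSE_count(),
 * and uses the < 2 KB heuristic for useLowProbCount.
 */
#if 0
static size_t compress_with_ctable(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
                                   const unsigned* count, unsigned maxSymbolValue)
{
    short norm[256];
    unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
    FSE_CTable* const ct = FSE_createCTable(maxSymbolValue, tableLog);
    size_t nSize, cSize;
    size_t const nErr = FSE_normalizeCount(norm, tableLog, count, srcSize,
                                           maxSymbolValue, srcSize >= 2048);
    if (FSE_isError(nErr)) { FSE_freeCTable(ct); return nErr; }
    nSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);
    if (FSE_isError(nSize)) { FSE_freeCTable(ct); return nSize; }
    (void)FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);
    cSize = FSE_compress_usingCTable((char*)dst + nSize, dstCapacity - nSize,
                                     src, srcSize, ct);
    FSE_freeCTable(ct);
    if (FSE_isError(cSize) || cSize == 0) return cSize;   /* 0 : doesn't fit */
    return nSize + cSize;   /* header + compressed stream */
}
#endif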
/* *** DECOMPRESSION *** */

/*! FSE_readNCount() :
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
                           const void* rBuffer, size_t rBuffSize);

/*! FSE_readNCount_bmi2() :
 * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
 */
FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
                           const void* rBuffer, size_t rBuffSize, int bmi2);

/*! Constructor and Destructor of FSE_DTable.
    Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);

/*! FSE_buildDTable() :
    Builds 'dt', which must be already allocated, using FSE_createDTable().
    return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_decompress_usingDTable() :
    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
    into `dst` which must be already allocated.
    @return : size of regenerated data (necessarily <= `dstCapacity`),
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);

/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
 If block is uncompressed, use memcpy() instead
 If block is a single repeated byte, use memset() instead )

The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().

The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().

`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
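/* Illustrative sketch of the decoding flow above (hypothetical caller,
 * not library code); sizes the table for the worst case (256 symbols)
 * as the tutorial suggests.
 */
#if 0
static size_t decompress_with_dtable(void* dst, size_t dstCapacity,
                                     const void* cSrc, size_t cSrcSize)
{
    short norm[256];
    unsigned maxSymbolValue = 255, tableLog;
    size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog,
                                        cSrc, cSrcSize);
    FSE_DTable* dt;
    size_t dSize;
    if (FSE_isError(hSize)) return hSize;
    dt = FSE_createDTable(tableLog);
    (void)FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
    dSize = FSE_decompress_usingDTable(dst, dstCapacity,
                                       (const char*)cSrc + hSize,
                                       cSrcSize - hSize, dt);   /* exact size required */
    FSE_freeDTable(dt);
    return dSize;
}
#endif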
#endif  /* FSE_H */

#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY

/* *** Dependency *** */
/**** start inlining bitstream.h ****/
/* ******************************************************************
 * bitstream
 * Part of FSE library
 * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*
 * This API consists of small unitary functions, which must be inlined for best performance.
 * Since link-time-optimization is not available for all compilers,
 * these functions are defined into a .h to be included.
 */

/*-****************************************
*  Dependencies
******************************************/
/**** skipping file: mem.h ****/
/**** skipping file: compiler.h ****/
/**** skipping file: debug.h ****/
/**** skipping file: error_private.h ****/


/*=========================================
*  Target specific
=========================================*/
#ifndef ZSTD_NO_INTRINSICS
#  if defined(__BMI__) && defined(__GNUC__)
#    include <immintrin.h>   /* support for bextr (experimental) */
#  elif defined(__ICCARM__)
#    include <intrinsics.h>
#  endif
#endif

#define STREAM_ACCUMULATOR_MIN_32  25
#define STREAM_ACCUMULATOR_MIN_64  57
#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))


/*-******************************************
*  bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
 * A critical property of these streams is that they encode and decode in **reverse** direction.
 * So the first bit sequence you add will be the last to be read, like a LIFO stack.
 */
typedef struct {
    size_t bitContainer;
    unsigned bitPos;
    char*  startPtr;
    char*  ptr;
    char*  endPtr;
} BIT_CStream_t;

MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);

/* Start with initCStream, providing the size of buffer to write into.
 * bitStream will never write outside of this buffer.
 * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
 *
 * bits are first added to a local register.
 * Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
 * Writing data into memory is an explicit operation, performed by the flushBits function.
 * Hence keep track of how many bits are potentially stored into local register to avoid register overflow.
 * After a flushBits, a maximum of 7 bits might still be stored into local register.
 *
 * Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
 *
 * Last operation is to close the bitStream.
 * The function returns the final size of CStream in bytes.
 * If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
 */


/*-********************************************
*  bitStream decoding API (read backward)
**********************************************/
typedef struct {
    size_t   bitContainer;
    unsigned bitsConsumed;
    const char* ptr;
    const char* start;
    const char* limitPtr;
} BIT_DStream_t;

typedef enum { BIT_DStream_unfinished = 0,
               BIT_DStream_endOfBuffer = 1,
               BIT_DStream_completed = 2,
               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
               /* 1,2,4,8 would be better for bitmap combinations, but slow down performance a bit ... :( */

MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);

/* Start by invoking BIT_initDStream().
 * A chunk of the bitStream is then stored into a local register.
 * Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
 * You can then retrieve bitFields stored into the local register, **in reverse order**.
 * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
 * A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
 * Otherwise, it can be less than that, so proceed accordingly.
 * Checking if DStream has reached its end can be performed with BIT_endOfDStream().
 */
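/* Illustrative round-trip sketch (hypothetical buffer, not library code):
 * fields written forward come back in reverse order, as the LIFO note
 * above describes.
 */
#if 0
static void bit_roundtrip(void)
{
    char buf[16];
    BIT_CStream_t c;
    BIT_DStream_t d;
    size_t streamSize;
    (void)BIT_initCStream(&c, buf, sizeof(buf));
    BIT_addBits(&c, 5, 3);    /* written first ... */
    BIT_addBits(&c, 21, 6);   /* ... written second */
    BIT_flushBits(&c);
    streamSize = BIT_closeCStream(&c);   /* 0 would mean overflow */
    (void)BIT_initDStream(&d, buf, streamSize);
    assert(BIT_readBits(&d, 6) == 21);   /* read back in reverse */
    assert(BIT_readBits(&d, 3) == 5);
}
#endif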
*/ /*-**************************************** * unsafe API ******************************************/ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); /* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); /* unsafe version; does not check buffer overflow */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /*-************************************************************** * Internal functions ****************************************************************/ MEM_STATIC unsigned BIT_highbit32 (U32 val) { assert(val != 0); { # if defined(_MSC_VER) /* Visual */ # if STATIC_BMI2 == 1 return _lzcnt_u32(val) ^ 31; # else unsigned long r = 0; return _BitScanReverse(&r, val) ? (unsigned)r : 0; # endif # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; # elif defined(__ICCARM__) /* IAR Intrinsic */ return 31 - __CLZ(val); # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; # endif } } /*===== Local Constants =====*/ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */ #define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) /*-************************************************************** * bitStream encoding ****************************************************************/ /*! BIT_initCStream() : * `dstCapacity` must be > sizeof(size_t) * @return : 0 if success, * otherwise an error code (can be tested using ERR_isError()) */ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity) { bitC->bitContainer = 0; bitC->bitPos = 0; bitC->startPtr = (char*)startPtr; bitC->ptr = bitC->startPtr; bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); return 0; } /*! BIT_addBits() : * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! */ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits) { DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32); assert(nbBits < BIT_MASK_SIZE); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; bitC->bitPos += nbBits; } /*! BIT_addBitsFast() : * works only if `value` is _clean_, * meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits) { assert((value>>nbBits) == 0); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= value << bitC->bitPos; bitC->bitPos += nbBits; } /*! 
BIT_flushBitsFast() :
 *  assumption : bitContainer has not overflowed
 *  unsafe version; does not check buffer overflow */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_flushBits() :
 *  assumption : bitContainer has not overflowed
 *  safe version; checks for buffer overflow, and prevents it.
 *  note : does not signal buffer overflow.
 *         overflow will be revealed later on using BIT_closeCStream() */
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_closeCStream() :
 *  @return : size of CStream, in bytes,
 *            or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
    BIT_flushBits(bitC);
    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}


/*-********************************************************
*  bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
 *  Initialize a BIT_DStream_t.
 * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
 * `srcSize` must be the *exact* size of the bitStream, in bytes.
 * @return : size of stream (== srcSize), or an errorCode if a problem is detected
 */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    bitD->start = (const char*)srcBuffer;
    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);

    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
    } else {
        bitD->ptr   = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
                /* fall-through */
        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
                /* fall-through */
        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
                /* fall-through */
        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
                /* fall-through */
        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
                /* fall-through */
        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
                /* fall-through */
        default: break;
        }
        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
            bitD->bitsConsumed = lastByte ?
                                            8 - BIT_highbit32(lastByte) : 0;
            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
        }
        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
    }

    return srcSize;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
{
    return bitContainer >> start;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
    U32 const regMask = sizeof(bitContainer)*8 - 1;
    /* if start > regMask, bitstream is corrupted, and result is undefined */
    assert(nbBits < BIT_MASK_SIZE);
    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
#if defined(STATIC_BMI2) && STATIC_BMI2 == 1
    return _bzhi_u64(bitContainer, nbBits);
#else
    assert(nbBits < BIT_MASK_SIZE);
    return bitContainer & BIT_mask[nbBits];
#endif
}

/*! BIT_lookBits() :
 *  Provides next n bits from local register.
 *  local register is not modified.
 *  On 32-bits, maxNbBits==24.
 *  On 64-bits, maxNbBits==56.
 * @return : value extracted */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
    /* arbitrate between double-shift and shift+mask */
#if 1
    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
     * bitstream is likely corrupted, and result is undefined */
    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
#else
    /* this code path is slower on my os-x laptop */
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
#endif
}

/*! BIT_lookBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    assert(nbBits >= 1);
    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}

MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}

/*! BIT_readBits() :
 *  Read (consume) next n bits from local register and update.
 *  Take care not to read more bits than the local register contains.
 * @return : extracted value. */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
    size_t const value = BIT_lookBits(bitD, nbBits);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_readBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
    size_t const value = BIT_lookBitsFast(bitD, nbBits);
    assert(nbBits >= 1);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_reloadDStreamFast() :
 *  Similar to BIT_reloadDStream(), but with two differences:
 *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
 *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
 *     point you must use BIT_reloadDStream() to reload.
 */
MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
{
    if (UNLIKELY(bitD->ptr < bitD->limitPtr))
        return BIT_DStream_overflow;
    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
    bitD->ptr -= bitD->bitsConsumed >> 3;
    bitD->bitsConsumed &= 7;
    bitD->bitContainer = MEM_readLEST(bitD->ptr);
    return BIT_DStream_unfinished;
}

/*! BIT_reloadDStream() :
 *  Refill `bitD` from buffer previously set in BIT_initDStream() .
 *  This function is safe, it guarantees it will not read beyond src buffer.
 * @return : status of `BIT_DStream_t` internal register.
 *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
        return BIT_DStream_overflow;

    if (bitD->ptr >= bitD->limitPtr) {
        return BIT_reloadDStreamFast(bitD);
    }
    if (bitD->ptr == bitD->start) {
        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
        return BIT_DStream_completed;
    }
    /* start < ptr < limitPtr */
    {   U32 nbBytes = bitD->bitsConsumed >> 3;
        BIT_DStream_status result = BIT_DStream_unfinished;
        if (bitD->ptr - nbBytes < bitD->start) {
            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
            result = BIT_DStream_endOfBuffer;
        }
        bitD->ptr -= nbBytes;
        bitD->bitsConsumed -= nbBytes*8;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
        return result;
    }
}

/*! BIT_endOfDStream() :
 * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
 */
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}

#if defined (__cplusplus)
}
#endif

#endif /* BITSTREAM_H_MODULE */
/**** ended inlining bitstream.h ****/


/* *****************************************
*  Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable, using the macros below */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))

/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))


/* *****************************************
 *   FSE advanced API
 ***************************************** */

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSE_optimalTableLog(), which uses `minus==2` */

/* FSE_compress_wksp() :
 *  Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 *  FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
 */
#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ?
(1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */

size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/**< build a fake FSE_CTable, designed to always compress the same symbolValue */

/* FSE_buildCTable_wksp() :
 *  Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * `workSpace` must be a table of at least `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` `unsigned`.
 */
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)` */

size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */

size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */

#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue))
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */

size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */

typedef enum {
   FSE_repeat_none,   /**< Cannot use the previous table */
   FSE_repeat_check,  /**< Can use the previous table but it must be checked */
   FSE_repeat_valid   /**< Can use the previous table and it is assumed to be valid */
 } FSE_repeat;


/* *****************************************
*   FSE symbol compression API
*******************************************/
/*!
   This API consists of small unitary functions, which highly benefit from being inlined.
   Hence their bodies are included in the next section.
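
   As a compact preview of the walk-through below, a complete encoding pass
   typically looks like this (sketch only ; error checks elided ;
   src/srcSize/dst/dstCapacity/ct assumed provided by the caller ;
   FSE_initCState2() is defined further below) :

       BIT_CStream_t bitStream;
       FSE_CState_t  state;
       const BYTE* ip = (const BYTE*)src + srcSize;   // encoding walks backward
       BIT_initCStream(&bitStream, dst, dstCapacity);
       FSE_initCState2(&state, ct, *--ip);            // first symbol : cheapest init
       while (ip > (const BYTE*)src) {
           FSE_encodeSymbol(&bitStream, &state, *--ip);
           BIT_flushBits(&bitStream);
       }
       FSE_flushCState(&bitStream, &state);
       size_t const cSize = BIT_closeCStream(&bitStream);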
*/

typedef struct {
    ptrdiff_t   value;
    const void* stateTable;
    const void* symbolTT;
    unsigned    stateLog;
} FSE_CState_t;

static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);

/**<
These functions are inner components of FSE_compress_usingCTable().
They allow the creation of custom streams, mixing multiple tables and bit sources.

A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
So the first symbol you will encode is the last you will decode, like a LIFO stack.

You will need a few variables to track your CStream. They are :

FSE_CTable    ct;         // Provided by FSE_buildCTable()
BIT_CStream_t bitStream;  // bitStream tracking structure
FSE_CState_t  state;      // State tracking structure (can have several)

The first thing to do is to init bitStream and state.
    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
    FSE_initCState(&state, ct);

Note that BIT_initCStream() can produce an error code, so its result should be tested using FSE_isError().
You can then encode your input data, byte after byte.
FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
Remember decoding will be done in reverse direction.
    FSE_encodeSymbol(&bitStream, &state, symbol);

At any time, you can also add any bit sequence.
Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
    BIT_addBits(&bitStream, bitField, nbBits);

The above methods don't commit data to memory, they just store it into local register, for speed.
Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
Writing data to memory is a manual operation, performed by the flushBits function.
    BIT_flushBits(&bitStream);

Your last FSE encoding operation shall be to flush your last state value(s).
    FSE_flushCState(&bitStream, &state);

Finally, you must close the bitStream.
The function returns the size of CStream in bytes.
If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
    size_t size = BIT_closeCStream(&bitStream);
*/


/* *****************************************
*  FSE symbol decompression API
*******************************************/
typedef struct {
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSE_DState_t;

static void          FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
static unsigned      FSE_endOfDState(const FSE_DState_t* DStatePtr);

/**<
Let's now decompose FSE_decompress_usingDTable() into its unitary components.
You will decode FSE-encoded symbols from the bitStream,
and also any other bitFields you put in, **in reverse order**.

You will need a few variables to track your bitStream. They are :

BIT_DStream_t DStream;    // Stream context
FSE_DState_t  DState;     // State context. Multiple ones are possible
FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()

The first thing to do is to init the bitStream.
    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);

You should then retrieve your initial state(s)
(in reverse flushing order if you have several ones) :
(note : FSE_initDState() returns void, it cannot fail)
    FSE_initDState(&DState, &DStream, DTablePtr);

You can then decode your data, symbol after symbol.
For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);

You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
Note : maximum allowed nbBits is 25, for 32-bits compatibility
    size_t bitField = BIT_readBits(&DStream, nbBits);

All above operations only read from local register (whose size depends on size_t).
Refilling the register from memory is manually performed by the reload method.
    endSignal = BIT_reloadDStream(&DStream);

BIT_reloadDStream() result tells if there is still some more data to read from DStream.
BIT_DStream_unfinished : there is still some data left in the DStream.
BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
BIT_DStream_overflow : Dstream went too far. Decompression result is corrupted.

When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly,
notably if you decode multiple symbols per loop, to properly detect the exact end of stream.
After each decoded symbol, check if DStream is fully consumed using this simple test :
    BIT_reloadDStream(&DStream) >= BIT_DStream_completed

When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
Checking if DStream has reached its end is performed by :
    BIT_endOfDStream(&DStream);

Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
    FSE_endOfDState(&DState);
*/


/* *****************************************
*  FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */


/* *****************************************
*  Implementation of inlined functions
*******************************************/
typedef struct {
    int deltaFindState;
    U32 deltaNbBits;
} FSE_symbolCompressionTransform; /* total 8 bytes */

MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
    const void* ptr = ct;
    const U16* u16ptr = (const U16*) ptr;
    const U32 tableLog = MEM_read16(ptr);
    statePtr->value = (ptrdiff_t)1<<tableLog;
    statePtr->stateTable = u16ptr+2;
    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
    statePtr->stateLog = tableLog;
}

/*!
FSE_initCState2() :
 *  Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
 *  uses the smallest state value possible, saving the cost of this symbol */
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
{
    FSE_initCState(statePtr, ct);
    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
        const U16* stateTable = (const U16*)(statePtr->stateTable);
        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
    }
}

MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
{
    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
    const U16* const stateTable = (const U16*)(statePtr->stateTable);
    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
    BIT_addBits(bitC, statePtr->value, nbBitsOut);
    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}

MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
    BIT_flushBits(bitC);
}


/* FSE_getMaxNbBits() :
 * Approximate maximum cost of a symbol, in bits.
 * Fractional values get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
 * note 1 : assume symbolValue is valid (<= maxSymbolValue)
 * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
{
    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
}

/* FSE_bitCost() :
 * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
 * note 1 : assume symbolValue is valid (<= maxSymbolValue)
 * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
{
    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
    U32 const threshold = (minNbBits+1) << 16;
    assert(tableLog < 16);
    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
    {   U32 const tableSize = 1 << tableLog;
        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
        U32 const bitMultiplier = 1 << accuracyLog;
        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
        assert(normalizedDeltaFromThreshold <= bitMultiplier);
        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
    }
}


/* ======    Decompression    ====== */

typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSE_DTableHeader;   /* sizeof U32 */

typedef struct {
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSE_decode_t;   /* size == U32 */

MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* const DTableH = (const
FSE_DTableHeader*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
}

MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/*! FSE_decodeSymbolFast() :
    unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}


#ifndef FSE_COMMONDEFS_ONLY

/* **************************************************************
*  Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
 *  Increasing memory usage improves compression ratio
 *  Reduced memory usage can improve speed, due to cache effect
 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#ifndef FSE_MAX_MEMORY_USAGE
#  define FSE_MAX_MEMORY_USAGE 14
#endif
#ifndef FSE_DEFAULT_MEMORY_USAGE
#  define FSE_DEFAULT_MEMORY_USAGE 13
#endif
#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
#  error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
#endif

/*!FSE_MAX_SYMBOL_VALUE :
 *  Maximum symbol value authorized.
 *  Required for proper stack allocation */
#ifndef FSE_MAX_SYMBOL_VALUE
#  define FSE_MAX_SYMBOL_VALUE 255
#endif

/* **************************************************************
*  template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t

#endif   /* !FSE_COMMONDEFS_ONLY */


/* ***************************************************************
*  Constants
*****************************************************************/
#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5

#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif

#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)


#endif /* FSE_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif
/**** ended inlining fse.h ****/
#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
/**** start inlining huf.h ****/
/* ******************************************************************
 * huff0 huffman codec,
 * part of Finite State Entropy library
 * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc.
* * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif #ifndef HUF_H_298734234 #define HUF_H_298734234 /* *** Dependencies *** */ /**** skipping file: zstd_deps.h ****/ /* *** library symbols visibility *** */ /* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, * HUF symbols remain "private" (internal symbols for library only). * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ #if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) # define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) #elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ # define HUF_PUBLIC_API __declspec(dllexport) #elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) # define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ #else # define HUF_PUBLIC_API #endif /* ========================== */ /* *** simple functions *** */ /* ========================== */ /** HUF_compress() : * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. * 'dst' buffer must be already allocated. * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. * @return : size of compressed data (<= `dstCapacity`). * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) */ HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize); /** HUF_decompress() : * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', * into already allocated buffer 'dst', of minimum size 'dstSize'. * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. * Note : in contrast with FSE, HUF_decompress can regenerate * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, * because it knows size to regenerate (originalSize). * @return : size of regenerated data (== originalSize), * or an error code, which can be tested using HUF_isError() */ HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize); /* *** Tool functions *** */ #define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ /* Error Management */ HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ /* *** Advanced function *** */ /** HUF_compress2() : * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . * `tableLog` must be `<= HUF_TABLELOG_MAX` . 
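 *
 *  Example (sketch only ; dst/src buffers assumed provided by the caller) :
 *      size_t const cSize = HUF_compress2(dst, dstCapacity, src, srcSize, 255, 11);
 *      if (HUF_isError(cSize)) { .. handle error .. }
 *      else if (cSize == 0)    { .. not compressible : store src raw .. }
 *  where 255 and 11 match HUF_SYMBOLVALUE_MAX and HUF_TABLELOG_DEFAULT,
 *  both defined in the static-linking section further below.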
 */
HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     unsigned maxSymbolValue, unsigned tableLog);

/** HUF_compress4X_wksp() :
 *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
 * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize,
                                           unsigned maxSymbolValue, unsigned tableLog,
                                           void* workSpace, size_t wkspSize);

#endif   /* HUF_H_298734234 */

/* ******************************************************************
 *  WARNING !!
 *  The following section contains advanced and experimental definitions
 *  which shall never be used in the context of a dynamic library,
 *  because they are not guaranteed to remain stable in the future.
 *  Only consider them in association with static linking.
 * *****************************************************************/
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY

/* *** Dependencies *** */
/**** skipping file: mem.h ****/
#define FSE_STATIC_LINKING_ONLY
/**** skipping file: fse.h ****/


/* *** Constants *** */
#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
#define HUF_SYMBOLVALUE_MAX  255

#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
#  error "HUF_TABLELOG_MAX is too large !"
#endif


/* ****************************************
*  Static allocation
******************************************/
/* HUF buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* static allocation of HUF's Compression Table */
/* this is a private definition, just exposed for allocation and strict aliasing purpose.
never EVER access its members directly */ struct HUF_CElt_s { U16 val; BYTE nbBits; }; /* typedef'd to HUF_CElt */ typedef struct HUF_CElt_s HUF_CElt; /* consider it an incomplete type */ #define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ #define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */ /* static allocation of HUF's DTable */ typedef U32 HUF_DTable; #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) #define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } /* **************************************** * Advanced decompression functions ******************************************/ size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ #endif size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ #endif /* **************************************** * HUF detailed API * ****************************************/ /*! HUF_compress() does the following: * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") * 2. (optional) refine tableLog using HUF_optimalTableLog() * 3. build Huffman table from count using HUF_buildCTable() * 4. save Huffman table to memory buffer using HUF_writeCTable() * 5. encode the data stream using HUF_compress4X_usingCTable() * * The following API allows targeting specific sub-functions for advanced tasks. * For example, it's possible to compress several blocks using the same 'CTable', * or to save and regenerate 'CTable' using external methods. */ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. 
In which case, CTable will overwrite count content */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); typedef enum { HUF_repeat_none, /**< Cannot use the previous table */ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ } HUF_repeat; /** HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. * If preferRepeat then the old table will always be used if valid. */ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. */ #define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); /*! HUF_readStats() : * Read compact Huffman tree, saved by HUF_writeCTable(). * `huffWeight` is destination buffer. * @return : size read from `src` , or an error Code . * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize); /*! HUF_readStats_wksp() : * Same as HUF_readStats() but takes an external workspace which must be * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE. * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. 
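 *
 *  Typical buffer sizing, per the constraints above (sketch only) :
 *      BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
 *      U32  rankStats[HUF_TABLELOG_MAX + 1];
 *      U32  wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
 *      U32  nbSymbols, tableLog;
 *      size_t const hSize = HUF_readStats_wksp(huffWeight, sizeof(huffWeight),
 *                              rankStats, &nbSymbols, &tableLog,
 *                              src, srcSize, wksp, sizeof(wksp), 0);   // bmi2=0 : portable path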
 */
#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                          const void* src, size_t srcSize,
                          void* workspace, size_t wkspSize,
                          int bmi2);

/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);

/** HUF_getNbBits() :
 *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
 *  Note 1 : is not inlined, as HUF_CElt definition is private
 *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);

/*
 * HUF_decompress() does the following:
 * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
 * 2. build Huffman table from the saved representation, using HUF_readDTableX?()
 * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
 */

/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-computed metrics.
 * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
 *  Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);

/**
 *  The minimum workspace size for the `workSpace` used in
 *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
 *
 *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
 *  Buffer overflow errors may potentially occur if code modifications result in
 *  a required workspace size greater than that specified in the following
 *  macro.
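 *
 *  Example (sketch only) : one block, static allocations only :
 *      HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);
 *      U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
 *      size_t const dSize = HUF_decompress4X_hufOnly_wksp(dtable, dst, dstSize,
 *                              cSrc, cSrcSize, workSpace, sizeof(workSpace));
 *      if (HUF_isError(dSize)) { .. handle error .. }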
*/ #define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); #endif size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #endif /* ====================== */ /* single stream variants */ /* ====================== */ size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. * If preferRepeat then the old table will always be used if valid. 
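 *
 *  Example (sketch only) : reusing one table across consecutive blocks :
 *      HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
 *      HUF_repeat repeat = HUF_repeat_none;
 *      U32 wksp[HUF_WORKSPACE_SIZE_U32];
 *      for each block :
 *          size_t const cSize = HUF_compress1X_repeat(dst, dstCapacity, blk, blkSize,
 *                                  HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
 *                                  wksp, sizeof(wksp),
 *                                  hufTable, &repeat, 0, 0);   // preferRepeat=0, bmi2=0
 *  The function then decides, per block, whether the previous table is reusable,
 *  following the rules above.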
 */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
                             const void* src, size_t srcSize,
                             unsigned maxSymbolValue, unsigned tableLog,
                             void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                             HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);

size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
#endif

size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
#endif

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#endif

/* BMI2 variants.
 * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
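 *
 *  A caller typically detects BMI2 once and threads the flag down (sketch only ;
 *  ZSTD_cpuid() / ZSTD_cpuid_bmi2() are zstd's own helpers from common/cpu.h) :
 *      int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
 *      size_t const dSize = HUF_decompress1X_usingDTable_bmi2(dst, dstCapacity,
 *                              cSrc, cSrcSize, DTable, bmi2);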
 */
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
#endif
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
#endif

#endif /* HUF_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif
/**** ended inlining huf.h ****/


/*===   Version   ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }


/*===   Error Management   ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }

unsigned HUF_isError(size_t code) { return ERR_isError(code); }
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
static U32 FSE_ctz(U32 val)
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
        unsigned long r=0;
        return _BitScanForward(&r, val) ? (unsigned)r : 0;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
        return __builtin_ctz(val);
#   elif defined(__ICCARM__)    /* IAR Intrinsic */
        return __CTZ(val);
#   else   /* Software version */
        U32 count = 0;
        while ((val & 1) == 0) {
            val >>= 1;
            ++count;
        }
        return count;
#   endif
    }
}

FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                           const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;

    if (hbSize < 8) {
        /* This function only works when hbSize >= 8 */
        char buffer[8] = {0};
        ZSTD_memcpy(buffer, headerBuffer, hbSize);
        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                                    buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 8);

    /* init */
    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    for (;;) {
        if (previous0) {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (LIKELY(ip <= iend-7)) {
                    ip += 3;
                } else {
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* Add the final repeat which isn't 0b11.
*/ assert((bitStream & 3) < 3); charnum += bitStream & 3; bitCount += 2; /* This is an error, but break and return an error * at the end, because returning out of a loop makes * it harder for the compiler to optimize. */ if (charnum >= maxSV1) break; /* We don't need to set the normalized count to 0 * because we already memset the whole buffer to 0. */ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { assert((bitCount >> 3) <= 3); /* For first condition to work */ ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); bitCount &= 31; ip = iend - 4; } bitStream = MEM_readLE32(ip) >> bitCount; } { int const max = (2*threshold-1) - remaining; int count; if ((bitStream & (threshold-1)) < (U32)max) { count = bitStream & (threshold-1); bitCount += nbBits-1; } else { count = bitStream & (2*threshold-1); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ /* When it matters (small blocks), this is a * predictable branch, because we don't use -1. */ if (count >= 0) { remaining -= count; } else { assert(count == -1); remaining += count; } normalizedCounter[charnum++] = (short)count; previous0 = !count; assert(threshold > 1); if (remaining < threshold) { /* This branch can be folded into the * threshold update condition because we * know that threshold > 1. */ if (remaining <= 1) break; nbBits = BIT_highbit32(remaining) + 1; threshold = 1 << (nbBits - 1); } if (charnum >= maxSV1) break; if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); bitCount &= 31; ip = iend - 4; } bitStream = MEM_readLE32(ip) >> bitCount; } } if (remaining != 1) return ERROR(corruption_detected); /* Only possible when there are too many zeros. */ if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall); if (bitCount > 32) return ERROR(corruption_detected); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; return ip-istart; } /* Avoids the FORCE_INLINE of the _body() function. */ static size_t FSE_readNCount_body_default( short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); } #if DYNAMIC_BMI2 TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2( short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); } #endif size_t FSE_readNCount_bmi2( short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize, int bmi2) { #if DYNAMIC_BMI2 if (bmi2) { return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); } #endif (void)bmi2; return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); } size_t FSE_readNCount( short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0); } /*! HUF_readStats() : Read compact Huffman tree, saved by HUF_writeCTable(). `huffWeight` is destination buffer. `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. @return : size read from `src` , or an error Code . Note : Needed by HUF_readCTable() and HUF_readDTableX?() . 
 */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
}

FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                   U32* nbSymbolsPtr, U32* tableLogPtr,
                   const void* src, size_t srcSize,
                   void* workSpace, size_t wkspSize,
                   int bmi2)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* ZSTD_memset(huffWeight, 0, hwSize); */   /* is not necessary, even though some analyzers complain ... */

    if (iSize >= 128) {  /* special header */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        /* max (hwSize-1) values decoded, as last one is implied */
        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << BIT_highbit32(rest);
            U32 const lastWeight = BIT_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}

/* Avoids the FORCE_INLINE of the _body() function.
*/ static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) { return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0); } #if DYNAMIC_BMI2 static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) { return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1); } #endif size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2) { #if DYNAMIC_BMI2 if (bmi2) { return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } #endif (void)bmi2; return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } /**** ended inlining common/entropy_common.c ****/ /**** start inlining common/error_private.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* The purpose of this file is to have a single list of error strings embedded in binary */ /**** skipping file: error_private.h ****/ const char* ERR_getErrorString(ERR_enum code) { #ifdef ZSTD_STRIP_ERROR_STRINGS (void)code; return "Error strings stripped"; #else static const char* const notErrorCode = "Unspecified error code"; switch( code ) { case PREFIX(no_error): return "No error detected"; case PREFIX(GENERIC): return "Error (generic)"; case PREFIX(prefix_unknown): return "Unknown frame descriptor"; case PREFIX(version_unsupported): return "Version not supported"; case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; case PREFIX(corruption_detected): return "Corrupted block detected"; case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; case PREFIX(parameter_unsupported): return "Unsupported parameter"; case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; case PREFIX(dstSize_tooSmall): return 
"Destination buffer is too small"; case PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; case PREFIX(srcBuffer_wrong): return "Source buffer is wrong"; case PREFIX(maxCode): default: return notErrorCode; } #endif } /**** ended inlining common/error_private.c ****/ /**** start inlining common/fse_decompress.c ****/ /* ****************************************************************** * FSE : Finite State Entropy decoder * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Includes ****************************************************************/ /**** skipping file: debug.h ****/ /**** skipping file: bitstream.h ****/ /**** skipping file: compiler.h ****/ #define FSE_STATIC_LINKING_ONLY /**** skipping file: fse.h ****/ /**** skipping file: error_private.h ****/ #define ZSTD_DEPS_NEED_MALLOC /**** skipping file: zstd_deps.h ****/ /* ************************************************************** * Error Management ****************************************************************/ #define FSE_isError ERR_isError #define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ /* ************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" #endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) /* Function templates */ FSE_DTable* FSE_createDTable (unsigned tableLog) { if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); } void FSE_freeDTable (FSE_DTable* dt) { ZSTD_free(dt); } static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize) { void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); U16* symbolNext = (U16*)workSpace; BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1); U32 const maxSV1 = maxSymbolValue + 1; U32 const tableSize = 1 << tableLog; U32 highThreshold = tableSize-1; /* 
Sanity Checks */
    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    {   FSE_DTableHeader DTableH;
        DTableH.tableLog = (U16)tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = normalizedCounter[s];
        }   }   }
        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    if (highThreshold == tableSize - 1) {
        size_t const tableMask = tableSize-1;
        size_t const step = FSE_TABLESTEP(tableSize);
        /* First lay down the symbols in order.
         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
         * misses since small blocks generally have small table logs, so nearly
         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
         * our buffer to handle the over-write.
         */
        {   U64 const add = 0x0101010101010101ull;
            size_t pos = 0;
            U64 sv = 0;
            U32 s;
            for (s=0; s<maxSV1; ++s, sv += add) {
                int i;
                int const n = normalizedCounter[s];
                MEM_write64(spread + pos, sv);
                for (i = 8; i < n; i += 8) {
                    MEM_write64(spread + pos + i, sv);
                }
                pos += n;
        }   }
        /* Now we spread those positions across the table.
         * The benefit of doing it in two stages is that we avoid the
         * variable-size inner loop, which caused lots of branch misses.
         * Now we can run through all the positions without any branch misses.
         * We unroll the loop twice, since that is what empirically worked best.
         */
        {   size_t position = 0;
            size_t s;
            size_t const unroll = 2;
            assert(tableSize % unroll == 0);  /* FSE_MIN_TABLELOG is 5 */
            for (s = 0; s < (size_t)tableSize; s += unroll) {
                size_t u;
                for (u = 0; u < unroll; ++u) {
                    size_t const uPosition = (position + (u * step)) & tableMask;
                    tableDecode[uPosition].symbol = spread[s + u];
                }
                position = (position + (unroll * step)) & tableMask;
            }
            assert(position == 0);
        }
    } else {
        U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }

    return 0;
}

size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter,
                            unsigned maxSymbolValue, unsigned tableLog,
                            void* workSpace, size_t wkspSize)
{
    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
}

#ifndef FSE_COMMONDEFS_ONLY

/*-*******************************************************
*  Decompression (Byte symbols)
*********************************************************/
size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;

    DTableH->tableLog = 0;
    DTableH->fastMode = 0;

    cell->newState = 0;
    cell->symbol = symbolValue;
    cell->nbBits = 0;

    return 0;
}

size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    void* dPtr = dt + 1;
    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSV1 = tableMask+1;
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);         /* min size */

    /* Build Decoding Table */
    DTableH->tableLog = (U16)nbBits;
    DTableH->fastMode = 1;
    for (s=0; s<maxSV1; s++) {
        dinfo[s].newState = 0;
        dinfo[s].symbol = (BYTE)s;
        dinfo[s].nbBits = (BYTE)nbBits;
    }

    return 0;
}

FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BIT_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;

    /* Init */
    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
        op[0] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
    while (1) {
        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state1);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state2);
            break;
        }

        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state2);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state1);
            break;
    }   }

    return op-ostart;
}

size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
                            const void* cSrc, size_t cSrcSize,
                            const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
    const U32 fastMode = DTableH->fastMode;

    /* select fast mode (static) */
    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}

size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace,
size_t wkspSize) { return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0); } FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; FSE_DTable* const dtable = (FSE_DTable*)workSpace; /* normal FSE decoding mode */ size_t const NCountLength = FSE_readNCount_bmi2(counting, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2); if (FSE_isError(NCountLength)) return NCountLength; if (tableLog > maxLog) return ERROR(tableLog_tooLarge); assert(NCountLength <= cSrcSize); ip += NCountLength; cSrcSize -= NCountLength; if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge); workSpace = dtable + FSE_DTABLE_SIZE_U32(tableLog); wkspSize -= FSE_DTABLE_SIZE(tableLog); CHECK_F( FSE_buildDTable_internal(dtable, counting, maxSymbolValue, tableLog, workSpace, wkspSize) ); { const void* ptr = dtable; const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; const U32 fastMode = DTableH->fastMode; /* select fast mode (static) */ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1); return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0); } } /* Avoids the FORCE_INLINE of the _body() function. */ static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) { return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0); } #if DYNAMIC_BMI2 TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) { return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1); } #endif size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2) { #if DYNAMIC_BMI2 if (bmi2) { return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); } #endif (void)bmi2; return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); } typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; #ifndef ZSTD_NO_UNUSED_FUNCTIONS size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)]; return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp)); } size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) { /* Static analyzer seems unable to understand this table will be properly initialized later */ U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp)); } #endif #endif /* FSE_COMMONDEFS_ONLY */ /**** ended inlining common/fse_decompress.c ****/ /**** start inlining common/threading.c ****/ /** * Copyright (c) 2016 Tino Reichardt * All rights reserved. 
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/**
 * This file will hold wrappers for systems which do not support pthreads
 */

/**** start inlining threading.h ****/
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef THREADING_H_938743
#define THREADING_H_938743

/**** skipping file: debug.h ****/

#if defined (__cplusplus)
extern "C" {
#endif

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper, based on :
 * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
 */
#ifdef WINVER
#  undef WINVER
#endif
#define WINVER       0x0600

#ifdef _WIN32_WINNT
#  undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0600

#ifndef WIN32_LEAN_AND_MEAN
#  define WIN32_LEAN_AND_MEAN
#endif

#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
#include <windows.h>
#undef ERROR
#define ERROR(name) ZSTD_ERROR(name)

/* mutex */
#define ZSTD_pthread_mutex_t           CRITICAL_SECTION
#define ZSTD_pthread_mutex_init(a, b)  ((void)(b), InitializeCriticalSection((a)), 0)
#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))
#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))
#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))

/* condition variable */
#define ZSTD_pthread_cond_t             CONDITION_VARIABLE
#define ZSTD_pthread_cond_init(a, b)    ((void)(b), InitializeConditionVariable((a)), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))
#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))

/* ZSTD_pthread_create() and ZSTD_pthread_join() */
typedef struct {
    HANDLE handle;
    void* (*start_routine)(void*);
    void* arg;
} ZSTD_pthread_t;

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                   void* (*start_routine) (void*), void* arg);

int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);

/**
 * add here more wrappers as required
 */


#elif defined(ZSTD_MULTITHREAD)   /* posix assumed ; need a better detection method */
/* ===   POSIX Systems   === */
#  include <pthread.h>

#if DEBUGLEVEL < 1

#define ZSTD_pthread_mutex_t            pthread_mutex_t
#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock((a))
#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock((a))

#define ZSTD_pthread_cond_t             pthread_cond_t
#define ZSTD_pthread_cond_init(a, b)    pthread_cond_init((a), (b))
#define ZSTD_pthread_cond_destroy(a)    pthread_cond_destroy((a))
#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait((a), (b))
#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal((a))
#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast((a))

#define ZSTD_pthread_t                  pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))

#else /* DEBUGLEVEL >= 1 */

/* Debug implementation of threading.
 * In this implementation we use pointers for mutexes and condition variables.
 * This way, if we forget to init/destroy them the program will crash or ASAN
 * will report leaks.
 */

#define ZSTD_pthread_mutex_t            pthread_mutex_t*
int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock(*(a))
#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock(*(a))

#define ZSTD_pthread_cond_t             pthread_cond_t*
int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait(*(a), *(b))
#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal(*(a))
#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast(*(a))

#define ZSTD_pthread_t                  pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))

#endif

#else  /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */

typedef int ZSTD_pthread_mutex_t;
#define ZSTD_pthread_mutex_init(a, b)   ((void)(a), (void)(b), 0)
#define ZSTD_pthread_mutex_destroy(a)   ((void)(a))
#define ZSTD_pthread_mutex_lock(a)      ((void)(a))
#define ZSTD_pthread_mutex_unlock(a)    ((void)(a))

typedef int ZSTD_pthread_cond_t;
#define ZSTD_pthread_cond_init(a, b)    ((void)(a), (void)(b), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    ((void)(a), (void)(b))
#define ZSTD_pthread_cond_signal(a)     ((void)(a))
#define ZSTD_pthread_cond_broadcast(a)  ((void)(a))

/* do not use ZSTD_pthread_t */

#endif /* ZSTD_MULTITHREAD */

#if defined (__cplusplus)
}
#endif

#endif /* THREADING_H_938743 */
/**** ended inlining threading.h ****/

/* create fake symbol to avoid empty translation unit warning */
int g_ZSTD_threading_useless_symbol;

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper, based on :
 * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
 */


/* ===  Dependencies  === */
#include <process.h>
#include <errno.h>


/* ===  Implementation  === */

static unsigned __stdcall worker(void *arg)
{
    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
    thread->arg = thread->start_routine(thread->arg);
    return 0;
}

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
            void* (*start_routine) (void*), void* arg)
{
    (void)unused;
    thread->arg = arg;
    thread->start_routine = start_routine;
    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);

    if (!thread->handle)
        return errno;
    else
        return 0;
}

int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
{
    DWORD result;

    if (!thread.handle) return 0;

    result = WaitForSingleObject(thread.handle, INFINITE);
    switch (result) {
    case WAIT_OBJECT_0:
        if (value_ptr) *value_ptr = thread.arg;
        return 0;
    case WAIT_ABANDONED:
        return EINVAL;
    default:
        return GetLastError();
    }
}

#endif   /* ZSTD_MULTITHREAD */

#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)

#define ZSTD_DEPS_NEED_MALLOC
/**** skipping file: zstd_deps.h ****/

int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
{
    *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
    if (!*mutex)
        return 1;
    return pthread_mutex_init(*mutex, attr);
}
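/* Illustration (editor's sketch, not part of the library) : with DEBUGLEVEL >= 1
 * each mutex is a heap-allocated pointer, so a missing destroy shows up as a leak :
 * \code
 * ZSTD_pthread_mutex_t m;                 // here: pthread_mutex_t*
 * if (!ZSTD_pthread_mutex_init(&m, NULL)) {
 *     ZSTD_pthread_mutex_lock(&m);
 *     ZSTD_pthread_mutex_unlock(&m);
 *     ZSTD_pthread_mutex_destroy(&m);    // forgetting this leaks => ASAN reports it
 * }
 * \endcode
 */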
int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
{
    if (!*mutex)
        return 0;
    {
        int const ret = pthread_mutex_destroy(*mutex);
        ZSTD_free(*mutex);
        return ret;
    }
}

int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
{
    *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
    if (!*cond)
        return 1;
    return pthread_cond_init(*cond, attr);
}

int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
{
    if (!*cond)
        return 0;
    {
        int const ret = pthread_cond_destroy(*cond);
        ZSTD_free(*cond);
        return ret;
    }
}

#endif
/**** ended inlining common/threading.c ****/
/**** start inlining common/pool.c ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ======   Dependencies   ======= */
/**** skipping file: zstd_deps.h ****/
/**** skipping file: debug.h ****/
/**** start inlining zstd_internal.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE

/* this module contains definitions which must be identical
 * across compression, decompression and dictBuilder.
 * It also contains a few functions useful to at least 2 of them
 * and which benefit from being inlined */

/*-*************************************
*  Dependencies
***************************************/
#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON)
#include <arm_neon.h>
#endif
/**** skipping file: compiler.h ****/
/**** skipping file: mem.h ****/
/**** skipping file: debug.h ****/
/**** skipping file: error_private.h ****/
#define ZSTD_STATIC_LINKING_ONLY
/**** start inlining ../zstd.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
#if defined (__cplusplus)
extern "C" {
#endif

#ifndef ZSTD_H_235446
#define ZSTD_H_235446

/* ======   Dependency   ======*/
#include <limits.h>   /* INT_MAX */
#include <stddef.h>   /* size_t */


/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
#ifndef ZSTDLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
#  else
#    define ZSTDLIB_VISIBILITY
#  endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required, but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
#else
#  define ZSTDLIB_API ZSTDLIB_VISIBILITY
#endif


/*******************************************************************************
  Introduction

  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
  real-time compression scenarios at zlib-level and better compression ratios.
  The zstd compression library provides in-memory compression and decompression
  functions.

  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
  caution, as they require more memory. The library also offers negative
  compression levels, which extend the range of speed vs. ratio preferences.
  The lower the level, the faster the speed (at the cost of compression).

  Compression can be done in:
    - a single step (described as Simple API)
    - a single step, reusing a context (described as Explicit context)
    - unbounded multiple steps (described as Streaming compression)

  The compression ratio achievable on small data can be greatly improved using
  a dictionary. Dictionary compression can be performed in:
    - a single step (described as Simple dictionary API)
    - a single step, reusing a dictionary (described as Bulk-processing
      dictionary API)

  Advanced experimental functions can be accessed using
  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.

  Advanced experimental APIs should never be used with a dynamically-linked
  library. They are not "stable"; their definitions or signatures may change in
  the future. Only static linking is allowed.
*******************************************************************************/

/*------   Version   ------*/
#define ZSTD_VERSION_MAJOR    1
#define ZSTD_VERSION_MINOR    4
#define ZSTD_VERSION_RELEASE  9
#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)

/*! ZSTD_versionNumber() :
 *  Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
ZSTDLIB_API unsigned ZSTD_versionNumber(void);

#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
#define ZSTD_QUOTE(str) #str
#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)

/*! ZSTD_versionString() :
 *  Return runtime library version, like "1.4.5". Requires v1.3.0+.
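 *
 *  Illustration (editor's sketch; assumes <stdio.h>) :
 *  \code
 *  printf("zstd v%s (versionNumber %u)\n", ZSTD_versionString(), ZSTD_versionNumber());
 *  \endcode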
 */
ZSTDLIB_API const char* ZSTD_versionString(void);

/* *************************************
 *  Default constant
 ***************************************/
#ifndef ZSTD_CLEVEL_DEFAULT
#  define ZSTD_CLEVEL_DEFAULT 3
#endif

/* *************************************
 *  Constants
 ***************************************/

/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0

#define ZSTD_BLOCKSIZELOG_MAX  17
#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)


/***************************************
*  Simple API
***************************************/

/*! ZSTD_compress() :
 *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
 *  Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
 *  @return : compressed size written into `dst` (<= `dstCapacity`),
 *            or an error code if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                                  int compressionLevel);

/*! ZSTD_decompress() :
 *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
 *  `dstCapacity` is an upper bound of originalSize to regenerate.
 *  If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
 *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
 *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
                              const void* src, size_t compressedSize);

/*! ZSTD_getFrameContentSize() : requires v1.3.0+
 *  `src` should point to the start of a ZSTD encoded frame.
 *  `srcSize` must be at least as large as the frame header.
 *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
 *  @return : - decompressed size of `src` frame content, if known
 *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
 *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
 *   note 1 : a 0 return value means the frame is valid but "empty".
 *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
 *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
 *            In which case, it's necessary to use streaming mode to decompress data.
 *            Optionally, application can rely on some implicit limit,
 *            as ZSTD_decompress() only needs an upper bound of decompressed size.
 *            (For example, data could be necessarily cut into blocks <= 16 KB).
 *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
 *            such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
 *   note 4 : decompressed size can be very large (64-bits value),
 *            potentially larger than what local system can handle as a single memory segment.
 *            In which case, it's necessary to use streaming mode to decompress data.
 *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
 *            Always ensure return value fits within application's authorized limits.
 *            Each application can set its own limits.
 *   note 6 : This function replaces ZSTD_getDecompressedSize() */
#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
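/* Round-trip illustration (editor's sketch, not part of the upstream header):
 * a one-shot compression followed by decompression, sized with
 * ZSTD_compressBound() and ZSTD_getFrameContentSize(). `src`/`srcSize` are
 * caller-provided; error handling is abbreviated — real code should test
 * every return with ZSTD_isError() and check for ZSTD_CONTENTSIZE_UNKNOWN.
 * \code
 * size_t const cBound = ZSTD_compressBound(srcSize);
 * void* const cBuff = malloc(cBound);
 * size_t const cSize = ZSTD_compress(cBuff, cBound, src, srcSize, ZSTD_CLEVEL_DEFAULT);
 * // single-pass frames always record content size (note 3 above) :
 * unsigned long long const rSize = ZSTD_getFrameContentSize(cBuff, cSize);
 * void* const rBuff = malloc((size_t)rSize);
 * size_t const dSize = ZSTD_decompress(rBuff, (size_t)rSize, cBuff, cSize);
 * // on success, dSize == rSize and rBuff holds the original src content
 * \endcode
 */

/*!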
ZSTD_getDecompressedSize() :
 *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
 *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
 *  "empty", "unknown" and "error" results to the same return value (0),
 *  while ZSTD_getFrameContentSize() gives them separate return values.
 * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);

/*! ZSTD_findFrameCompressedSize() :
 * `src` should point to the start of a ZSTD frame or skippable frame.
 * `srcSize` must be >= first frame size
 * @return : the compressed size of the first frame starting at `src`,
 *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
 *           or an error code if input is invalid */
ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);


/*======  Helper functions  ======*/
#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */


/***************************************
*  Explicit context
***************************************/
/*= Compression context
 *  When compressing many times,
 *  it is recommended to allocate a context just once,
 *  and re-use it for each successive compression operation.
 *  This will make workload friendlier for system's memory.
 *  Note : re-using context is just a speed / resource optimization.
 *         It doesn't change the compression ratio, which remains identical.
 *  Note 2 : In multi-threaded environments,
 *         use one different context per thread for parallel execution.
 */
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);

/*! ZSTD_compressCCtx() :
 *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
 *  Important : in order to behave similarly to `ZSTD_compress()`,
 *  this function compresses at requested compression level,
 *  __ignoring any other parameter__ .
 *  If any advanced parameter was set using the advanced API,
 *  they will all be reset. Only `compressionLevel` remains.
 */
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     int compressionLevel);

/*= Decompression context
 *  When decompressing many times,
 *  it is recommended to allocate a context only once,
 *  and re-use it for each successive decompression operation.
 *  This will make workload friendlier for system's memory.
 *  Use one context per thread for parallel execution. */
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);

/*! ZSTD_decompressDCtx() :
 *  Same as ZSTD_decompress(),
 *  requires an allocated ZSTD_DCtx.
 *  Compatible with sticky parameters.
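 *
 *  Context-reuse illustration (editor's sketch, not part of the upstream
 *  header), assuming caller-provided `frames`, `nbFrames`, `dstBuf`, `dstCap`:
 *  \code
 *  ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *  size_t i;
 *  for (i = 0; i < nbFrames; i++) {
 *      // one allocation, many frames : the context is recycled each time
 *      size_t const dSize = ZSTD_decompressDCtx(dctx, dstBuf, dstCap,
 *                                               frames[i].ptr, frames[i].size);
 *      if (ZSTD_isError(dSize)) break;
 *  }
 *  ZSTD_freeDCtx(dctx);
 *  \endcode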
 */
ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
                                       void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize);


/***************************************
*  Advanced compression API
***************************************/

/* API design :
 *   Parameters are pushed one by one into an existing context,
 *   using ZSTD_CCtx_set*() functions.
 *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
 *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
 *   __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
 *
 *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
 *   A usage sketch is provided further below, after the ZSTD_ResetDirective definition.
 *
 *   This API supersedes all other "advanced" API entry points in the experimental section.
 *   In the future, we expect to remove the experimental entry points which are redundant with this API.
 */

/* Compression strategies, listed from fastest to strongest */
typedef enum { ZSTD_fast=1,
               ZSTD_dfast=2,
               ZSTD_greedy=3,
               ZSTD_lazy=4,
               ZSTD_lazy2=5,
               ZSTD_btlazy2=6,
               ZSTD_btopt=7,
               ZSTD_btultra=8,
               ZSTD_btultra2=9
               /* note : new strategies _might_ be added in the future.
                         Only the order (from fast to strong) is guaranteed */
} ZSTD_strategy;


typedef enum {

    /* compression parameters
     * Note: When compressing with a ZSTD_CDict these parameters are superseded
     * by the parameters used to construct the ZSTD_CDict.
     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
                              * Note that exact compression parameters are dynamically determined,
                              * depending on both compression level and srcSize (when known).
                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
                              * Note 1 : it's possible to pass a negative compression level.
                              * Note 2 : setting a level does not automatically set all other compression parameters
                              *   to default. Setting this will however eventually dynamically impact the compression
                              *   parameters which have not been manually set. The manually set
                              *   ones will 'stick'. */
    /* Advanced compression parameters :
     * It's possible to pin down compression parameters to some specific values.
     * In which case, these values are no longer dynamically selected by the compressor */
    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
                              * This will set a memory budget for streaming decompression,
                              * with larger values requiring more memory
                              * and typically compressing more.
                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
                              * Special: value 0 means "use default windowLog".
                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
                              *   requires explicitly allowing such size at streaming decompression stage. */
    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
                              * Resulting memory usage is (1 << (hashLog+2)).
                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
                              * Larger tables improve compression ratio of strategies <= dFast,
                              * and improve speed of strategies > dFast.
                              * Special: value 0 means "use default hashLog". */
    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.
                              * Resulting memory usage is (1 << (chainLog+2)).
                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
                              * Larger tables result in better and slower compression.
                              * This parameter is useless for "fast" strategy.
                              * It's still useful when using "dfast" strategy,
                              * in which case it defines a secondary probe table.
                              * Special: value 0 means "use default chainLog". */
    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
                              * More attempts result in better and slower compression.
                              * This parameter is useless for "fast" and "dFast" strategies.
                              * Special: value 0 means "use default searchLog". */
    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
                              * Note that Zstandard can still find matches of smaller size,
                              * it just tweaks its search algorithm to look for this size and larger.
                              * Larger values increase compression and decompression speed, but decrease ratio.
                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
                              * Note that currently, for all strategies < btopt, the effective minimum is 4,
                              * and for all strategies > fast, the effective maximum is 6.
                              * Special: value 0 means "use default minMatchLength". */
    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
                              * For strategies btopt, btultra & btultra2:
                              *     Length of Match considered "good enough" to stop search.
                              *     Larger values make compression stronger, and slower.
                              * For strategy fast:
                              *     Distance between match sampling.
                              *     Larger values make compression faster, and weaker.
                              * Special: value 0 means "use default targetLength". */
    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.
                              * The higher the value of selected strategy, the more complex it is,
                              * resulting in stronger and slower compression.
                              * Special: value 0 means "use default strategy". */

    /* LDM mode parameters */
    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
                                     * This parameter is designed to improve compression ratio
                                     * for large inputs, by finding large matches at long distance.
                                     * It increases memory usage and window size.
                                     * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
                                     * except when expressly set to a different value.
                                     * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
                                     * compression strategy >= ZSTD_btopt (== compression level 16+) */
    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
                              * Larger values increase memory usage and compression ratio,
                              * but decrease compression speed.
                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
                              * default: windowlog - 7.
                              * Special: value 0 means "automatically determine hashlog". */
    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.
                              * Larger/too small values usually decrease compression ratio.
                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
                              * Special: value 0 means "use default value" (default: 64). */
    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
                              * Larger values improve collision resolution but decrease compression speed.
                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
                              * Special: value 0 means "use default value" (default: 3). */
    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
                              * Larger values improve compression speed.
                              * Deviating far from default value will likely result in a compression ratio decrease.
                              * Special: value 0 means "automatically determine hashRateLog".
 */

    /* frame parameters */
    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
                              * Content size must be known at the beginning of compression.
                              * This is automatically the case when using ZSTD_compress2();
                              * for streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
    ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */

    /* multi-threading parameters */
    /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
     * Otherwise, trying to set any other value than default (0) will be a no-op and return an error.
     * In a situation where it's unknown if the linked library supports multi-threading or not,
     * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
     */
    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
                              * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
                              * ZSTD_compressStream*() consumes input and flushes output if possible, but immediately gives back control to caller,
                              * while compression is performed in parallel, within worker thread(s).
                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
                              * More workers improve speed, but also increase memory usage.
                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
                              * compression is performed inside Caller's thread, and all invocations are blocking */
    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
                              * 0 means default, which is dynamically determined based on compression parameters.
                              * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
                              * The minimum size is automatically and transparently enforced. */
    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
                              * It helps preserve compression ratio, while each job is compressed in parallel.
                              * This value is enforced only when nbWorkers >= 1.
                              * Larger values increase compression ratio, but decrease speed.
                              * Possible values range from 0 to 9 :
                              * - 0 means "default" : value will be determined by the library, depending on strategy
                              * - 1 means "no overlap"
                              * - 9 means "full overlap", using a full window size.
                              * Each intermediate rank increases/decreases load size by a factor 2 :
                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5: w/16;  4: w/32;  3: w/64;  2: w/128;  1: no overlap;  0: default
                              * default value varies between 6 and 9, depending on strategy */

    /* note : additional experimental parameters are also available
     * within the experimental section of the API.
     * At the time of this writing, they include :
     * ZSTD_c_rsyncable
     * ZSTD_c_format
     * ZSTD_c_forceMaxWindow
     * ZSTD_c_forceAttachDict
     * ZSTD_c_literalCompressionMode
     * ZSTD_c_targetCBlockSize
     * ZSTD_c_srcSizeHint
     * ZSTD_c_enableDedicatedDictSearch
     * ZSTD_c_stableInBuffer
     * ZSTD_c_stableOutBuffer
     * ZSTD_c_blockDelimiters
     * ZSTD_c_validateSequences
     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
     * note : never ever use experimentalParam? names directly;
     *        also, the enum values themselves are unstable and can still change.
     */
     ZSTD_c_experimentalParam1=500,
     ZSTD_c_experimentalParam2=10,
     ZSTD_c_experimentalParam3=1000,
     ZSTD_c_experimentalParam4=1001,
     ZSTD_c_experimentalParam5=1002,
     ZSTD_c_experimentalParam6=1003,
     ZSTD_c_experimentalParam7=1004,
     ZSTD_c_experimentalParam8=1005,
     ZSTD_c_experimentalParam9=1006,
     ZSTD_c_experimentalParam10=1007,
     ZSTD_c_experimentalParam11=1008,
     ZSTD_c_experimentalParam12=1009
} ZSTD_cParameter;

typedef struct {
    size_t error;
    int lowerBound;
    int upperBound;
} ZSTD_bounds;

/*! ZSTD_cParam_getBounds() :
 *  All parameters must belong to an interval with lower and upper bounds,
 *  otherwise they will either trigger an error or be automatically clamped.
 * @return : a structure, ZSTD_bounds, which contains
 *         - an error status field, which must be tested using ZSTD_isError()
 *         - lower and upper bounds, both inclusive
 */
ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);

/*! ZSTD_CCtx_setParameter() :
 *  Set one compression parameter, selected by enum ZSTD_cParameter.
 *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
 *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
 *  Setting a parameter is generally only possible during frame initialization (before starting compression).
 *  Exception : when using multi-threading mode (nbWorkers >= 1),
 *              the following parameters can be updated _during_ compression (within same frame):
 *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
 *              new parameters will be active for next job only (after a flush()).
 * @return : an error code (which can be tested using ZSTD_isError()).
 */
ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);

/*! ZSTD_CCtx_setPledgedSrcSize() :
 *  Total input data size to be compressed as a single frame.
 *  Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
 *  This value will also be controlled at end of frame, and trigger an error if not respected.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
 *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
 *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
 *  Note 2 : pledgedSrcSize is only valid once, for the next frame.
 *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
 *  Note 3 : Whenever all input data is provided and consumed in a single round,
 *           for example with ZSTD_compress2(),
 *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
 *           this value is automatically overridden by srcSize instead.
 */
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);

typedef enum {
    ZSTD_reset_session_only = 1,
    ZSTD_reset_parameters = 2,
    ZSTD_reset_session_and_parameters = 3
} ZSTD_ResetDirective;
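/* Advanced-API illustration (editor's sketch, not part of the upstream header):
 * the sticky-parameter workflow described above — reset, push parameters, then
 * compress with ZSTD_compress2(). `src`/`srcSize`/`dst`/`dstCapacity` are
 * caller-provided; error checks are abbreviated.
 * \code
 * ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 * ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
 * ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
 * ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
 * ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)srcSize);
 * { size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 *   if (ZSTD_isError(cSize)) { ... }  }
 * ZSTD_freeCCtx(cctx);
 * \endcode
 */

/*!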
ZSTD_CCtx_reset() :
 *  There are 2 different things that can be reset, independently or jointly :
 *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
 *                  Useful after an error, or to interrupt any ongoing compression.
 *                  Any internal data not yet flushed is cancelled.
 *                  Compression parameters and dictionary remain unchanged.
 *                  They will be used to compress next frame.
 *                  Resetting session never fails.
 *  - The parameters : changes all parameters back to "default".
 *                  This removes any reference to any dictionary too.
 *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
 *                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
 *  - Both : similar to resetting the session, followed by resetting parameters.
 */
ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);

/*! ZSTD_compress2() :
 *  Behaves the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
 *  ZSTD_compress2() always starts a new frame.
 *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
 *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
 *  - The function is always blocking, returns when compression is completed.
 *  Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
 * @return : compressed size written into `dst` (<= `dstCapacity`),
 *           or an error code if it fails (which can be tested using ZSTD_isError()).
 */
ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
                                   void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize);


/***************************************
*  Advanced decompression API
***************************************/

/* The advanced API pushes parameters one by one into an existing DCtx context.
 * Parameters are sticky, and remain valid for all following frames
 * using the same DCtx context.
 * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
 * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
 *        Therefore, no new decompression function is necessary.
 */
typedef enum {

    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
                              * the streaming API will refuse to allocate memory buffer
                              * in order to protect the host from unreasonable memory requirements.
                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
                              * Special: value 0 means "use default maximum windowLog". */

    /* note : additional experimental parameters are also available
     * within the experimental section of the API.
     * At the time of this writing, they include :
     * ZSTD_d_format
     * ZSTD_d_stableOutBuffer
     * ZSTD_d_forceIgnoreChecksum
     * ZSTD_d_refMultipleDDicts
     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
     * note : never ever use experimentalParam? names directly
     */
    ZSTD_d_experimentalParam1=1000,
    ZSTD_d_experimentalParam2=1001,
    ZSTD_d_experimentalParam3=1002,
    ZSTD_d_experimentalParam4=1003

} ZSTD_dParameter;

/*! ZSTD_dParam_getBounds() :
 *  All parameters must belong to an interval with lower and upper bounds,
 *  otherwise they will either trigger an error or be automatically clamped.
 * @return : a structure, ZSTD_bounds, which contains
 *         - an error status field, which must be tested using ZSTD_isError()
 *         - both lower and upper bounds, inclusive
 */
ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);

/*! ZSTD_DCtx_setParameter() :
 *  Set one compression parameter, selected by enum ZSTD_dParameter.
 *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
 *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
 *  Setting a parameter is only possible during frame initialization (before starting decompression).
 * @return : 0, or an error code (which can be tested using ZSTD_isError()).
 */
ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);

/*! ZSTD_DCtx_reset() :
 *  Return a DCtx to clean state.
 *  Session and parameters can be reset jointly or separately.
 *  Parameters can only be reset when no active frame is being decompressed.
 * @return : 0, or an error code, which can be tested with ZSTD_isError()
 */
ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);


/****************************
*  Streaming
****************************/

typedef struct ZSTD_inBuffer_s {
  const void* src;    /**< start of input buffer */
  size_t size;        /**< size of input buffer */
  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
} ZSTD_inBuffer;

typedef struct ZSTD_outBuffer_s {
  void*  dst;         /**< start of output buffer */
  size_t size;        /**< size of output buffer */
  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
} ZSTD_outBuffer;



/*-***********************************************************************
*  Streaming compression - HowTo
*
*  A ZSTD_CStream object is required to track streaming operations.
*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
*  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
*
*  For parallel execution, use one separate ZSTD_CStream per thread.
*
*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
*
*  Parameters are sticky : when starting a new compression on the same context,
*  it will re-use the same sticky parameters as previous compression session.
*  When in doubt, it's recommended to fully initialize the context before usage.
*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
*  set more specific parameters, the pledged source size, or load a dictionary.
*
*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
*  consume input stream. The function will automatically update both `pos`
*  fields within `input` and `output`.
*  Note that the function may not consume the entire input, for example, because
*  the output buffer is already full, in which case `input.pos < input.size`.
*  The caller must check if input has been entirely consumed.
*  If not, the caller must make some room to receive more compressed data,
*  and then present again remaining input data.
*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
*  but doesn't guarantee maximal forward progress. This is especially relevant
*  when compressing with multiple threads.
The call won't block if it can
*  consume some input, but if it can't it will wait for some, but not all,
*  output to be flushed.
* @return : provides a minimum amount of data remaining to be flushed from internal buffers
*           or an error code, which can be tested using ZSTD_isError().
*
*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
*  operation.
*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
*  block until the flush is complete or the output buffer is full.
* @return : 0 if internal buffers are entirely flushed,
*           >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
*           or an error code, which can be tested using ZSTD_isError().
*
*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
*  It will perform a flush and write frame epilogue.
*  The epilogue is required for decoders to consider a frame completed.
*  flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
*  start a new frame.
*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
*  block until the flush is complete or the output buffer is full.
* @return : 0 if frame fully completed and fully flushed,
*           >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
*           or an error code, which can be tested using ZSTD_isError().
*
*  A worked loop illustrating this protocol follows the ZSTD_EndDirective definition below.
* *******************************************************************/

typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
/*===== ZSTD_CStream management functions =====*/
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);

/*===== Streaming compression functions =====*/
typedef enum {
    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
    ZSTD_e_flush=1,    /* flush any data provided so far,
                        * it creates (at least) one new block, that can be decoded immediately on reception;
                        * frame will continue: any future data can still reference previously compressed data, improving compression.
                        * note : multithreaded compression will block to flush as much output as possible. */
    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
                        * After that point, any additional data starts a new frame.
                        * note : each frame is independent (does not reference any content from previous frame).
                        * note : multithreaded compression will block to flush as much output as possible. */
} ZSTD_EndDirective;
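/* Streaming-compression illustration (editor's sketch, not part of the
 * upstream header): the ZSTD_e_continue / ZSTD_e_end loop described in the
 * HowTo above. `readInput()` and `writeOutput()` are hypothetical caller
 * helpers; error checks are abbreviated (test returns with ZSTD_isError()).
 * \code
 * ZSTD_CCtx* const cctx  = ZSTD_createCCtx();
 * char* const inBuf  = malloc(ZSTD_CStreamInSize());
 * char* const outBuf = malloc(ZSTD_CStreamOutSize());
 * for (;;) {
 *     size_t const readSize = readInput(inBuf, ZSTD_CStreamInSize());
 *     int const lastChunk = (readSize < ZSTD_CStreamInSize());
 *     ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
 *     ZSTD_inBuffer input = { inBuf, readSize, 0 };
 *     int finished;
 *     do {
 *         ZSTD_outBuffer output = { outBuf, ZSTD_CStreamOutSize(), 0 };
 *         size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
 *         writeOutput(outBuf, output.pos);
 *         // e_end : loop until fully flushed ; e_continue : until input consumed
 *         finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
 *     } while (!finished);
 *     if (lastChunk) break;
 * }
 * ZSTD_freeCCtx(cctx);
 * \endcode
 */

/*!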
/*! ZSTD_compressStream2() :
 *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
 *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
 *  - Compression parameters cannot be changed once compression is started (save for a list of exceptions in multi-threading mode)
 *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
 *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
 *  - endOp must be a valid directive
 *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
 *  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads,
 *    flushes to output whatever is available, and then immediately returns, just indicating that there is some data remaining to be flushed.
 *    The function nonetheless guarantees forward progress : it will return only after it has read or written at least one byte.
 *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity,
 *    the function delegates to ZSTD_compress2(), which is always blocking.
 *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
 *            or an error code, which can be tested using ZSTD_isError().
 *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
 *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
 *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
 *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
 *    only ZSTD_e_end or ZSTD_e_flush operations are allowed.
 *    Before starting a new compression job, or changing compression parameters,
 *    it is required to fully flush internal buffers.
 */
ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                                         ZSTD_outBuffer* output,
                                         ZSTD_inBuffer* input,
                                         ZSTD_EndDirective endOp);


/* These buffer sizes are softly recommended.
 * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
 * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
 * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
 *
 * However, note that these recommendations are from the perspective of a C caller program.
 * If the streaming interface is invoked from some other language,
 * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
 * a major performance rule is to reduce crossing such interface to an absolute minimum.
 * It is not rare that more time ends up being spent on the interface than on compression itself.
 * In such cases, prefer using large buffers, as large as practical,
 * for both input and output, to reduce the number of round trips.
 */
ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block. */
/* *****************************************************************************
 * The following is a legacy streaming API.
 * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
 * It is redundant, but remains fully supported.
 * Advanced parameters and dictionary compression can only be used through the
 * new API.
 ******************************************************************************/

/*!
 * Equivalent to:
 *
 *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
 *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
 *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
 */
ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
/*!
 * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
 * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
 * the next read size (if non-zero and not an error). ZSTD_compressStream2()
 * returns the minimum number of bytes left to flush (if non-zero and not an error).
 */
ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);


/*-***************************************************************************
*  Streaming decompression - HowTo
*
*  A ZSTD_DStream object is required to track streaming operations.
*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
*  ZSTD_DStream objects can be re-used multiple times.
*
*  Use ZSTD_initDStream() to start a new decompression operation.
* @return : recommended first input size
*  Alternatively, use advanced API to set specific properties.
*
*  Use ZSTD_decompressStream() repetitively to consume your input.
*  The function will update both `pos` fields.
*  If `input.pos < input.size`, some input has not been consumed.
*  It's up to the caller to present the remaining data again.
*  The function tries to flush all data decoded immediately, respecting output buffer size.
*  If `output.pos < output.size`, decoder has flushed everything it could.
*  But if `output.pos == output.size`, there might be some data left within internal buffers.
*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
* @return : 0 when a frame is completely decoded and fully flushed,
*        or an error code, which can be tested using ZSTD_isError(),
*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
*                                the return value is a suggested next input size (just a hint for better latency)
*                                that will never request more than the remaining frame size.
* *******************************************************************************/

typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
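/* A minimal sketch of the streaming decompression loop described above.
 * Error checks are elided; readChunk() and writeChunk() are hypothetical
 * I/O helpers, not part of this library.
 * \code
 *   ZSTD_DStream* const dstream = ZSTD_createDStream();
 *   size_t const inSize  = ZSTD_DStreamInSize();
 *   size_t const outSize = ZSTD_DStreamOutSize();
 *   void* const inBuff  = malloc(inSize);
 *   void* const outBuff = malloc(outSize);
 *   size_t readSize;
 *   ZSTD_initDStream(dstream);
 *   while ((readSize = readChunk(inBuff, inSize)) != 0) {
 *       ZSTD_inBuffer input = { inBuff, readSize, 0 };
 *       while (input.pos < input.size) {
 *           ZSTD_outBuffer output = { outBuff, outSize, 0 };
 *           size_t const ret = ZSTD_decompressStream(dstream, &output, &input);
 *           if (ZSTD_isError(ret)) break;     // invalid or truncated frame
 *           writeChunk(outBuff, output.pos);  // flush whatever was regenerated
 *       }
 *   }
 *   ZSTD_freeDStream(dstream);
 * \endcode
 */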
/*===== ZSTD_DStream management functions =====*/
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);

/*===== Streaming decompression functions =====*/

/* This function is redundant with the advanced API and equivalent to:
 *
 *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
 *     ZSTD_DCtx_refDDict(zds, NULL);
 */
ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);

ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);

ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */


/**************************
*  Simple dictionary API
***************************/
/*! ZSTD_compress_usingDict() :
 *  Compression at an explicit compression level using a Dictionary.
 *  A dictionary can be any arbitrary data segment (also called a prefix),
 *  or a buffer with specified information (see dictBuilder/zdict.h).
 *  Note : This function loads the dictionary, resulting in significant startup delay.
 *         It's intended for a dictionary used only once.
 *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
                                           void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     const void* dict, size_t dictSize,
                                           int compressionLevel);

/*! ZSTD_decompress_usingDict() :
 *  Decompression using a known Dictionary.
 *  Dictionary must be identical to the one used during compression.
 *  Note : This function loads the dictionary, resulting in significant startup delay.
 *         It's intended for a dictionary used only once.
 *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
                                             void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize);


/***********************************
 *  Bulk processing dictionary API
 **********************************/
typedef struct ZSTD_CDict_s ZSTD_CDict;

/*! ZSTD_createCDict() :
 *  When compressing multiple messages or blocks using the same dictionary,
 *  it's recommended to digest the dictionary only once, since it's a costly operation.
 *  ZSTD_createCDict() will create a state from digesting a dictionary.
 *  The resulting state can be used for future compression operations with very limited startup cost.
 *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
 * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
 *  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
 *  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
 *           in which case the only thing that it transports is the @compressionLevel.
 *           This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
 *           expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
                                         int compressionLevel);

/*! ZSTD_freeCDict() :
 *  Function frees memory allocated by ZSTD_createCDict(). */
ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);

/*! ZSTD_compress_usingCDict() :
 *  Compression using a digested Dictionary.
 *  Recommended when same dictionary is used multiple times.
 *  Note : compression level is _decided at dictionary creation time_,
 *         and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                            void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize,
                                      const ZSTD_CDict* cdict);


typedef struct ZSTD_DDict_s ZSTD_DDict;

/*! ZSTD_createDDict() :
 *  Create a digested dictionary, ready to start decompression operation without startup delay.
 *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);

/*! ZSTD_freeDDict() :
 *  Function frees memory allocated with ZSTD_createDDict() */
ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);

/*! ZSTD_decompress_usingDDict() :
 *  Decompression using a digested Dictionary.
 *  Recommended when same dictionary is used multiple times. */
ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
                                              void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        const ZSTD_DDict* ddict);


/********************************
 *  Dictionary helper functions
 *******************************/

/*! ZSTD_getDictID_fromDict() :
 *  Provides the dictID stored within dictionary.
 *  if @return == 0, the dictionary is not conformant with Zstandard specification.
 *  It can still be loaded, but as a content-only dictionary. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);

/*! ZSTD_getDictID_fromDDict() :
 *  Provides the dictID of the dictionary loaded into `ddict`.
 *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);

/*! ZSTD_getDictID_fromFrame() :
 *  Provides the dictID required to decompress the frame stored within `src`.
 *  If @return == 0, the dictID could not be decoded.
 *  This could be for one of the following reasons :
 *  - The frame does not require a dictionary to be decoded (most common case).
 *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
 *    Note : this use case also happens when using a non-conformant dictionary.
 *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
 *  - This is not a Zstandard frame.
 *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);


/*******************************************************************************
 * Advanced dictionary and prefix API
 *
 * This API allows dictionaries to be used with ZSTD_compress2(),
 * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
 * only reset when the context is reset with ZSTD_reset_parameters or
 * ZSTD_reset_session_and_parameters. Prefixes are single-use.
 ******************************************************************************/
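/* A minimal sketch of the sticky-dictionary flow this section describes :
 * load a dictionary once, then compress several inputs with it. Error checks
 * are elided; dictBuf, dictSize and the srcs/srcSizes arrays are assumed to
 * be supplied by the caller.
 * \code
 *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);  // set params before loading
 *   ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictSize);         // sticky : applies to all following frames
 *   for (size_t i = 0; i < nbInputs; i++) {
 *       size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, srcs[i], srcSizes[i]);
 *       if (ZSTD_isError(cSize)) break;
 *       // ... store or transmit dst[0..cSize) ...
 *   }
 *   ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);  // drops the dictionary
 *   ZSTD_freeCCtx(cctx);
 * \endcode
 */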
/*! ZSTD_CCtx_loadDictionary() :
 *  Create an internal CDict from `dict` buffer.
 *  Decompression will have to use same dictionary.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
 *           meaning "return to no-dictionary mode".
 *  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
 *           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
 *  Note 2 : Loading a dictionary involves building tables.
 *           It's also a CPU-consuming operation, with non-negligible impact on latency.
 *           Tables are dependent on compression parameters, and for this reason,
 *           compression parameters can no longer be changed after loading a dictionary.
 *  Note 3 : `dict` content will be copied internally.
 *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
 *           In such a case, dictionary buffer must outlive its users.
 *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
 *           to precisely select how dictionary content must be interpreted. */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);

/*! ZSTD_CCtx_refCDict() :
 *  Reference a prepared dictionary, to be used for all next compressed frames.
 *  Note that compression parameters are enforced from within CDict,
 *  and supersede any compression parameter previously set within CCtx.
 *  The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
 *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
 *  The dictionary will remain valid for future compressed frames using same CCtx.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
 *  Note 1 : Currently, only one dictionary can be managed.
 *           Referencing a new dictionary effectively "discards" any previous one.
 *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);

/*! ZSTD_CCtx_refPrefix() :
 *  Reference a prefix (single-usage dictionary) for next compressed frame.
 *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
 *  Decompression will need same prefix to properly regenerate data.
 *  Compressing with a prefix is similar in outcome to performing a diff and compressing it,
 *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
 *  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
 *           Its content must remain unmodified during compression.
 *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
 *           ensure that the window size is large enough to contain the entire source.
 *           See ZSTD_c_windowLog.
 *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
 *           It's a CPU-consuming operation, with non-negligible impact on latency.
 *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
 *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
 *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
                                 const void* prefix, size_t prefixSize);

/*! ZSTD_DCtx_loadDictionary() :
 *  Create an internal DDict from dict buffer,
 *  to be used to decompress next frames.
 *  The dictionary remains valid for all future frames, until explicitly invalidated.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
 *            meaning "return to no-dictionary mode".
 *  Note 1 : Loading a dictionary involves building tables,
 *           which has a non-negligible impact on CPU usage and latency.
 *           It's recommended to "load once, use many times", to amortize the cost.
 *  Note 2 : `dict` content will be copied internally, so `dict` can be released after loading.
 *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
 *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
 *           how dictionary content is loaded and interpreted. */
ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);

/*! ZSTD_DCtx_refDDict() :
 *  Reference a prepared dictionary, to be used to decompress next frames.
 *  The dictionary remains active for decompression of future frames using same DCtx.
 *
 *  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
 *  will store the DDict references in a table, and the DDict used for decompression
 *  will be determined at decompression time, as per the dict ID in the frame.
 *  The memory for the table is allocated on the first call to refDDict, and can be
 *  freed with ZSTD_freeDCtx().
 *
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Note 1 : Currently, only one dictionary can be managed.
 *           Referencing a new dictionary effectively "discards" any previous one.
 *  Special: referencing a NULL DDict means "return to no-dictionary mode".
 *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. */
ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);

/*! ZSTD_DCtx_refPrefix() :
 *  Reference a prefix (single-usage dictionary) to decompress next frame.
 *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
 *  and must use the same prefix as the one used during compression.
 *  Prefix is **only used once**. Reference is discarded at end of frame.
 *  End of frame is reached when ZSTD_decompressStream() returns 0.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
 *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
 *           Prefix buffer must remain unmodified up to the end of frame,
 *           reached when ZSTD_decompressStream() returns 0.
 *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
 *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
 *  Note 4 : Referencing a raw content prefix has almost no CPU nor memory cost.
 *           A full dictionary is more costly, as it requires building tables. */
ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
                                 const void* prefix, size_t prefixSize);
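/* A minimal sketch of the diff-like usage described above : `oldVersion`
 * acts as the shared prefix and must be referenced identically on both
 * sides, once per frame. Buffers, sizes and the cctx/dctx are assumed to be
 * supplied by the caller; error checks are elided.
 * \code
 *   // Compression side : reference the prior version as a prefix.
 *   ZSTD_CCtx_refPrefix(cctx, oldVersion, oldSize);
 *   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, newVersion, newSize);
 *
 *   // Decompression side : must reference the exact same prefix.
 *   ZSTD_DCtx_refPrefix(dctx, oldVersion, oldSize);
 *   size_t const rSize = ZSTD_decompressDCtx(dctx, out, outCapacity, dst, cSize);
 * \endcode
 */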
/* ===   Memory management   === */

/*! ZSTD_sizeof_*() :
 *  These functions give the _current_ memory usage of selected object.
 *  Note that object memory usage can evolve (increase or decrease) over time. */
ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);

#endif  /* ZSTD_H_235446 */


/* **************************************************************************************
 *   ADVANCED AND EXPERIMENTAL FUNCTIONS
 ****************************************************************************************
 * The definitions in the following section are considered experimental.
 * They are provided for advanced scenarios.
 * They should never be used with a dynamic library, as prototypes may change in the future.
 * Use them only in association with static linking.
 * ***************************************************************************************/

#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY

/****************************************************************************************
 *   experimental API (static linking only)
 ****************************************************************************************
 * The following symbols and constants
 * are not planned to join "stable API" status in the near future.
 * They can still change in future versions.
 * Some of them are planned to remain in the static_only section indefinitely.
 * Some of them might be removed in the future (especially when redundant with existing stable functions)
 * ***************************************************************************************/

#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1)   /* minimum input size required to query frame header size */
#define ZSTD_FRAMEHEADERSIZE_MIN(format)    ((format) == ZSTD_f_zstd1 ? 6 : 2)
#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
#define ZSTD_SKIPPABLEHEADERSIZE    8

/* compression parameter bounds */
#define ZSTD_WINDOWLOG_MAX_32    30
#define ZSTD_WINDOWLOG_MAX_64    31
#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
#define ZSTD_WINDOWLOG_MIN       10
#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
#define ZSTD_HASHLOG_MIN          6
#define ZSTD_CHAINLOG_MAX_32     29
#define ZSTD_CHAINLOG_MAX_64     30
#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? \
                                 ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN
#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)
#define ZSTD_SEARCHLOG_MIN        1
#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */
#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */
#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX
#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */
#define ZSTD_STRATEGY_MIN        ZSTD_fast
#define ZSTD_STRATEGY_MAX        ZSTD_btultra2

#define ZSTD_OVERLAPLOG_MIN       0
#define ZSTD_OVERLAPLOG_MAX       9

#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
                                           * to preserve host's memory from unreasonable requirements.
                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */

/* LDM parameter bounds */
#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN
#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX
#define ZSTD_LDM_MINMATCH_MIN        4
#define ZSTD_LDM_MINMATCH_MAX     4096
#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
#define ZSTD_LDM_HASHRATELOG_MIN     0
#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)

/* Advanced parameter bounds */
#define ZSTD_TARGETCBLOCKSIZE_MIN   64
#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
#define ZSTD_SRCSIZEHINT_MIN        0
#define ZSTD_SRCSIZEHINT_MAX        INT_MAX

/* internal */
#define ZSTD_HASHLOG3_MAX           17


/* ---  Advanced types  --- */

typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;

typedef struct {
    unsigned int offset;      /* The offset of the match. (NOT the same as the offset code)
                               * If offset == 0 and matchLength == 0, this sequence represents the last
                               * literals in the block of litLength size.
                               */
    unsigned int litLength;   /* Literal length of the sequence. */
    unsigned int matchLength; /* Match length of the sequence. */
    unsigned int rep;         /* Represents which repeat offset is represented by the field 'offset'.
                               * Ranges from [0, 3].
                               *
                               * Repeat offsets are essentially previous offsets from previous sequences sorted in
                               * recency order. For more detail, see doc/zstd_compression_format.md
                               *
                               * If rep == 0, then 'offset' does not contain a repeat offset.
                               * If rep > 0:
                               *  If litLength != 0:
                               *      rep == 1 --> offset == repeat_offset_1
                               *      rep == 2 --> offset == repeat_offset_2
                               *      rep == 3 --> offset == repeat_offset_3
                               *  If litLength == 0:
                               *      rep == 1 --> offset == repeat_offset_2
                               *      rep == 2 --> offset == repeat_offset_3
                               *      rep == 3 --> offset == repeat_offset_1 - 1
                               *
                               * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
                               * 'rep', but repeat offsets do not necessarily need to be calculated from an external
                               * sequence provider's perspective. For example, ZSTD_compressSequences() does not
                               * use this 'rep' field at all (as of now).
                               */
} ZSTD_Sequence;

typedef struct {
    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */
    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */
    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */
} ZSTD_compressionParameters;

typedef struct {
    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
    int checksumFlag;    /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */
    int noDictIDFlag;    /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
} ZSTD_frameParameters;

typedef struct {
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;
} ZSTD_parameters;

typedef enum {
    ZSTD_dct_auto = 0,       /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
} ZSTD_dictContentType_e;

typedef enum {
    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
} ZSTD_dictLoadMethod_e;

typedef enum {
    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.
                                 * Useful to save 4 bytes per generated frame.
                                 * Decoder cannot recognise this format automatically : it needs to be instructed. */
} ZSTD_format_e;

typedef enum {
    /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
    ZSTD_d_validateChecksum = 0,
    ZSTD_d_ignoreChecksum = 1
} ZSTD_forceIgnoreChecksum_e;

typedef enum {
    /* Note: this enum controls ZSTD_d_refMultipleDDicts */
    ZSTD_rmd_refSingleDDict = 0,
    ZSTD_rmd_refMultipleDDicts = 1
} ZSTD_refMultipleDDicts_e;

typedef enum {
    /* Note: this enum and the behavior it controls are effectively internal
     * implementation details of the compressor. They are expected to continue
     * to evolve and should be considered only in the context of extremely
     * advanced performance tuning.
     *
     * Zstd currently supports the use of a CDict in three ways:
     *
     * - The contents of the CDict can be copied into the working context. This
     *   means that the compression can search both the dictionary and input
     *   while operating on a single set of internal tables. This makes
     *   the compression faster per-byte of input. However, the initial copy of
     *   the CDict's tables incurs a fixed cost at the beginning of the
     *   compression. For small compressions (< 8 KB), that copy can dominate
     *   the cost of the compression.
     *
     * - The CDict's tables can be used in-place. In this model, compression is
     *   slower per input byte, because the compressor has to search two sets of
     *   tables. However, this model incurs no start-up cost (as long as the
     *   working context's tables can be reused). For small inputs, this can be
     *   faster than copying the CDict's tables.
     *
     * - The CDict's tables are not used at all, and instead we use the working
     *   context alone to reload the dictionary and use params based on the source
     *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
     *   This method is effective when the dictionary sizes are very small relative
     *   to the input size, and the input size is fairly large to begin with.
     *
     * Zstd has a simple internal heuristic that selects which strategy to use
     * at the beginning of a compression. However, if experimentation shows that
     * Zstd is making poor choices, it is possible to override that choice with
     * this enum.
     */
    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */
} ZSTD_dictAttachPref_e;

typedef enum {
  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.
                               *   Negative compression levels will be uncompressed, and positive compression
                               *   levels will be compressed. */
  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
                               *   emitted if Huffman compression is not profitable. */
  ZSTD_lcm_uncompressed = 2   /**< Always emit uncompressed literals. */
} ZSTD_literalCompressionMode_e;


/***************************************
*  Frame size functions
***************************************/

/*! ZSTD_findDecompressedSize() :
 *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
 *  `srcSize` must be the _exact_ size of this series
 *       (i.e. there should be a frame boundary at `src + srcSize`)
 *  @return : - decompressed size of all data in all successive frames
 *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
 *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
 *
 *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
 *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
 *            In which case, it's necessary to use streaming mode to decompress data.
 *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
 *   note 3 : decompressed size can be very large (a 64-bit value),
 *            potentially larger than what local system can handle as a single memory segment.
 *            In which case, it's necessary to use streaming mode to decompress data.
 *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
 *            Always ensure result fits within application's authorized limits.
 *            Each application can set its own limits.
 *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
 *            read each contained frame header. This is fast as most of the data is skipped,
 *            however it does mean that all frame data must be present and valid. */
ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);

/*! ZSTD_decompressBound() :
 *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
 *  `srcSize` must be the _exact_ size of this series
 *       (i.e. there should be a frame boundary at `src + srcSize`)
 *  @return : - upper-bound for the decompressed size of all data in all successive frames
 *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
 *
 *  note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
 *  note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
 *           in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
 *  note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
 *             upper-bound = # blocks * min(128 KB, Window_Size)
 */
ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);

/*! ZSTD_frameHeaderSize() :
 *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
 * @return : size of the Frame Header,
 *           or an error code (if srcSize is too small) */
ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
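/* A minimal sketch of sizing an output buffer with the bound above. This is
 * only reasonable when `src` is trusted, since a bound derived from frame
 * headers can be very large; error checks are elided.
 * \code
 *   unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
 *   if (bound != ZSTD_CONTENTSIZE_ERROR && bound <= SIZE_MAX) {
 *       void* const dst = malloc((size_t)bound);
 *       size_t const dSize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
 *       // dst[0..dSize) holds the regenerated data if !ZSTD_isError(dSize)
 *   }
 * \endcode
 */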
typedef enum {
  ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
  ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
} ZSTD_sequenceFormat_e;

/*! ZSTD_generateSequences() :
 *  Generate sequences using ZSTD_compress2, given a source buffer.
 *
 *  Each block will end with a dummy sequence
 *  with offset == 0, matchLength == 0, and litLength == length of last literals.
 *  litLength may be == 0, and if so, then the sequence (of: 0 ml: 0 ll: 0)
 *  simply acts as a block delimiter.
 *
 *  zc can be used to insert custom compression params.
 *  This function invokes ZSTD_compress2
 *
 *  The output of this function can be fed into ZSTD_compressSequences() with CCtx
 *  setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
 * @return : number of sequences generated
 */
ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
                                          size_t outSeqsSize, const void* src, size_t srcSize);

/*! ZSTD_mergeBlockDelimiters() :
 *  Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
 *  by merging them into the literals of the next sequence.
 *
 *  As such, the final generated result has no explicit representation of block boundaries,
 *  and the final last literals segment is not represented in the sequences.
 *
 *  The output of this function can be fed into ZSTD_compressSequences() with CCtx
 *  setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
 * @return : number of sequences left after merging
 */
ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);

/*! ZSTD_compressSequences() :
 *  Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
 *  If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
 *  The entire source is compressed into a single frame.
 *
 *  The compression behavior changes based on cctx params. In particular:
 *    If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
 *    no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
 *    the block size derived from the cctx, and sequences may be split. This is the default setting.
 *
 *    If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
 *    block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
 *
 *    If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
 *    behavior. If ZSTD_c_validateSequences == 1, then if a sequence is invalid (see doc/zstd_compression_format.md for
 *    specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
 *
 *  In addition to the two adjustable experimental params, there are other important cctx params.
 *  - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
 *  - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
 *  - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
 *    is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
 *
 *  Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
 *  Note 2: Once we integrate the ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
 *          and cannot emit an RLE block that disagrees with the repcode history
 * @return : final compressed size or a ZSTD error.
 */
ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
                                          const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                                          const void* src, size_t srcSize);
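/* A minimal sketch of the sequence-level pipeline formed by the three
 * functions above : generate, merge away the delimiters, then re-compress
 * without them. Sizing outSeqs at one sequence per source byte (plus one)
 * is a deliberately generous upper bound assumed for this sketch; error
 * checks are elided.
 * \code
 *   ZSTD_Sequence* const seqs = malloc((srcSize + 1) * sizeof(ZSTD_Sequence));
 *   size_t nbSeqs = ZSTD_generateSequences(cctx, seqs, srcSize + 1, src, srcSize);
 *   nbSeqs = ZSTD_mergeBlockDelimiters(seqs, nbSeqs);   // drop block delimiters
 *   ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
 *   size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
 *                                               seqs, nbSeqs, src, srcSize);
 *   free(seqs);
 * \endcode
 */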
/*! ZSTD_writeSkippableFrame() :
 *  Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
 *
 *  Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
 *  ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
 *  As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
 *  the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
 *
 *  Returns an error if destination buffer is not large enough, if the source size is not representable
 *  with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
 *
 * @return : number of bytes written or a ZSTD error.
 */
ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize, unsigned magicVariant);


/***************************************
*  Memory management
***************************************/

/*! ZSTD_estimate*() :
 *  These functions make it possible to estimate memory usage
 *  of a future {D,C}Ctx, before its creation.
 *
 *  ZSTD_estimateCCtxSize() will provide a memory budget large enough
 *  for any compression level up to selected one.
 *  Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
 *         does not include space for a window buffer.
 *         Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
 *  The estimate will assume the input may be arbitrarily large,
 *  which is the worst case.
 *
 *  When srcSize can be bound by a known and rather "small" value,
 *  this fact can be used to provide a tighter estimation
 *  because the CCtx compression context will need less memory.
 *  This tighter estimation can be provided by more advanced functions
 *  ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
 *  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
 *  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
 *
 *  Note 2 : only single-threaded compression is supported.
 *  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
 */
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);

/*! ZSTD_estimateCStreamSize() :
 *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
 *  It will also consider src size to be arbitrarily "large", which is worst case.
 *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
 *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
 *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
 *  Only single-threaded compression is supported; this function will return an error code if ZSTD_c_nbWorkers is >= 1.
 *  Note : CStream size estimation is only correct for single-threaded compression.
 *  ZSTD_DStream memory budget depends on window size.
 *  This information can be passed manually, using ZSTD_estimateDStreamSize,
 *  or deducted from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
 *  Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
 *         an internal ?Dict will be created, whose additional size is not estimated here.
 *         In this case, get total size by adding ZSTD_estimate?DictSize
 */
ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);

/*! ZSTD_estimate?DictSize() :
 *  ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
 *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
 *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
 */
ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);

/*! ZSTD_initStatic*() :
 *  Initialize an object using a pre-allocated fixed-size buffer.
 *  workspace: The memory area to emplace the object into.
 *             Provided pointer *must be 8-byte aligned*.
 *             Buffer must outlive object.
 *  workspaceSize: Use ZSTD_estimate*Size() to determine
 *                 how large workspace must be to support target scenario.
 * @return : pointer to object (same address as workspace, just different type),
 *           or NULL if error (size too small, incorrect alignment, etc.)
 *  Note : zstd will never resize nor malloc() when using a static buffer.
 *         If the object requires more memory than available,
 *         zstd will just error out (typically ZSTD_error_memory_allocation).
 *  Note 2 : there is no corresponding "free" function.
 *           Since workspace is allocated externally, it must be freed externally too.
 *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
 *           into its associated cParams.
 *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by
 *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
 *  Limitation 2 : static cctx currently not compatible with multi-threading.
 *  Limitation 3 : static dctx is incompatible with legacy support.
 */
ZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */

ZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */

ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
                                        void* workspace, size_t workspaceSize,
                                        const void* dict, size_t dictSize,
                                        ZSTD_dictLoadMethod_e dictLoadMethod,
                                        ZSTD_dictContentType_e dictContentType,
                                        ZSTD_compressionParameters cParams);

ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
                                        void* workspace, size_t workspaceSize,
                                        const void* dict, size_t dictSize,
                                        ZSTD_dictLoadMethod_e dictLoadMethod,
                                        ZSTD_dictContentType_e dictContentType);
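/* A minimal sketch of static allocation as described above : estimate the
 * budget, carve out a workspace, then use the returned CCtx like any other.
 * The workspace comes from malloc() purely for illustration; any 8-byte
 * aligned, sufficiently large buffer works. Error checks are elided.
 * \code
 *   size_t const wkspSize = ZSTD_estimateCCtxSize(3);   // budget for levels up to 3
 *   void*  const wksp     = malloc(wkspSize);           // malloc() result is suitably aligned
 *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
 *   if (cctx != NULL) {
 *       size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3);
 *       // no ZSTD_freeCCtx() : just release the workspace itself
 *   }
 *   free(wksp);
 * \endcode
 */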
/*! Custom memory allocation :
 *  These prototypes make it possible to pass your own allocation/free functions.
 *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
 *  All allocation/free operations will be completed using these custom variants instead of regular ones.
 */
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
static
#ifdef __GNUC__
__attribute__((__unused__))
#endif
ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */

ZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);

ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
                                                  ZSTD_dictContentType_e dictContentType,
                                                  ZSTD_compressionParameters cParams,
                                                  ZSTD_customMem customMem);

/*! Thread pool :
 *  These prototypes make it possible to share a thread pool among multiple compression contexts.
 *  This can limit resources for applications with multiple threads where each one uses
 *  a threaded compression mode (via ZSTD_c_nbWorkers parameter).
 *  ZSTD_createThreadPool creates a new thread pool with a given number of threads.
 *  Note that such a pool must remain alive for as long as it is used.
 *  ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
 *  to use an internal thread pool).
 *  ZSTD_freeThreadPool frees a thread pool.
 */
typedef struct POOL_ctx_s ZSTD_threadPool;
ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);
ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);

/*
 * This API is temporary and is expected to change or disappear in the future!
 */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
    const void* dict, size_t dictSize,
    ZSTD_dictLoadMethod_e dictLoadMethod,
    ZSTD_dictContentType_e dictContentType,
    const ZSTD_CCtx_params* cctxParams,
    ZSTD_customMem customMem);

ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
    const void* dict, size_t dictSize,
    ZSTD_dictLoadMethod_e dictLoadMethod,
    ZSTD_dictContentType_e dictContentType,
    ZSTD_customMem customMem);
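/* A minimal sketch of plugging in a custom allocator via ZSTD_customMem :
 * a hypothetical counting wrapper around malloc()/free(), useful to observe
 * how much memory a context requests. Error checks are elided.
 * \code
 *   static void* countingAlloc(void* opaque, size_t size)
 *   {
 *       *(size_t*)opaque += size;   // tally bytes requested so far
 *       return malloc(size);
 *   }
 *   static void countingFree(void* opaque, void* address)
 *   {
 *       (void)opaque;
 *       free(address);
 *   }
 *
 *   size_t allocated = 0;
 *   ZSTD_customMem const cmem = { countingAlloc, countingFree, &allocated };
 *   ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
 *   // ... use cctx normally; `allocated` reflects bytes requested ...
 *   ZSTD_freeCCtx(cctx);
 * \endcode
 */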
/***************************************
*  Advanced compression functions
***************************************/

/*! ZSTD_createCDict_byReference() :
 *  Create a digested dictionary for compression
 *  Dictionary content is just referenced, not duplicated.
 *  As a consequence, `dictBuffer` **must** outlive CDict,
 *  and its content must remain unmodified throughout the lifetime of CDict.
 *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);

/*! ZSTD_getDictID_fromCDict() :
 *  Provides the dictID of the dictionary loaded into `cdict`.
 *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);

/*! ZSTD_getCParams() :
 * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
 * `estimatedSrcSize` value is optional, select 0 if not known */
ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);

/*! ZSTD_getParams() :
 *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
 *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);

/*! ZSTD_checkCParams() :
 *  Ensure param values remain within authorized range.
 * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);

/*! ZSTD_adjustCParams() :
 *  optimize params for a given `srcSize` and `dictSize`.
 * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
 * `dictSize` must be `0` when there is no dictionary.
 *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
 *  This function never fails (wide contract) */
ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);

/*! ZSTD_compress_advanced() :
 *  Note : this function is now DEPRECATED.
 *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
 *  This prototype will be marked as deprecated and generate a compilation warning on reaching v1.5.x */
ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
                                          void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize,
                                    const void* dict, size_t dictSize,
                                          ZSTD_parameters params);

/*! ZSTD_compress_usingCDict_advanced() :
 *  Note : this function is now REDUNDANT.
 *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
 *  This prototype will be marked as deprecated and generate a compilation warning in some future version */
ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                                     void* dst, size_t dstCapacity,
                                               const void* src, size_t srcSize,
                                               const ZSTD_CDict* cdict,
                                                     ZSTD_frameParameters fParams);

/*! ZSTD_CCtx_loadDictionary_byReference() :
 *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
 *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);

/*! ZSTD_CCtx_loadDictionary_advanced() :
 *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
 *  how to load the dictionary (by copy ? by reference ?)
 *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);

/*! ZSTD_CCtx_refPrefix_advanced() :
 *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
 *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);

/* ===   experimental parameters   === */
/* these parameters can be used with ZSTD_setParameter()
 * they are not guaranteed to remain supported in the future */

/* Enables rsyncable mode,
 * which makes compressed files more rsync friendly
 * by adding periodic synchronization points to the compressed data.
 * The target average block size is ZSTD_c_jobSize / 2.
 * It's possible to modify the job size to increase or decrease
 * the granularity of the synchronization point.
 * Once the jobSize is smaller than the window size,
 * it will result in compression ratio degradation.
 * NOTE 1: rsyncable mode only works when multithreading is enabled.
 * NOTE 2: rsyncable performs poorly in combination with long range mode,
 * since it will decrease the effectiveness of synchronization points,
 * though mileage may vary.
 * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
 * If the selected compression level is already running significantly slower,
 * the overall speed won't be significantly impacted.
 */
#define ZSTD_c_rsyncable ZSTD_c_experimentalParam1

/* Select a compression format.
 * The value must be of type ZSTD_format_e.
 * See ZSTD_format_e enum definition for details */
#define ZSTD_c_format ZSTD_c_experimentalParam2

/* Force back-reference distances to remain < windowSize,
 * even when referencing into Dictionary content (default:0) */
#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3

/* Controls whether the contents of a CDict
 * are used in place, or copied into the working context.
 * Accepts values from the ZSTD_dictAttachPref_e enum.
 * See the comments on that enum for an explanation of the feature. */
#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4

/* Controls how the literals are compressed (default is auto).
 * The value must be of type ZSTD_literalCompressionMode_e.
 * See ZSTD_literalCompressionMode_e enum definition for details. */
#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5

/* Tries to fit compressed block size to be around targetCBlockSize.
 * No target when targetCBlockSize == 0.
 * There is no guarantee on compressed block size (default:0) */
#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6

/* User's best guess of source size.
 * Hint is not valid when srcSizeHint == 0.
 * There is no guarantee that hint is close to actual source size,
 * but compression ratio may regress significantly if the guess considerably underestimates it */
#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7

/* Controls whether the new and experimental "dedicated dictionary search
 * structure" can be used. This feature is still rough around the edges, be
 * prepared for surprising behavior!
 *
 * How to use it:
 *
 * When using a CDict, whether to use this feature or not is controlled at
 * CDict creation, and it must be set in a CCtxParams set passed into that
 * construction (via ZSTD_createCDict_advanced2()). A compression will then
 * use the feature or not based on how the CDict was constructed; the value of
 * this param, set in the CCtx, will have no effect.
 *
 * However, when a dictionary buffer is passed into a CCtx, such as via
 * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
 * whether the CDict that is created internally can use the feature or not.
 *
 * What it does:
 *
 * Normally, the internal data structures of the CDict are analogous to what
 * would be stored in a CCtx after compressing the contents of a dictionary.
 * To an approximation, a compression using a dictionary can then use those
 * data structures to simply continue what is effectively a streaming
 * compression where the simulated compression of the dictionary left off.
 * Which is to say, the search structures in the CDict are normally the same
 * format as in the CCtx.
 *
 * It is possible to do better, since the CDict is not like a CCtx: the search
 * structures are written once during CDict creation, and then are only read
 * after that, while the search structures in the CCtx are both read and
 * written as the compression goes along. This means we can choose a search
 * structure for the dictionary that is read-optimized.
 *
 * This feature enables the use of that different structure.
 *
 * Note that some of the members of the ZSTD_compressionParameters struct have
 * different semantics and constraints in the dedicated search structure. It is
 * highly recommended that you simply set a compression level in the CCtxParams
 * you pass into the CDict creation call, and avoid messing with the cParams
 * directly.
 *
 * Effects:
 *
 * This will only have any effect when the selected ZSTD_strategy
 * implementation supports this feature. Currently, that's limited to
 * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
 *
 * Note that this means that the CDict tables can no longer be copied into the
 * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
 * usable. The dictionary can only be attached or reloaded.
 *
 * In general, you should expect compression to be faster, sometimes very much
 * so, and CDict creation to be slightly slower. Eventually, we will probably
 * make this mode the default.
 */
#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8

/* ZSTD_c_stableInBuffer
 * Experimental parameter.
 * Default is 0 == disabled. Set to 1 to enable.
 *
 * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
 * between calls, except for the modifications that zstd makes to pos (the
 * caller must not modify pos). This is checked by the compressor, and
 * compression will fail if it ever changes. This means the only flush
 * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
 * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
 * MUST not be modified during compression or you will get data corruption.
 *
 * When this flag is enabled zstd won't allocate an input window buffer,
 * because the user guarantees it can reference the ZSTD_inBuffer until
 * the frame is complete. But, it will still allocate an output buffer
 * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
 * avoid the memcpy() from the input buffer to the input window buffer.
 *
 * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
 * That means this flag cannot be used with ZSTD_compressStream().
 *
 * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
 * this flag is ALWAYS memory safe, and will never access out-of-bounds
 * memory. However, compression WILL fail if you violate the preconditions.
 *
 * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
 * not be modified during compression or you will get data corruption. This
 * is because zstd needs to reference data in the ZSTD_inBuffer to find
 * matches.
 * Normally zstd maintains its own window buffer for this purpose,
 * but passing this flag tells zstd to use the user provided buffer.
 */
#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9

/* ZSTD_c_stableOutBuffer
 * Experimental parameter.
 * Default is 0 == disabled. Set to 1 to enable.
 *
 * Tells the compressor that the ZSTD_outBuffer will not be resized between
 * calls. Specifically: (out.size - out.pos) will never grow. This gives the
 * compressor the freedom to say: If the compressed data doesn't fit in the
 * output buffer then return ZSTD_error_dstSizeTooSmall. This allows the
 * compressor to always compress directly into the output buffer, instead of
 * compressing into an internal buffer and copying to the output buffer.
 *
 * When this flag is enabled zstd won't allocate an output buffer, because
 * it can write directly to the ZSTD_outBuffer. It will still allocate the
 * input window buffer (see ZSTD_c_stableInBuffer).
 *
 * Zstd will check that (out.size - out.pos) never grows and return an error
 * if it does. While not strictly necessary, this should prevent surprises.
 */
#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10

/* ZSTD_c_blockDelimiters
 * Default is 0 == ZSTD_sf_noBlockDelimiters.
 *
 * For use with sequence compression API: ZSTD_compressSequences().
 *
 * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
 * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
 * See the definition of ZSTD_Sequence for more specifics.
 */
#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11

/* ZSTD_c_validateSequences
 * Default is 0 == disabled. Set to 1 to enable sequence validation.
 *
 * For use with sequence compression API: ZSTD_compressSequences().
 * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
 * during function execution.
 *
 * Without validation, providing a sequence that does not conform to the zstd spec will cause
 * undefined behavior, and may produce a corrupted block.
 *
 * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
 * specifics regarding offset/matchlength requirements) then the function will bail out and
 * return an error.
 */
#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12

/*! ZSTD_CCtx_getParameter() :
 *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
 *  and store it into int* value.
 * @return : 0, or an error code (which can be tested with ZSTD_isError()).
 */
ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);


/*! ZSTD_CCtx_params :
 *  Quick howto :
 *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
 *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
 *                                     an existing ZSTD_CCtx_params structure.
 *                                     This is similar to ZSTD_CCtx_setParameter().
 *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
 *                                     an existing CCtx.
 *                                     These parameters will be applied to
 *                                     all subsequent frames.
 *  - ZSTD_compressStream2() : Do compression using the CCtx.
 *  - ZSTD_freeCCtxParams() : Free the memory.
 *
 *  This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
 *  for static allocation of CCtx for single-threaded compression.
 */
ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);

/*! ZSTD_CCtxParams_reset() :
 *  Reset params to default values.
 */
ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
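/* A minimal sketch of the ZSTD_CCtx_params quick-howto above : build a
 * parameter set once, apply it to an existing context, then compress.
 * The cctx and buffers are assumed to be supplied by the caller; error
 * checks are elided.
 * \code
 *   ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
 *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 19);
 *   ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
 *   ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);  // applies to subsequent frames
 *   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 *   ZSTD_freeCCtxParams(params);
 * \endcode
 */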
ZSTD_CCtxParams_init() : * Initializes the compression parameters of cctxParams according to * compression level. All other parameters are reset to their default values. */ ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); /*! ZSTD_CCtxParams_init_advanced() : * Initializes the compression and frame parameters of cctxParams according to * params. All other parameters are reset to their default values. */ ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); /*! ZSTD_CCtxParams_setParameter() : * Similar to ZSTD_CCtx_setParameter. * Set one compression parameter, selected by enum ZSTD_cParameter. * Parameters must be applied to a ZSTD_CCtx using * ZSTD_CCtx_setParametersUsingCCtxParams(). * @result : a code representing success or failure (which can be tested with * ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); /*! ZSTD_CCtxParams_getParameter() : * Similar to ZSTD_CCtx_getParameter. * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. * @result : 0, or an error code (which can be tested with ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_setParametersUsingCCtxParams() : * Apply a set of ZSTD_CCtx_params to the compression context. * This can be done even after compression is started. * If nbWorkers==0, this will have no impact until a new compression is started. * If nbWorkers>=1, new parameters will be picked up at the next job, * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). */ ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); /*! ZSTD_compressStream2_simpleArgs() : * Same as ZSTD_compressStream2(), * but using only integral types as arguments. * This variant might be helpful for binders from dynamic languages * which have trouble handling structures containing memory pointers. */ ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp); /*************************************** * Advanced decompression functions ***************************************/ /*! ZSTD_isFrame() : * Tells if the content of `buffer` starts with a valid Frame Identifier. * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size); /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, ready to start decompression operation without startup delay. * Dictionary content is referenced, and therefore stays in dictBuffer. * It is important that dictBuffer outlives the DDict: * it must remain read-accessible throughout the lifetime of the DDict. */ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_byReference() : * Same as ZSTD_DCtx_loadDictionary(), * but references `dict` content instead of copying it into `dctx`. * This saves memory if `dict` remains around. * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
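/* Editor's note: a minimal sketch (not part of the original header) of
 * by-reference dictionary use; `dictBuf`/`dictBufSize` are a hypothetical
 * caller-owned buffer that must outlive decompression. Error handling is elided.
 *
 *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *   ZSTD_DCtx_loadDictionary_byReference(dctx, dictBuf, dictBufSize);
 *   size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
 *   ZSTD_freeDCtx(dctx);
 */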
/*! ZSTD_DCtx_loadDictionary_advanced() : * Same as ZSTD_DCtx_loadDictionary(), * but gives direct control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?). */ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_refPrefix_advanced() : * Same as ZSTD_DCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_setMaxWindowSize() : * Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit. * This protects a decoder context from reserving too much memory for itself (potential attack scenario). * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); /*! ZSTD_DCtx_getParameter() : * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value); /* ZSTD_d_format * Experimental parameter, * allowing selection between ZSTD_format_e input compression formats */ #define ZSTD_d_format ZSTD_d_experimentalParam1 /* ZSTD_d_stableOutBuffer * Experimental parameter. * Default is 0 == disabled. Set to 1 to enable. * * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same * between calls, except for the modifications that zstd makes to pos (the * caller must not modify pos). This is checked by the decompressor, and * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer * MUST be large enough to fit the entire decompressed frame. This will be * checked when the frame content size is known. The data in the ZSTD_outBuffer * in the range [dst, dst + pos) MUST not be modified during decompression * or you will get data corruption. * * When this flag is enabled zstd won't allocate an output buffer, because * it can write directly to the ZSTD_outBuffer, but it will still allocate * an input buffer large enough to fit any compressed block. This will also * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer. * If you need to avoid the input buffer allocation use the buffer-less * streaming API. * * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using * this flag is ALWAYS memory safe, and will never access out-of-bounds * memory. However, decompression WILL fail if you violate the preconditions. * * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST * not be modified during decompression or you will get data corruption. This * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate * matches. Normally zstd maintains its own buffer for this purpose, but passing * this flag tells zstd to use the user-provided buffer. */ #define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
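/* Editor's note: a minimal sketch (not part of the original header) of streaming
 * decompression into a stable output buffer; `outCapacity` is assumed large
 * enough for the whole frame, and error handling is elided.
 *
 *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
 *   ZSTD_outBuffer out = { dst, outCapacity, 0 };  // same buffer on every call
 *   ZSTD_inBuffer  in  = { src, srcSize, 0 };
 *   while (in.pos < in.size) {
 *       size_t const ret = ZSTD_decompressStream(dctx, &out, &in);
 *       if (ret == 0 || ZSTD_isError(ret)) break;  // frame done, or error
 *   }
 *   ZSTD_freeDCtx(dctx);
 */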
/* ZSTD_d_forceIgnoreChecksum * Experimental parameter. * Default is 0 == disabled. Set to 1 to enable. * * Tells the decompressor to skip checksum validation during decompression, regardless * of whether checksumming was specified during compression. This offers some * slight performance benefits, and may be useful for debugging. * Param has values of type ZSTD_forceIgnoreChecksum_e */ #define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3 /* ZSTD_d_refMultipleDDicts * Experimental parameter. * Default is 0 == disabled. Set to 1 to enable. * * If enabled and dctx is allocated on the heap, then additional memory will be allocated * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict() * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead * store all references. At decompression time, the appropriate dictID is selected * from the set of DDicts based on the dictID in the frame. * * Usage is simply calling ZSTD_DCtx_refDDict() with multiple DDicts. * * Param has values of type ZSTD_refMultipleDDicts_e * * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory * allocation for the hash table. ZSTD_freeDCtx() also frees this memory. * Memory is allocated as per ZSTD_DCtx::customMem. * * Although this function allocates memory for the table, the user is still responsible for * memory management of the underlying ZSTD_DDict* objects themselves. */ #define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4 /*! ZSTD_DCtx_setFormat() : * Instruct the decoder context about what kind of data to decode next. * This instruction is mandatory to decode data without a fully-formed header, * such as ZSTD_f_zstd1_magicless for example. * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); /*! ZSTD_decompressStream_simpleArgs() : * Same as ZSTD_decompressStream(), * but using only integral types as arguments. * This can be helpful for binders from dynamic languages * which have trouble handling structures containing memory pointers. */ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos); /******************************************************************** * Advanced streaming functions * Warning : most of these functions are now redundant with the Advanced API. * Once the Advanced API reaches "stable" status, * redundant functions will be deprecated, and then at some point removed. ********************************************************************/ /*===== Advanced Streaming compression functions =====*/ /*! ZSTD_initCStream_srcSize() : * This function is deprecated, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * * pledgedSrcSize must be correct.
If it is not known at init time, use * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, * "0" also disables the frame content size field. It may be enabled in the future. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /*! ZSTD_initCStream_usingDict() : * This function is deprecated, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * * Creates an internal CDict (incompatible with static CCtx), except if * dict == NULL or dictSize < 8, in which case no dict is used. * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /*! ZSTD_initCStream_advanced() : * This function is deprecated, and is approximately equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * // Pseudocode: Set each zstd parameter and leave the rest as-is. * for ((param, value) : params) { * ZSTD_CCtx_setParameter(zcs, param, value); * } * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. * pledgedSrcSize must be correct. * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*! ZSTD_initCStream_usingCDict() : * This function is deprecated, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, cdict); * * note : cdict will just be referenced, and must outlive the compression session * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /*! ZSTD_initCStream_usingCDict_advanced() : * This function is DEPRECATED, and is approximately equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * // Pseudocode: Set each zstd frame parameter and leave the rest as-is. * for ((fParam, value) : fParams) { * ZSTD_CCtx_setParameter(zcs, fParam, value); * } * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_refCDict(zcs, cdict); * * same as ZSTD_initCStream_usingCDict(), with control over frame parameters. * pledgedSrcSize must be correct. If srcSize is not known at init time, use * value ZSTD_CONTENTSIZE_UNKNOWN. * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /*!
ZSTD_resetCStream() : * This function is deprecated, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * * Start a new frame, using the same parameters as the previous frame. * This is typically useful to skip the dictionary loading stage, since it will re-use it in-place. * Note that zcs must be initialized at least once before using ZSTD_resetCStream(). * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. * If pledgedSrcSize > 0, its value must be correct, as it will be written in the frame header, and verified at the end. * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, * but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead. * @return : 0, or an error code (which can be tested using ZSTD_isError()) * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); typedef struct { unsigned long long ingested; /* nb input bytes read and buffered */ unsigned long long consumed; /* nb input bytes actually compressed */ unsigned long long produced; /* nb of compressed bytes generated and buffered */ unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ unsigned currentJobID; /* MT only : latest started job nb */ unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */ } ZSTD_frameProgression; /* ZSTD_getFrameProgression() : * tells how much data has been ingested (read from input), * consumed (input actually compressed), and produced (output) for the current frame. * Note : (ingested - consumed) is the amount of input data buffered internally, not yet compressed. * Aggregates progression inside active worker threads. */ ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); /*! ZSTD_toFlushNow() : * Tells how many bytes are ready to be flushed immediately. * Useful for multithreading scenarios (nbWorkers >= 1). * Probe the oldest active job, defined as the oldest job not yet entirely flushed, * and check its output buffer. * @return : amount of data stored in the oldest job and ready to be flushed immediately. * if @return == 0, it means either : * + there is no active job (could be checked with ZSTD_frameProgression()), or * + oldest job is still actively compressing data, * but everything it has produced has also been flushed so far, * therefore flush speed is limited by the production speed of the oldest job * irrespective of the speed of concurrent (and newer) jobs. */ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
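/* Editor's note: a minimal sketch (not part of the original header) of probing a
 * multithreaded streaming compression (nbWorkers >= 1) from the caller side;
 * `cctx` is an active compression context, and error handling is elided.
 *
 *   ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
 *   unsigned long long const backlog = fp.ingested - fp.consumed;  // buffered, not yet compressed
 *   size_t const flushable = ZSTD_toFlushNow(cctx);                // bytes flushable right now
 */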
/*===== Advanced Streaming decompression functions =====*/ /*! * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); * * note: no dictionary will be used if dict == NULL or dictSize < 8 * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /*! * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_refDDict(zds, ddict); * * note : ddict is referenced, it must outlive the decompression session * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /*! * This function is deprecated, and is equivalent to: * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * * re-use decompression parameters from the previous init; saves dictionary loading * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /********************************************************************* * Buffer-less and synchronous inner streaming functions * * This is an advanced API, giving full control over buffer management, for users who need direct control over memory. * But it's also a complex one, with several restrictions, documented below. * Prefer the normal streaming API for an easier experience. ********************************************************************* */ /** Buffer-less streaming compression (synchronous mode) A ZSTD_CCtx object is required to track streaming operations. Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. A ZSTD_CCtx object can be re-used multiple times within successive compression operations. Start by initializing a context. Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression, or ZSTD_compressBegin_advanced(), for finer parameter control. It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx(). Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function : - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only. - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks. - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario. Worst case evaluation is provided by ZSTD_compressBound(). ZSTD_compressContinue() doesn't guarantee recovery after a failed compression. - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog). It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks) - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps, in which case it will "discard" the relevant memory section from its history. Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum. It's possible to use srcSize==0, in which case it will write a final empty block to end the frame. Without the last block mark, frames are considered unfinished (hence corrupted) by compliant decoders. The `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
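  Editor's note: a minimal sketch (not part of the original documentation) of the
  sequence above; `chunk[]`/`nbChunks` are hypothetical, `dst` is sized via
  ZSTD_compressBound(), and error handling is elided.

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_compressBegin(cctx, 3);               // compression level 3
    size_t dstPos = 0;
    for (size_t i = 0; i < nbChunks; i++) {    // each call emits 1+ blocks
        dstPos += ZSTD_compressContinue(cctx, dst + dstPos, dstCapacity - dstPos,
                                        chunk[i].start, chunk[i].size);
    }
    // last block(s) + optional checksum; srcSize==0 writes a final empty block
    dstPos += ZSTD_compressEnd(cctx, dst + dstPos, dstCapacity - dstPos, NULL, 0);
    ZSTD_freeCCtx(cctx);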
*/ /*===== Buffer-less streaming compression functions =====*/ ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /** Buffer-less streaming decompression (synchronous mode) A ZSTD_DCtx object is required to track streaming operations. Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. A ZSTD_DCtx object can be re-used multiple times. First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader(). Frame header is extracted from the beginning of the compressed frame, so providing only the frame's beginning is enough. Data fragment must be large enough to ensure successful decoding. `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. or an error code, which can be tested using ZSTD_isError(). It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. As a consequence, check that values remain within valid application range. For example, do not allocate memory blindly, check that `windowSize` is within expectation. Each application can set its own limits, depending on local restrictions. For extended interoperability, it is recommended to support a `windowSize` of at least 8 MB. ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes. ZSTD_decompressContinue() is very sensitive to contiguity: if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place, or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance. There are multiple ways to guarantee this condition. The most memory efficient way is to use a round buffer of sufficient size. Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), which can @return an error code if the required value is too large for the current system (in 32-bit mode).
In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`. At which point, decoding can resume from the beginning of the buffer. Note that already decoded data stored in the buffer should be flushed before being overwritten. There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory. Finally, if you control the compression process, you can also ignore all buffer size rules, as long as the encoder and decoder progress in "lock-step", aka use exactly the same buffer sizes, break contiguity at the same place, etc. Once buffers are set up, start decompression, with ZSTD_decompressBegin(). If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict(). Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately. ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError(). A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. The context can then be reset to start a new decompression. Note : it's possible to know if the next input to present is a header or a block, using ZSTD_nextInputType(). This information is not required to properly decode a frame. == Special case : skippable frames == Skippable frames allow integration of user-defined data into a flow of concatenated frames. Skippable frames will be ignored (skipped) by the decompressor. The format of skippable frames is as follows : a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits c) Frame Content - any content (User Data) of length equal to Frame Size For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame. For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content. */ /*===== Buffer-less streaming decompression functions =====*/ typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; typedef struct { unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ unsigned blockSizeMax; ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ unsigned headerSize; unsigned dictID; unsigned checksumFlag; } ZSTD_frameHeader; /*! ZSTD_getFrameHeader() : * decode Frame Header, or requires larger `srcSize`. * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is the wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
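/* Editor's note: a minimal sketch (not part of the original documentation) of the
 * decoding loop described above; `rb`/`rbCapacity` are a hypothetical
 * caller-managed round buffer sized via ZSTD_decodingBufferSize_min(), `ip`
 * walks the compressed input, and error handling plus wrap-around flushing
 * are elided.
 *
 *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *   ZSTD_decompressBegin(dctx);
 *   size_t rbPos = 0;
 *   size_t srcSize;
 *   while ((srcSize = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
 *       size_t const outSize = ZSTD_decompressContinue(dctx,
 *                                  rb + rbPos, rbCapacity - rbPos, ip, srcSize);
 *       ip += srcSize;
 *       rbPos += outSize;   // wrap to 0 when too little room remains for a block
 *   }
 *   ZSTD_freeDCtx(dctx);
 */ /*!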
ZSTD_getFrameHeader_advanced() : * same as ZSTD_getFrameHeader(), * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); /* ============================ */ /** Block level API */ /* ============================ */ /*! Block functions produce and decode raw zstd blocks, without frame metadata. Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). But users will have to keep track of the metadata needed to regenerate the data, such as compressed and content sizes. A few rules to respect : - Compressing and decompressing require a context structure + Use ZSTD_createCCtx() and ZSTD_createDCtx() - It is necessary to init the context before starting + compression : any ZSTD_compressBegin*() variant, including with dictionary + decompression : any ZSTD_decompressBegin*() variant, including with dictionary + copyCCtx() and copyDCtx() can be used too - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + If input is larger than a block size, it's necessary to split input data into multiple blocks + For inputs larger than a single block, consider using regular ZSTD_compress() instead. Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block. - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) ! ===> In which case, nothing is produced into `dst` ! + User __must__ test for such outcome and deal directly with uncompressed data + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0. Doing so would mess up the statistics history, leading to potential data corruption. + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !! + In case of multiple successive blocks, should some of them be uncompressed, the decoder must be informed of their existence in order to follow proper history. Use ZSTD_insertBlock() for such a case.
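  Editor's note: a minimal sketch (not part of the original documentation) of the
  incompressible-block rule above; `blk`/`blkSize` are hypothetical, and error
  handling is elided.

    size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, blk, blkSize);
    if (cSize == 0) {
        // Block judged incompressible: store `blk` raw, and make sure the
        // decoder side calls ZSTD_insertBlock(dctx, blk, blkSize) so that
        // its history stays in sync with the encoder.
    }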
*/ /*===== Raw zstd block functions =====*/ ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif /**** ended inlining ../zstd.h ****/ #define FSE_STATIC_LINKING_ONLY /**** skipping file: fse.h ****/ #define HUF_STATIC_LINKING_ONLY /**** skipping file: huf.h ****/ #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ #endif /**** start inlining xxhash.h ****/ /* * xxHash - Extremely Fast Hash algorithm * Header File * Copyright (c) 2012-2021, Yann Collet, Facebook, Inc. * * You can contact the author at : * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* Notice extracted from xxHash homepage : xxHash is an extremely fast Hash algorithm, running at RAM speed limits. It also successfully passes all tests from the SMHasher suite. Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

 Name            Speed       Q.Score   Author
 xxHash          5.4 GB/s      10
 CrapWow         3.2 GB/s       2      Andrew
 MurmurHash 3a   2.7 GB/s      10      Austin Appleby
 SpookyHash      2.0 GB/s      10      Bob Jenkins
 SBox            1.4 GB/s       9      Bret Mulvey
 Lookup3         1.2 GB/s       9      Bob Jenkins
 SuperFastHash   1.2 GB/s       1      Paul Hsieh
 CityHash64      1.05 GB/s     10      Pike & Alakuijala
 FNV             0.55 GB/s      5      Fowler, Noll, Vo
 CRC32           0.43 GB/s      9
 MD5-32          0.33 GB/s     10      Ronald L. Rivest
 SHA1-32         0.28 GB/s     10

Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. A 64-bit version, named XXH64, is available since r35. It offers much better speed, but for 64-bit applications only.

 Name     Speed on 64 bits    Speed on 32 bits
 XXH64       13.8 GB/s            1.9 GB/s
 XXH32        6.8 GB/s            6.0 GB/s

*/ #if defined (__cplusplus) extern "C" { #endif #ifndef XXHASH_H_5627135585666179 #define XXHASH_H_5627135585666179 1 /* **************************** * Definitions ******************************/ /**** skipping file: zstd_deps.h ****/ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; /* **************************** * API modifier ******************************/ /** XXH_PRIVATE_API * This is useful if you want to include xxhash functions in `static` mode * in order to inline them, and remove their symbol from the public list. * Methodology : * #define XXH_PRIVATE_API * #include "xxhash.h" * `xxhash.c` is automatically included. * It's not useful to compile and link it as a separate module anymore.
*/ #ifdef XXH_PRIVATE_API # ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY # endif # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define XXH_PUBLIC_API static inline # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else # define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ # endif #else # define XXH_PUBLIC_API /* do nothing */ #endif /* XXH_PRIVATE_API */ /*!XXH_NAMESPACE, aka Namespace Emulation : If you want to include _and expose_ xxHash functions from within your own library, but also want to avoid symbol collisions with another library which also includes xxHash, you can use XXH_NAMESPACE to automatically prefix any public symbol from the xxhash library with the value of XXH_NAMESPACE (therefore avoid keeping it NULL, and avoid numeric values). Note that no change is required within the calling program as long as it includes `xxhash.h` : the regular symbol name will be automatically translated by this header. */ #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) #endif /* ************************************* * Version ***************************************/ #define XXH_VERSION_MAJOR 0 #define XXH_VERSION_MINOR 6 #define XXH_VERSION_RELEASE 2 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) XXH_PUBLIC_API unsigned XXH_versionNumber (void); /* **************************** * Simple Hash Functions ******************************/ typedef unsigned int XXH32_hash_t; typedef unsigned long long XXH64_hash_t; XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); /*! XXH32() : Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input". The memory between input & input+length must be valid (allocated and read-accessible). "seed" can be used to alter the result predictably.
Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s XXH64() : Calculate the 64-bit hash of the sequence of length "len" stored at memory address "input". "seed" can be used to alter the result predictably. This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark). */ /* **************************** * Streaming Hash Functions ******************************/ typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ /*! State allocation, compatible with dynamic libraries */ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); /* hash streaming */ XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); /* These functions generate the xxHash of an input provided in multiple segments. Note that, for small input, they are slower than single-call functions, due to state management. For small input, prefer `XXH32()` and `XXH64()` . XXH state must first be allocated, using XXH*_createState() . Start a new hash by initializing the state with a seed, using XXH*_reset(). Then, feed the hash state by calling XXH*_update() as many times as necessary. Obviously, input must be allocated and read-accessible. The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. Finally, a hash value can be produced anytime, by using XXH*_digest(). This function returns the nn-bits hash as an int or long long. It's still possible to continue inserting input into the hash state after a digest, and generate new hashes later on, by calling XXH*_digest() again. When done, free the XXH state space if it was allocated dynamically. */ /* ************************** * Utils ****************************/ #if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ # define restrict /* disable restrict */ #endif XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state); XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); /* ************************** * Canonical representation ****************************/ /* The default result type for XXH functions is a primitive unsigned 32-bit or 64-bit integer. * The canonical representation uses a human-readable write convention, aka big-endian (large digits first). * These functions allow transformation of a hash result into and from its canonical format. * This way, hash values can be written into a file / memory, and remain comparable across different systems and programs.
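 *
 * Editor's note: a minimal sketch (not part of the original header) combining the
 * streaming and canonical APIs above; `part1`/`part2` are hypothetical buffers,
 * and error handling is elided.
 *
 *   XXH64_state_t* const st = XXH64_createState();
 *   XXH64_reset(st, 0);                          // seed 0
 *   XXH64_update(st, part1, part1Size);
 *   XXH64_update(st, part2, part2Size);
 *   XXH64_hash_t const h = XXH64_digest(st);
 *   XXH64_canonical_t canon;
 *   XXH64_canonicalFromHash(&canon, h);          // big-endian, safe to store
 *   XXH64_freeState(st);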
*/ typedef struct { unsigned char digest[4]; } XXH32_canonical_t; typedef struct { unsigned char digest[8]; } XXH64_canonical_t; XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); #endif /* XXHASH_H_5627135585666179 */ /* ================================================================================================ This section contains definitions which are not guaranteed to remain stable. They may change in future versions, becoming incompatible with a different version of the library. They shall only be used with static linking. Never use these definitions in association with dynamic linking ! =================================================================================================== */ #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) #define XXH_STATIC_H_3543687687345 /* These definitions are only meant to allow allocation of XXH state statically, on stack, or in a struct for example. Do not use members directly. */ struct XXH32_state_s { unsigned total_len_32; unsigned large_len; unsigned v1; unsigned v2; unsigned v3; unsigned v4; unsigned mem32[4]; /* buffer defined as U32 for alignment */ unsigned memsize; unsigned reserved; /* never read nor write, will be removed in a future version */ }; /* typedef'd to XXH32_state_t */ struct XXH64_state_s { unsigned long long total_len; unsigned long long v1; unsigned long long v2; unsigned long long v3; unsigned long long v4; unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ unsigned memsize; unsigned reserved[2]; /* never read nor write, will be removed in a future version */ }; /* typedef'd to XXH64_state_t */ # ifdef XXH_PRIVATE_API /**** start inlining xxhash.c ****/ /* * xxHash - Fast Hash algorithm * Copyright (c) 2012-2021, Yann Collet, Facebook, Inc. * * You can contact the author at : * - xxHash homepage: http://www.xxhash.com * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* ************************************* * Tuning parameters ***************************************/ /*!XXH_FORCE_MEMORY_ACCESS : * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The switch below allows selecting a different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on a compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method doesn't depend on the compiler, but violates the C standard. * It can generate buggy code on targets which do not support unaligned memory accesses. * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://stackoverflow.com/a/32095106/646947 for details.
* Prefer these methods in priority order (0 > 1 > 2) */ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define XXH_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \ defined(__ICCARM__) # define XXH_FORCE_MEMORY_ACCESS 1 # endif #endif /*!XXH_ACCEPT_NULL_INPUT_POINTER : * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer. * When this option is enabled, xxHash output for null input pointers will be the same as for a zero-length input. * By default, this option is disabled. To enable it, uncomment the define below : */ /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ /*!XXH_FORCE_NATIVE_FORMAT : * By default, the xxHash library provides endian-independent Hash values, based on little-endian convention. * Results are therefore identical for little-endian and big-endian CPU. * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. * Should endian-independence be of no importance for your application, you may set the #define below to 1, * to improve speed for big-endian CPU. * This option has no impact on little-endian CPU. */ #ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ # define XXH_FORCE_NATIVE_FORMAT 0 #endif /*!XXH_FORCE_ALIGN_CHECK : * This is a minor performance trick, only useful with lots of very small keys. * It means : check for aligned/unaligned input. * The check costs one initial branch per hash; set to 0 when the input data * is guaranteed to be aligned. */ #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) # define XXH_FORCE_ALIGN_CHECK 0 # else # define XXH_FORCE_ALIGN_CHECK 1 # endif #endif /* ************************************* * Includes & Memory related functions ***************************************/ /* Modify the local functions below should you wish to use some other memory routines */ /* for ZSTD_malloc(), ZSTD_free() */ #define ZSTD_DEPS_NEED_MALLOC /**** skipping file: zstd_deps.h ****/ static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); } static void XXH_free (void* p) { ZSTD_free(p); } static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); } #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY #endif /**** skipping file: xxhash.h ****/ /* ************************************* * Compiler Specific Options ***************************************/ /**** skipping file: compiler.h ****/ /* ************************************* * Basic Types ***************************************/ /**** skipping file: mem.h ****/ #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) /* Force direct memory access.
Only works on CPUs which support unaligned memory access in hardware */ static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) /* __packed instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign; static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } #else /* portable and safe solution. Generally efficient. * see : http://stackoverflow.com/a/32095106/646947 */ static U32 XXH_read32(const void* memPtr) { U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; } static U64 XXH_read64(const void* memPtr) { U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val; } #endif /* XXH_FORCE_MEMORY_ACCESS */ /* **************************************** * Compiler-specific Functions and Macros ******************************************/ #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ #if defined(_MSC_VER) # define XXH_rotl32(x,r) _rotl(x,r) # define XXH_rotl64(x,r) _rotl64(x,r) #else #if defined(__ICCARM__) # include <intrinsics.h> # define XXH_rotl32(x,r) __ROR(x,(32 - r)) #else # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) #endif # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) #endif #if defined(_MSC_VER) /* Visual Studio */ # define XXH_swap32 _byteswap_ulong # define XXH_swap64 _byteswap_uint64 #elif GCC_VERSION >= 403 # define XXH_swap32 __builtin_bswap32 # define XXH_swap64 __builtin_bswap64 #else static U32 XXH_swap32 (U32 x) { return ((x << 24) & 0xff000000 ) | ((x << 8) & 0x00ff0000 ) | ((x >> 8) & 0x0000ff00 ) | ((x >> 24) & 0x000000ff ); } static U64 XXH_swap64 (U64 x) { return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) | ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) | ((x >> 8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) | ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL); } #endif /* ************************************* * Architecture Macros ***************************************/ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ #ifndef XXH_CPU_LITTLE_ENDIAN static const int g_one = 1; # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) #endif /* *************************** * Memory reads *****************************/ typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); else return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); } FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); } static U32 XXH_readBE32(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ?
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); } FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); else return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); } FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) { return XXH_readLE64_align(ptr, endian, XXH_unaligned); } static U64 XXH_readBE64(const void* ptr) { return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); } /* ************************************* * Macros ***************************************/ #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ************************************* * Constants ***************************************/ static const U32 PRIME32_1 = 2654435761U; static const U32 PRIME32_2 = 2246822519U; static const U32 PRIME32_3 = 3266489917U; static const U32 PRIME32_4 = 668265263U; static const U32 PRIME32_5 = 374761393U; static const U64 PRIME64_1 = 11400714785074694791ULL; static const U64 PRIME64_2 = 14029467366897019727ULL; static const U64 PRIME64_3 = 1609587929392839161ULL; static const U64 PRIME64_4 = 9650029242287828579ULL; static const U64 PRIME64_5 = 2870177450012600261ULL; XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } /* ************************** * Utils ****************************/ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState) { ZSTD_memcpy(dstState, srcState, sizeof(*dstState)); } XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) { ZSTD_memcpy(dstState, srcState, sizeof(*dstState)); } /* *************************** * Simple Hash Functions *****************************/ static U32 XXH32_round(U32 seed, U32 input) { seed += input * PRIME32_2; seed = XXH_rotl32(seed, 13); seed *= PRIME32_1; return seed; } FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* bEnd = p + len; U32 h32; #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)16; } #endif if (len>=16) { const BYTE* const limit = bEnd - 16; U32 v1 = seed + PRIME32_1 + PRIME32_2; U32 v2 = seed + PRIME32_2; U32 v3 = seed + 0; U32 v4 = seed - PRIME32_1; do { v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; } while (p<=limit); h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); } else { h32 = seed + PRIME32_5; } h32 += (U32) len; while (p+4<=bEnd) { h32 += XXH_get32bits(p) * PRIME32_3; h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; p+=4; } while (p<bEnd) { h32 += (*p) * PRIME32_5; h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; p++; } h32 ^= h32 >> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH32_CREATESTATE_STATIC(state); XXH32_reset(state, seed); XXH32_update(state, input, len); return XXH32_digest(state); #else XXH_endianess endian_detected =
(XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if (XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); else return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); } } if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); else return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); #endif } static U64 XXH64_round(U64 acc, U64 input) { acc += input * PRIME64_2; acc = XXH_rotl64(acc, 31); acc *= PRIME64_1; return acc; } static U64 XXH64_mergeRound(U64 acc, U64 val) { val = XXH64_round(0, val); acc ^= val; acc = acc * PRIME64_1 + PRIME64_4; return acc; } FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; U64 h64; #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)32; } #endif if (len>=32) { const BYTE* const limit = bEnd - 32; U64 v1 = seed + PRIME64_1 + PRIME64_2; U64 v2 = seed + PRIME64_2; U64 v3 = seed + 0; U64 v4 = seed - PRIME64_1; do { v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; } while (p<=limit); h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); h64 = XXH64_mergeRound(h64, v4); } else { h64 = seed + PRIME64_5; } h64 += (U64) len; while (p+8<=bEnd) { U64 const k1 = XXH64_round(0, XXH_get64bits(p)); h64 ^= k1; h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; p+=8; } if (p+4<=bEnd) { h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p+=4; } while (p<bEnd) { h64 ^= (*p) * PRIME64_5; h64 = XXH_rotl64(h64, 11) * PRIME64_1; p++; } h64 ^= h64 >> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) { #if 0 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ XXH64_CREATESTATE_STATIC(state); XXH64_reset(state, seed); XXH64_update(state, input, len); return XXH64_digest(state); #else XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if (XXH_FORCE_ALIGN_CHECK) { if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); } } if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); else return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); #endif } /* ************************************************** * Advanced Hash Functions ****************************************************/ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) { return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); } XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) {
XXH_free(statePtr); return XXH_OK; } XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) { return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); } XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) { XXH_free(statePtr); return XXH_OK; } /*** Hash feed ***/ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) { XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ ZSTD_memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ state.v1 = seed + PRIME32_1 + PRIME32_2; state.v2 = seed + PRIME32_2; state.v3 = seed + 0; state.v4 = seed - PRIME32_1; ZSTD_memcpy(statePtr, &state, sizeof(state)); return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) { XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ ZSTD_memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ state.v1 = seed + PRIME64_1 + PRIME64_2; state.v2 = seed + PRIME64_2; state.v3 = seed + 0; state.v4 = seed - PRIME64_1; ZSTD_memcpy(statePtr, &state, sizeof(state)); return XXH_OK; } FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; #ifdef XXH_ACCEPT_NULL_INPUT_POINTER if (input==NULL) return XXH_ERROR; #endif state->total_len_32 += (unsigned)len; state->large_len |= (len>=16) | (state->total_len_32>=16); if (state->memsize + len < 16) { /* fill in tmp buffer */ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); state->memsize += (unsigned)len; return XXH_OK; } if (state->memsize) { /* some data left from previous update */ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); { const U32* p32 = state->mem32; state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; } p += 16-state->memsize; state->memsize = 0; } if (p <= bEnd-16) { const BYTE* const limit = bEnd - 16; U32 v1 = state->v1; U32 v2 = state->v2; U32 v3 = state->v3; U32 v4 = state->v4; do { v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; } while (p<=limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < bEnd) { XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); state->memsize = (unsigned)(bEnd-p); } return XXH_OK; } XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH32_update_endian(state_in, input, len, XXH_littleEndian); else return XXH32_update_endian(state_in, input, len, XXH_bigEndian); } FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) { const BYTE * p = (const BYTE*)state->mem32; const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; U32 h32; if (state->large_len) { h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + 
XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}


/* **** XXH64 **** */

FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {  /* fill in tmp buffer */
        if (input != NULL) {
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        }
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}

FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *=
PRIME64_3; h64 ^= h64 >> 32; return h64; } XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) { XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) return XXH64_digest_endian(state_in, XXH_littleEndian); else return XXH64_digest_endian(state_in, XXH_bigEndian); } /* ************************** * Canonical representation ****************************/ /*! Default XXH result types are basic unsigned 32 and 64 bits. * The canonical representation follows human-readable write convention, aka big-endian (large digits first). * These functions allow transformation of hash result into and from its canonical format. * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); ZSTD_memcpy(dst, &hash, sizeof(*dst)); } XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); ZSTD_memcpy(dst, &hash, sizeof(*dst)); } XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) { return XXH_readBE32(src); } XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) { return XXH_readBE64(src); } /**** ended inlining xxhash.c ****/ # endif #endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */ #if defined (__cplusplus) } #endif /**** ended inlining xxhash.h ****/ #if defined (__cplusplus) extern "C" { #endif /* ---- static assert (debug) --- */ #define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) #define ZSTD_isError ERR_isError /* for inlining */ #define FSE_isError ERR_isError #define HUF_isError ERR_isError /*-************************************* * shared macros ***************************************/ #undef MIN #undef MAX #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) /** * Ignore: this is an internal helper. * * This is a helper function to help force C99-correctness during compilation. * Under strict compilation modes, variadic macro arguments can't be empty. * However, variadic function arguments can be. Using a function therefore lets * us statically check that at least one (string) argument was passed, * independent of the compilation flags. */ static INLINE_KEYWORD UNUSED_ATTR void _force_has_format_string(const char *format, ...) { (void)format; } /** * Ignore: this is an internal helper. * * We want to force this function invocation to be syntactically correct, but * we don't want to force runtime evaluation of its arguments. */ #define _FORCE_HAS_FORMAT_STRING(...) \ if (0) { \ _force_has_format_string(__VA_ARGS__); \ } /** * Return the specified error if the condition evaluates to true. * * In debug modes, prints additional information. * In order to do that (particularly, printing the conditional that failed), * this can't just wrap RETURN_ERROR(). */ #define RETURN_ERROR_IF(cond, err, ...) 
\
    if (cond) { \
        RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
               __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
        _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
        RAWLOG(3, ": " __VA_ARGS__); \
        RAWLOG(3, "\n"); \
        return ERROR(err); \
    }

/**
 * Unconditionally return the specified error.
 *
 * In debug modes, prints additional information.
 */
#define RETURN_ERROR(err, ...) \
    do { \
        RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
               __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
        _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
        RAWLOG(3, ": " __VA_ARGS__); \
        RAWLOG(3, "\n"); \
        return ERROR(err); \
    } while(0);

/**
 * If the provided expression evaluates to an error code, returns that error code.
 *
 * In debug modes, prints additional information.
 */
#define FORWARD_IF_ERROR(err, ...) \
    do { \
        size_t const err_code = (err); \
        if (ERR_isError(err_code)) { \
            RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
                   __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
            _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
            RAWLOG(3, ": " __VA_ARGS__); \
            RAWLOG(3, "\n"); \
            return err_code; \
        } \
    } while(0);


/*-*************************************
*  Common constants
***************************************/
#define ZSTD_OPT_NUM    (1<<12)

#define ZSTD_REP_NUM      3                 /* number of repcodes */
#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4   /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define ZSTD_FRAMECHECKSUMSIZE 4

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */

#define HufLog 12
typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;

#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML   52
#define MaxLL   35
#define DefaultMaxOff 28
#define MaxOff  31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)

static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 6, 7, 8, 9,10,11,12,
    13,14,15,16
};
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
     4, 3, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 1, 1, 1,
     2, 2, 2, 2, 2, 2, 2, 2,
     2, 3, 2, 1, 1, 1, 1, 1,
    -1,-1,-1,-1
};
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 4, 5, 7, 8, 9,10,11,
    12,13,14,15,16
};
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
     1, 4, 3, 2, 2, 2, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1,-1,-1,
    -1,-1,-1,-1,-1
};
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
     1, 1, 1, 1, 1, 1, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
    -1,-1,-1,-1,-1
};
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;


/*-*******************************************
*  Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) { ZSTD_memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
static void ZSTD_copy16(void* dst, const void* src) { ZSTD_memcpy(dst, src, 16); }
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }

#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16

typedef enum {
    ZSTD_no_overlap,
    ZSTD_overlap_src_before_dst
    /*  ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;

/*! ZSTD_wildcopy() :
 *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
 */
MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;

    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));

    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
        /* Handle short offset copies. */
        do {
            COPY8(op, ip)
        } while (op < oend);
    } else {
        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
        /* Separate out the first COPY16() call because the copy length is
         * almost certain to be short, so the branches have different
         * probabilities. Since it is almost certain to be short, only do
         * one COPY16() in the first call. Then, do two calls per loop since
         * at that point it is more likely to have a high trip count.
*/ #ifdef __aarch64__ do { COPY16(op, ip); } while (op < oend); #else ZSTD_copy16(op, ip); if (16 >= length) return; op += 16; ip += 16; do { COPY16(op, ip); COPY16(op, ip); } while (op < oend); #endif } } MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const length = MIN(dstCapacity, srcSize); if (length > 0) { ZSTD_memcpy(dst, src, length); } return length; } /* define "workspace is too large" as this number of times larger than needed */ #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* when workspace is continuously too large * during at least this number of times, * context's memory usage is considered wasteful, * because it's sized to handle a worst case scenario which rarely happens. * In which case, resize it down to free some memory */ #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* Controls whether the input/output buffer is buffered or stable. */ typedef enum { ZSTD_bm_buffered = 0, /* Buffer the input/output */ ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */ } ZSTD_bufferMode_e; /*-******************************************* * Private declarations *********************************************/ typedef struct seqDef_s { U32 offset; /* Offset code of the sequence */ U16 litLength; U16 matchLength; } seqDef; typedef struct { seqDef* sequencesStart; seqDef* sequences; /* ptr to end of sequences */ BYTE* litStart; BYTE* lit; /* ptr to end of literals */ BYTE* llCode; BYTE* mlCode; BYTE* ofCode; size_t maxNbSeq; size_t maxNbLit; /* longLengthPos and longLengthID to allow us to represent either a single litLength or matchLength * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment * the existing value of the litLength or matchLength by 0x10000. */ U32 longLengthID; /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */ U32 longLengthPos; /* Index of the sequence to apply long length modification to */ } seqStore_t; typedef struct { U32 litLength; U32 matchLength; } ZSTD_sequenceLength; /** * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength. */ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq) { ZSTD_sequenceLength seqLen; seqLen.litLength = seq->litLength; seqLen.matchLength = seq->matchLength + MINMATCH; if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { if (seqStore->longLengthID == 1) { seqLen.litLength += 0xFFFF; } if (seqStore->longLengthID == 2) { seqLen.matchLength += 0xFFFF; } } return seqLen; } /** * Contains the compressed frame size and an upper-bound for the decompressed frame size. * Note: before using `compressedSize`, check for errors using ZSTD_isError(). 
* similarly, before using `decompressedBound`, check for errors using: * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` */ typedef struct { size_t compressedSize; unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ /* custom memory allocation functions */ void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem); void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem); void ZSTD_customFree(void* ptr, ZSTD_customMem customMem); MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ { assert(val != 0); { # if defined(_MSC_VER) /* Visual */ # if STATIC_BMI2 == 1 return _lzcnt_u32(val)^31; # else unsigned long r=0; return _BitScanReverse(&r, val) ? (unsigned)r : 0; # endif # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ return __builtin_clz (val) ^ 31; # elif defined(__ICCARM__) /* IAR Intrinsic */ return 31 - __CLZ(val); # else /* Software version */ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; # endif } } /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; * do not use with extDict variant ! */ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ typedef struct { blockType_e blockType; U32 lastBlock; U32 origSize; } blockProperties_t; /* declared here for decompress and fullbench */ /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ /* Used by: decompress, fullbench (does not get its definition from here) */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr); /*! ZSTD_decodeSeqHeaders() : * decode sequence header from src */ /* Used by: decompress, fullbench (does not get its definition from here) */ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_CCOMMON_H_MODULE */ /**** ended inlining zstd_internal.h ****/ /**** start inlining pool.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef POOL_H #define POOL_H #if defined (__cplusplus) extern "C" { #endif /**** skipping file: zstd_deps.h ****/ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ /**** skipping file: ../zstd.h ****/ typedef struct POOL_ctx_s POOL_ctx; /*! POOL_create() : * Create a thread pool with at most `numThreads` threads. * `numThreads` must be at least 1. * The maximum number of queued jobs before blocking is `queueSize`. * @return : POOL_ctx pointer on success, else NULL. 
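 *
 * Illustrative usage sketch (not from the original sources; `logJob` is a
 * hypothetical job function) :
 * \code
 *   static void logJob(void* opaque) { (void)opaque; }
 *
 *   POOL_ctx* const pool = POOL_create(2, 4);
 *   if (pool != NULL) {
 *       POOL_add(pool, logJob, NULL);
 *       POOL_free(pool);
 *   }
 * \endcode
 * POOL_add() may block while the job queue is full;
 * POOL_free() shuts the pool down and joins every worker thread first.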
*/ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize); POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem); /*! POOL_free() : * Free a thread pool returned by POOL_create(). */ void POOL_free(POOL_ctx* ctx); /*! POOL_resize() : * Expands or shrinks pool's number of threads. * This is more efficient than releasing + creating a new context, * since it tries to preserve and re-use existing threads. * `numThreads` must be at least 1. * @return : 0 when resize was successful, * !0 (typically 1) if there is an error. * note : only numThreads can be resized, queueSize remains unchanged. */ int POOL_resize(POOL_ctx* ctx, size_t numThreads); /*! POOL_sizeof() : * @return threadpool memory usage * note : compatible with NULL (returns 0 in this case) */ size_t POOL_sizeof(POOL_ctx* ctx); /*! POOL_function : * The function type that can be added to a thread pool. */ typedef void (*POOL_function)(void*); /*! POOL_add() : * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. * Possibly blocks until there is room in the queue. * Note : The function may be executed asynchronously, * therefore, `opaque` must live until function has been completed. */ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); /*! POOL_tryAdd() : * Add the job `function(opaque)` to thread pool _if_ a worker is available. * Returns immediately even if not (does not block). * @return : 1 if successful, 0 if not. */ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); #if defined (__cplusplus) } #endif #endif /**** ended inlining pool.h ****/ /* ====== Compiler specifics ====== */ #if defined(_MSC_VER) # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ #endif #ifdef ZSTD_MULTITHREAD /**** skipping file: threading.h ****/ /* A job is a function and an opaque argument */ typedef struct POOL_job_s { POOL_function function; void *opaque; } POOL_job; struct POOL_ctx_s { ZSTD_customMem customMem; /* Keep track of the threads */ ZSTD_pthread_t* threads; size_t threadCapacity; size_t threadLimit; /* The queue is a circular buffer */ POOL_job *queue; size_t queueHead; size_t queueTail; size_t queueSize; /* The number of threads working on jobs */ size_t numThreadsBusy; /* Indicates if the queue is empty */ int queueEmpty; /* The mutex protects the queue */ ZSTD_pthread_mutex_t queueMutex; /* Condition variable for pushers to wait on when the queue is full */ ZSTD_pthread_cond_t queuePushCond; /* Condition variables for poppers to wait on when the queue is empty */ ZSTD_pthread_cond_t queuePopCond; /* Indicates if the queue is shutting down */ int shutdown; }; /* POOL_thread() : * Work thread for the thread pool. * Waits for jobs and executes them. * @returns : NULL on failure else non-null. 
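 * Note : during shutdown, a worker may exit while jobs are still queued,
 *        but enough workers remain active to drain the queue;
 *        on a normal exit, the function returns its `opaque` argument
 *        (the pool context), and NULL only when that argument is NULL.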
*/ static void* POOL_thread(void* opaque) { POOL_ctx* const ctx = (POOL_ctx*)opaque; if (!ctx) { return NULL; } for (;;) { /* Lock the mutex and wait for a non-empty queue or until shutdown */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); while ( ctx->queueEmpty || (ctx->numThreadsBusy >= ctx->threadLimit) ) { if (ctx->shutdown) { /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit), * a few threads will be shutdown while !queueEmpty, * but enough threads will remain active to finish the queue */ ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return opaque; } ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); } /* Pop a job off the queue */ { POOL_job const job = ctx->queue[ctx->queueHead]; ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; ctx->numThreadsBusy++; ctx->queueEmpty = ctx->queueHead == ctx->queueTail; /* Unlock the mutex, signal a pusher, and run the job */ ZSTD_pthread_cond_signal(&ctx->queuePushCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); job.function(job.opaque); /* If the intended queue size was 0, signal after finishing job */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->numThreadsBusy--; if (ctx->queueSize == 1) { ZSTD_pthread_cond_signal(&ctx->queuePushCond); } ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } } /* for (;;) */ assert(0); /* Unreachable */ } POOL_ctx* ZSTD_createThreadPool(size_t numThreads) { return POOL_create (numThreads, 0); } POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { POOL_ctx* ctx; /* Check parameters */ if (!numThreads) { return NULL; } /* Allocate the context and zero initialize */ ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem); if (!ctx) { return NULL; } /* Initialize the job queue. * It needs one extra space since one space is wasted to differentiate * empty and full queues. */ ctx->queueSize = queueSize + 1; ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem); ctx->queueHead = 0; ctx->queueTail = 0; ctx->numThreadsBusy = 0; ctx->queueEmpty = 1; { int error = 0; error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); if (error) { POOL_free(ctx); return NULL; } } ctx->shutdown = 0; /* Allocate space for the thread handles */ ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->threadCapacity = 0; ctx->customMem = customMem; /* Check for errors */ if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } /* Initialize the threads */ { size_t i; for (i = 0; i < numThreads; ++i) { if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { ctx->threadCapacity = i; POOL_free(ctx); return NULL; } } ctx->threadCapacity = numThreads; ctx->threadLimit = numThreads; } return ctx; } /*! POOL_join() : Shutdown the queue, wake any sleeping threads, and join all of the threads. 
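 * Note : joins every thread created so far (ctx->threadCapacity of them),
 *        so it is also safe on a pool whose thread creation failed midway.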
*/ static void POOL_join(POOL_ctx* ctx) { /* Shut down the queue */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->shutdown = 1; ZSTD_pthread_mutex_unlock(&ctx->queueMutex); /* Wake up sleeping threads */ ZSTD_pthread_cond_broadcast(&ctx->queuePushCond); ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); /* Join all of the threads */ { size_t i; for (i = 0; i < ctx->threadCapacity; ++i) { ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ } } } void POOL_free(POOL_ctx *ctx) { if (!ctx) { return; } POOL_join(ctx); ZSTD_pthread_mutex_destroy(&ctx->queueMutex); ZSTD_pthread_cond_destroy(&ctx->queuePushCond); ZSTD_pthread_cond_destroy(&ctx->queuePopCond); ZSTD_customFree(ctx->queue, ctx->customMem); ZSTD_customFree(ctx->threads, ctx->customMem); ZSTD_customFree(ctx, ctx->customMem); } void ZSTD_freeThreadPool (ZSTD_threadPool* pool) { POOL_free (pool); } size_t POOL_sizeof(POOL_ctx *ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ return sizeof(*ctx) + ctx->queueSize * sizeof(POOL_job) + ctx->threadCapacity * sizeof(ZSTD_pthread_t); } /* @return : 0 on success, 1 on error */ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) { if (numThreads <= ctx->threadCapacity) { if (!numThreads) return 1; ctx->threadLimit = numThreads; return 0; } /* numThreads > threadCapacity */ { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); if (!threadPool) return 1; /* replace existing thread pool */ ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool)); ZSTD_customFree(ctx->threads, ctx->customMem); ctx->threads = threadPool; /* Initialize additional threads */ { size_t threadId; for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) { if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) { ctx->threadCapacity = threadId; return 1; } } } } /* successfully expanded */ ctx->threadCapacity = numThreads; ctx->threadLimit = numThreads; return 0; } /* @return : 0 on success, 1 on error */ int POOL_resize(POOL_ctx* ctx, size_t numThreads) { int result; if (ctx==NULL) return 1; ZSTD_pthread_mutex_lock(&ctx->queueMutex); result = POOL_resize_internal(ctx, numThreads); ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return result; } /** * Returns 1 if the queue is full and 0 otherwise. * * When queueSize is 1 (pool was created with an intended queueSize of 0), * then a queue is empty if there is a thread free _and_ no job is waiting. 
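 * Worked example : with queueSize==5 (a pool created with an intended
 * queueSize of 4), queueHead==2 and queueTail==1 give (1+1)%5 == 2, which
 * equals queueHead, so the circular buffer is full and pushers must wait.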
*/ static int isQueueFull(POOL_ctx const* ctx) { if (ctx->queueSize > 1) { return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize); } else { return (ctx->numThreadsBusy == ctx->threadLimit) || !ctx->queueEmpty; } } static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) { POOL_job const job = {function, opaque}; assert(ctx != NULL); if (ctx->shutdown) return; ctx->queueEmpty = 0; ctx->queue[ctx->queueTail] = job; ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize; ZSTD_pthread_cond_signal(&ctx->queuePopCond); } void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { assert(ctx != NULL); ZSTD_pthread_mutex_lock(&ctx->queueMutex); /* Wait until there is space in the queue for the new job */ while (isQueueFull(ctx) && (!ctx->shutdown)) { ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); } POOL_add_internal(ctx, function, opaque); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { assert(ctx != NULL); ZSTD_pthread_mutex_lock(&ctx->queueMutex); if (isQueueFull(ctx)) { ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return 0; } POOL_add_internal(ctx, function, opaque); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return 1; } #else /* ZSTD_MULTITHREAD not defined */ /* ========================== */ /* No multi-threading support */ /* ========================== */ /* We don't need any data, but if it is empty, malloc() might return NULL. */ struct POOL_ctx_s { int dummy; }; static POOL_ctx g_poolCtx; POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { (void)numThreads; (void)queueSize; (void)customMem; return &g_poolCtx; } void POOL_free(POOL_ctx* ctx) { assert(!ctx || ctx == &g_poolCtx); (void)ctx; } int POOL_resize(POOL_ctx* ctx, size_t numThreads) { (void)ctx; (void)numThreads; return 0; } void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { (void)ctx; function(opaque); } int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { (void)ctx; function(opaque); return 1; } size_t POOL_sizeof(POOL_ctx* ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ assert(ctx == &g_poolCtx); return sizeof(*ctx); } #endif /* ZSTD_MULTITHREAD */ /**** ended inlining common/pool.c ****/ /**** start inlining common/zstd_common.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ #define ZSTD_DEPS_NEED_MALLOC /**** skipping file: zstd_deps.h ****/ /**** skipping file: error_private.h ****/ /**** skipping file: zstd_internal.h ****/ /*-**************************************** * Version ******************************************/ unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } /*-**************************************** * ZSTD Error Management ******************************************/ #undef ZSTD_isError /* defined within zstd_internal.h */ /*! 
ZSTD_isError() : * tells if a return value is an error code * symbol is required for external callers */ unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /*! ZSTD_getErrorName() : * provides error code string from function result (useful for debugging) */ const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } /*! ZSTD_getError() : * convert a `size_t` function result into a proper ZSTD_errorCode enum */ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } /*! ZSTD_getErrorString() : * provides error code string from enum */ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } /*=************************************************************** * Custom allocator ****************************************************************/ void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem) { if (customMem.customAlloc) return customMem.customAlloc(customMem.opaque, size); return ZSTD_malloc(size); } void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem) { if (customMem.customAlloc) { /* calloc implemented as malloc+memset; * not as efficient as calloc, but next best guess for custom malloc */ void* const ptr = customMem.customAlloc(customMem.opaque, size); ZSTD_memset(ptr, 0, size); return ptr; } return ZSTD_calloc(1, size); } void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) { if (ptr!=NULL) { if (customMem.customFree) customMem.customFree(customMem.opaque, ptr); else ZSTD_free(ptr); } } /**** ended inlining common/zstd_common.c ****/ /**** start inlining compress/fse_compress.c ****/ /* ****************************************************************** * FSE : Finite State Entropy encoder * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Includes ****************************************************************/ /**** skipping file: ../common/compiler.h ****/ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/debug.h ****/ /**** start inlining hist.h ****/ /* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* --- dependencies --- */ /**** skipping file: ../common/zstd_deps.h ****/ /* --- simple histogram functions --- */ /*! 
HIST_count(): * Provides the precise count of each byte within a table 'count'. * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). * Updates *maxSymbolValuePtr with actual largest symbol value detected. * @return : count of the most frequent symbol (which isn't identified). * or an error code, which can be tested using HIST_isError(). * note : if return == srcSize, there is only one symbol. */ size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); unsigned HIST_isError(size_t code); /**< tells if a return value is an error code */ /* --- advanced histogram functions --- */ #define HIST_WKSP_SIZE_U32 1024 #define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned)) /** HIST_count_wksp() : * Same as HIST_count(), but using an externally provided scratch buffer. * Benefit is this function will use very little stack space. * `workSpace` is a writable buffer which must be 4-bytes aligned, * `workSpaceSize` must be >= HIST_WKSP_SIZE */ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, void* workSpace, size_t workSpaceSize); /** HIST_countFast() : * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr. * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` */ size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); /** HIST_countFast_wksp() : * Same as HIST_countFast(), but using an externally provided scratch buffer. * `workSpace` is a writable buffer which must be 4-bytes aligned, * `workSpaceSize` must be >= HIST_WKSP_SIZE */ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, void* workSpace, size_t workSpaceSize); /*! HIST_count_simple() : * Same as HIST_countFast(), this function is unsafe, * and will segfault if any value within `src` is `> *maxSymbolValuePtr`. * It is also a bit slower for large inputs. * However, it does not need any additional memory (not even on stack). * @return : count of the most frequent symbol. * Note this function doesn't produce any error (i.e. it must succeed). 
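 *
 * Example (illustrative sketch) :
 * \code
 *   unsigned count[256];
 *   unsigned maxSymbolValue = 255;
 *   unsigned const largest = HIST_count_simple(count, &maxSymbolValue, src, srcSize);
 * \endcode
 * After the call, count[s] holds the number of occurrences of byte value s
 * within `src`, and maxSymbolValue is lowered to the largest value present.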
*/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize);

unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */

/* --- advanced histogram functions --- */

#define HIST_WKSP_SIZE_U32 1024
#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
/**** ended inlining hist.h ****/
/**** skipping file: ../common/bitstream.h ****/
#define FSE_STATIC_LINKING_ONLY
/**** skipping file: ../common/fse.h ****/
/**** skipping file: ../common/error_private.h ****/
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
/**** skipping file: ../common/zstd_deps.h ****/


/* **************************************************************
*  Error Management
****************************************************************/
#define FSE_isError ERR_isError


/* **************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)


/* Function templates */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
 * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
 */
size_t FSE_buildCTable_wksp(FSE_CTable* ct,
                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
                            void* workSpace, size_t wkspSize)
{
    U32 const tableSize = 1 << tableLog;
    U32 const tableMask = tableSize - 1;
    void* const ptr = ct;
    U16* const tableU16 = ( (U16*) ptr) + 2;
    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
    U32 const step = FSE_TABLESTEP(tableSize);

    U32* cumul = (U32*)workSpace;
    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));

    U32 highThreshold = tableSize-1;

    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
    /* CTable header */
    tableU16[-2] = (U16) tableLog;
    tableU16[-1] = (U16) maxSymbolValue;
    assert(tableLog < 16);   /* required for threshold strategy to work */

    /* For explanations on how to distribute symbol values over the table :
     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */

#ifdef __clang_analyzer__
    ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
#endif

    /* symbol start positions */
    {   U32 u;
        cumul[0] = 0;
        for (u=1; u <= maxSymbolValue+1; u++) {
            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                cumul[u] = cumul[u-1] + 1;
                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
            } else {
                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
        }   }
        cumul[maxSymbolValue+1] = tableSize+1;
    }

    /* Spread symbols */
    {   U32 position = 0;
        U32 symbol;
        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
            int nbOccurrences;
            int const freq = normalizedCounter[symbol];
            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                position = (position + step) & tableMask;
                while (position > highThreshold)
                    position = (position + step) & tableMask;   /* Low proba area */
        }   }

        assert(position==0);  /* Must have initialized all positions */
    }

    /* Build table */
    {   U32 u; for (u=0; u<tableSize; u++) {
        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order ; gives next state value */
    }   }

    /* Build Symbol Transformation Table */
    {   unsigned total = 0;
        unsigned s;
        for (s=0; s<=maxSymbolValue; s++) {
            switch (normalizedCounter[s])
            {
            case  0:
                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
                break;

            case -1:
            case  1:
                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
                symbolTT[s].deltaFindState = total - 1;
                total ++;
                break;
            default :
                {
                U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
                U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
                symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
                symbolTT[s].deltaFindState = total - normalizedCounter[s];
                total +=  normalizedCounter[s];
    }   }   }   }

    return 0;
}


#ifndef FSE_COMMONDEFS_ONLY

/*-**************************************************************
*  FSE NCount encoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ?
use default */ } static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, unsigned writeIsSafe) { BYTE* const ostart = (BYTE*) header; BYTE* out = ostart; BYTE* const oend = ostart + headerBufferSize; int nbBits; const int tableSize = 1 << tableLog; int remaining; int threshold; U32 bitStream = 0; int bitCount = 0; unsigned symbol = 0; unsigned const alphabetSize = maxSymbolValue + 1; int previousIs0 = 0; /* Table Size */ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; bitCount += 4; /* Init */ remaining = tableSize+1; /* +1 for extra accuracy */ threshold = tableSize; nbBits = tableLog+1; while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */ if (previousIs0) { unsigned start = symbol; while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++; if (symbol == alphabetSize) break; /* incorrect distribution */ while (symbol >= start+24) { start+=24; bitStream += 0xFFFFU << bitCount; if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE) bitStream; out[1] = (BYTE)(bitStream>>8); out+=2; bitStream>>=16; } while (symbol >= start+3) { start+=3; bitStream += 3 << bitCount; bitCount += 2; } bitStream += (symbol-start) << bitCount; bitCount += 2; if (bitCount>16) { if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out += 2; bitStream >>= 16; bitCount -= 16; } } { int count = normalizedCounter[symbol++]; int const max = (2*threshold-1) - remaining; remaining -= count < 0 ? -count : count; count++; /* +1 for extra accuracy */ if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) 
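                                          counts below `max` need only (nbBits-1) bits,
                                          counts in [max..threshold[ keep the full nbBits,
                                          and counts >= threshold are shifted up by `max` into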
                                          [threshold+max 2*threshold[ */
            bitStream += count << bitCount;
            bitCount  += nbBits;
            bitCount  -= (count<max);
            previousIs0  = (count==1);
            if (remaining<1) return ERROR(GENERIC);
            while (remaining<threshold) { nbBits--; threshold>>=1; }
        }
        if (bitCount>16) {
            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
            out[0] = (BYTE)bitStream;
            out[1] = (BYTE)(bitStream>>8);
            out += 2;
            bitStream >>= 16;
            bitCount -= 16;
    }   }

    if (remaining != 1) return ERROR(GENERIC);  /* incorrect normalized distribution */
    assert(symbol <= alphabetSize);

    /* flush remaining bitStream */
    if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
    out[0] = (BYTE)bitStream;
    out[1] = (BYTE)(bitStream>>8);
    out+= (bitCount+7) /8;

    return (out-ostart);
}


size_t FSE_writeNCount (void* buffer, size_t bufferSize,
                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */

    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);

    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
}


/*-**************************************************************
*  FSE Compression Code
****************************************************************/

FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
{
    size_t size;
    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
    return (FSE_CTable*)ZSTD_malloc(size);
}

void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }

/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    return minBits;
}

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
    U32 tableLog = maxTableLog;
    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
    return tableLog;
}

unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}

/* Secondary normalization method.
   To be used when primary method fails.
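   The primary pass in FSE_normalizeCount() scales each count with a single
   precomputed reciprocal of `total`; when its accumulated rounding deficit
   reaches half the largest symbol's normalized count, that result is deemed
   too distorted, and this slower per-symbol redistribution takes over.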
*/
static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
{
    short const NOT_YET_ASSIGNED = -2;
    U32 s;
    U32 distributed = 0;
    U32 ToDistribute;

    /* Init */
    U32 const lowThreshold = (U32)(total >> tableLog);
    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));

    for (s=0; s<=maxSymbolValue; s++) {
        if (count[s] == 0) { norm[s]=0; continue; }
        if (count[s] <= lowThreshold) {
            norm[s] = lowProbCount;
            distributed++;
            total -= count[s];
            continue;
        }
        if (count[s] <= lowOne) {
            norm[s] = 1;
            distributed++;
            total -= count[s];
            continue;
        }
        norm[s]=NOT_YET_ASSIGNED;
    }
    ToDistribute = (1 << tableLog) - distributed;

    if (ToDistribute == 0)
        return 0;

    if ((total / ToDistribute) > lowOne) {
        /* risk of rounding to zero */
        lowOne = (U32)((total * 3) / (ToDistribute * 2));
        for (s=0; s<=maxSymbolValue; s++) {
            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
                norm[s] = 1;
                distributed++;
                total -= count[s];
                continue;
        }   }
        ToDistribute = (1 << tableLog) - distributed;
    }

    if (distributed == maxSymbolValue+1) {
        /* all values are pretty poor;
           probably incompressible data (should have already been detected);
           find max, then give all remaining points to max */
        U32 maxV = 0, maxC = 0;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
        norm[maxV] += (short)ToDistribute;
        return 0;
    }

    if (total == 0) {
        /* all of the symbols were low enough for the lowOne or lowThreshold */
        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
        return 0;
    }

    {   U64 const vStepLog = 62 - tableLog;
        U64 const mid = (1ULL << (vStepLog-1)) - 1;
        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total);   /* scale on remaining */
        U64 tmpTotal = mid;
        for (s=0; s<=maxSymbolValue; s++) {
            if (norm[s]==NOT_YET_ASSIGNED) {
                U64 const end = tmpTotal + (count[s] * rStep);
                U32 const sStart = (U32)(tmpTotal >> vStepLog);
                U32 const sEnd = (U32)(end >> vStepLog);
                U32 const weight = sEnd - sStart;
                if (weight < 1)
                    return ERROR(GENERIC);
                norm[s] = (short)weight;
                tmpTotal = end;
    }   }   }

    return 0;
}

size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
                           const unsigned* count, size_t total,
                           unsigned maxSymbolValue, unsigned useLowProbCount)
{
    /* Sanity checks */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */

    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
        short const lowProbCount = useLowProbCount ? -1 : 1;
        U64 const scale = 62 - tableLog;
        U64 const step = ZSTD_div64((U64)1<<62, (U32)total);   /* <== here, one division !
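        (precomputing step = 2^62 / total means every per-symbol probability
         below is derived with one multiply and one shift, keeping the scan
         loop division-free)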
        */
        U64 const vStep = 1ULL<<(scale-20);
        int stillToDistribute = 1<<tableLog;
        unsigned s;
        unsigned largest=0;
        short largestP=0;
        U32 lowThreshold = (U32)(total >> tableLog);

        for (s=0; s<=maxSymbolValue; s++) {
            if (count[s] == total) return 0;   /* rle special case */
            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
            if (count[s] <= lowThreshold) {
                normalizedCounter[s] = lowProbCount;
                stillToDistribute--;
            } else {
                short proba = (short)((count[s]*step) >> scale);
                if (proba<8) {
                    U64 restToBeat = vStep * rtbTable[proba];
                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
                }
                if (proba > largestP) { largestP=proba; largest=s; }
                normalizedCounter[s] = proba;
                stillToDistribute -= proba;
        }   }
        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
            /* corner case, need another normalization method */
            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
            if (FSE_isError(errorCode)) return errorCode;
        }
        else normalizedCounter[largest] += (short)stillToDistribute;
    }

#if 0
    {   /* Print Table (debug) */
        U32 s;
        U32 nTotal = 0;
        for (s=0; s<=maxSymbolValue; s++)
            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
        for (s=0; s<=maxSymbolValue; s++)
            nTotal += abs(normalizedCounter[s]);
        if (nTotal != (1U<<tableLog))
            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
        getchar();
    }
#endif

    return tableLog;
}

/* fake FSE_CTable, for raw (uncompressed) input */
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSymbolValue = tableMask;
    void* const ptr = ct;
    U16* const tableU16 = ( (U16*) ptr) + 2;
    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);             /* min size */

    /* header */
    tableU16[-2] = (U16) nbBits;
    tableU16[-1] = (U16) maxSymbolValue;

    /* Build table */
    for (s=0; s<tableSize; s++)
        tableU16[s] = (U16)(tableSize + s);

    /* Build Symbol Transformation Table */
    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
        for (s=0; s<=maxSymbolValue; s++) {
            symbolTT[s].deltaNbBits = deltaNbBits;
            symbolTT[s].deltaFindState = s-1;
    }   }

    return 0;
}

/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
    void* ptr = ct;
    U16* tableU16 = ( (U16*) ptr) + 2;
    void* FSCTptr = (U32*)ptr + 2;
    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;

    /* header */
    tableU16[-2] = (U16) 0;
    tableU16[-1] = (U16) symbolValue;

    /* Build table */
    tableU16[0] = 0;
    tableU16[1] = 0;   /* just in case */

    /* Build Symbol Transformation Table */
    symbolTT[symbolValue].deltaNbBits = 0;
    symbolTT[symbolValue].deltaFindState = 0;

    return 0;
}


static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct, const unsigned fast)
{
    const BYTE* const istart = (const BYTE*) src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip=iend;

    BIT_CStream_t bitC;
    FSE_CState_t CState1, CState2;

    /* init */
    if (srcSize <= 2) return 0;
    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }

#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))

    if (srcSize & 1) {
        FSE_initCState2(&CState1, ct, *--ip);
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    } else {
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_initCState2(&CState1, ct, *--ip);
    }

    /* join to mod 4 */
    srcSize -= 2;
    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
        FSE_encodeSymbol(&bitC, &CState2, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    }

    /* 2 or 4 encoding per loop */
    while ( ip>istart ) {

        FSE_encodeSymbol(&bitC, &CState2, *--ip);

        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
            FSE_FLUSHBITS(&bitC);

        FSE_encodeSymbol(&bitC, &CState1, *--ip);

        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
            FSE_encodeSymbol(&bitC, &CState2, *--ip);
            FSE_encodeSymbol(&bitC, &CState1, *--ip);
        }

        FSE_FLUSHBITS(&bitC);
    }

    FSE_flushCState(&bitC, &CState2);
    FSE_flushCState(&bitC, &CState1);
    return BIT_closeCStream(&bitC);
}

size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct)
{
    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));

    if (fast)
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
    else
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}


size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* FSE_compress_wksp() :
 * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` size must be `(1<<tableLog)`.
 */
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    unsigned count[FSE_MAX_SYMBOL_VALUE+1];
    S16   norm[FSE_MAX_SYMBOL_VALUE+1];
    FSE_CTable* CTable = (FSE_CTable*)workSpace;
    size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
    void* scratchBuffer = (void*)(CTable + CTableSize);
    size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));

    /* init conditions */
    if (wkspSize < FSE_COMPRESS_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
    if (srcSize <= 1) return 0;  /* Not compressible */
    if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;

    /* Scan input and build symbol stats */
    {   CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
        if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
        if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
    }

    tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) );

    /* Write table description header */
    {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
        op += nc_err;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
        if (cSize == 0) return 0;   /* not enough space for compressed data */
        op += cSize;
    }

    /* check compressibility */
    if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;

    return op-ostart;
}

typedef struct {
    FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    union {
      U32 hist_wksp[HIST_WKSP_SIZE_U32];
      BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
    } workspace;
} fseWkspMax_t;

size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
{
    fseWkspMax_t scratchBuffer;
    DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
    return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
}

size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
}

#endif

#endif   /* FSE_COMMONDEFS_ONLY */
/**** ended inlining compress/fse_compress.c ****/
/**** start inlining compress/hist.c ****/
/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc.
 *
 *  You can contact the author at :
 *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* --- dependencies --- */
/**** skipping file: ../common/mem.h ****/
/**** skipping file: ../common/debug.h ****/
/**** skipping file: ../common/error_private.h ****/
/**** skipping file: hist.h ****/


/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }

/*-**************************************************************
 *  Histogram functions
 ****************************************************************/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned largestCount=0;

    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > largestCount) largestCount = count[s];
    }

    return largestCount;
}

typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;

/* HIST_count_parallel_wksp() :
 * store histogram into 4 intermediate tables, recombined at the end.
 * this design makes better use of OoO cpus,
 * and is noticeably faster when some values are heavily repeated.
 * But it needs some additional workspace for intermediate tables.
 * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
 * @return : largest histogram frequency,
 *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
                                unsigned* count, unsigned* maxSymbolValuePtr,
                                const void* source, size_t sourceSize,
                                HIST_checkInput_e check,
                                U32* const workSpace)
{
    const BYTE* ip = (const BYTE*)source;
    const BYTE* const iend = ip+sourceSize;
    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
    unsigned max=0;
    U32* const Counting1 = workSpace;
    U32* const Counting2 = Counting1 + 256;
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    /* safety checks */
    assert(*maxSymbolValuePtr <= 255);
    if (!sourceSize) {
        ZSTD_memset(count, 0, countSize);
        *maxSymbolValuePtr = 0;
        return 0;
    }
    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));

    /* by stripes of 16 bytes */
    {   U32 cached = MEM_read32(ip); ip += 4;
        while (ip < iend-15) {
            U32 c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
        }
        ip-=4;
    }

    /* finish last symbols */
    while (ip<iend) Counting1[*ip++]++;

    {   U32 s;
        for (s=0; s<256; s++) {
            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
            if (Counting1[s] > max) max = Counting1[s];
    }   }

    {   unsigned maxSymbolValue = 255;
        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
        *maxSymbolValuePtr = maxSymbolValue;
        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
    }
    return (size_t)max;
}

/* HIST_countFast_wksp() :
 * Same as HIST_countFast(), but using an externally provided scratch buffer.
* `workSpace` is a writable buffer which must be 4-bytes aligned, * `workSpaceSize` must be >= HIST_WKSP_SIZE */ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { if (sourceSize < 1500) /* heuristic threshold */ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace); } /* HIST_count_wksp() : * Same as HIST_count(), but using an externally provided scratch buffer. * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, void* workSpace, size_t workSpaceSize) { if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); if (*maxSymbolValuePtr < 255) return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace); *maxSymbolValuePtr = 255; return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); } #ifndef ZSTD_NO_UNUSED_FUNCTIONS /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize) { unsigned tmpCounters[HIST_WKSP_SIZE_U32]; return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters)); } size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) { unsigned tmpCounters[HIST_WKSP_SIZE_U32]; return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters)); } #endif /**** ended inlining compress/hist.c ****/ /**** start inlining compress/huf_compress.c ****/ /* ****************************************************************** * Huffman encoder, part of New Generation Entropy library * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
****************************************************************** */ /* ************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Includes ****************************************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../common/compiler.h ****/ /**** skipping file: ../common/bitstream.h ****/ /**** skipping file: hist.h ****/ #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ /**** skipping file: ../common/fse.h ****/ #define HUF_STATIC_LINKING_ONLY /**** skipping file: ../common/huf.h ****/ /**** skipping file: ../common/error_private.h ****/ /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ /* ************************************************************** * Utils ****************************************************************/ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); } /* ******************************************************* * HUF : Huffman block compression *********************************************************/ /* HUF_compressWeights() : * Same as FSE_compress(), but dedicated to huff0's weights compression. * The use case needs much less stack memory. * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. */ #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const oend = ostart + dstSize; unsigned maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)]; unsigned count[HUF_TABLELOG_MAX+1]; S16 norm[HUF_TABLELOG_MAX+1]; /* init conditions */ if (wtSize <= 1) return 0; /* Not compressible */ /* Scan input and build symbol stats */ { unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize); /* never fails */ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ } tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) ); /* Write table description header */ { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) ); op += hSize; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } return (size_t)(op-ostart); } /*! 
HUF_writeCTable() : `CTable` : Huffman tree to save, using huf representation. @return : size of saved CTable */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) { BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; BYTE* op = (BYTE*)dst; U32 n; /* check conditions */ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); /* convert to weight */ bitsToWeight[0] = 0; for (n=1; n<huffLog+1; n++) bitsToWeight[n] = (BYTE)(huffLog + 1 - n); for (n=0; n<maxSymbolValue; n++) huffWeight[n] = bitsToWeight[CTable[n].nbBits]; /* attempt weights compression by FSE */ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = (BYTE)hSize; return hSize+1; } } /* write raw values as 4-bits (max : 15) */ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ for (n=0; n<maxSymbolValue; n+=2) op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]); return ((maxSymbolValue+1)/2) + 1; } size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights) { BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; U32 nbSymbols = 0; /* get symbol weights */ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); *hasZeroWeights = (rankVal[0] > 0); /* check result */ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); /* Prepare base value per rank */ { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 curr = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = curr; } } /* fill nbBits */ { U32 n; for (n=0; n<nbSymbols; n++) { const U32 w = huffWeight[n]; CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0); } } /* fill val */ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; } /* determine starting value per rank */ valPerRank[tableLog+1] = 0; /* for w==0 */ { U16 min = 0; U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } /* assign value within rank, symbol order */ { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; } } *maxSymbolValuePtr = nbSymbols - 1; return readSize; } typedef struct nodeElt_s { U32 count; U16 parent; BYTE byte; BYTE nbBits; } nodeElt; /** * HUF_setMaxHeight(): * Enforces maxNbBits on the Huffman tree described in huffNode. * * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts * the tree so that it is a valid canonical Huffman tree. * * @pre The sum of the ranks of each symbol == 2^largestBits, * where largestBits == huffNode[lastNonNull].nbBits. * @post The sum of the ranks of each symbol == 2^largestBits, * where largestBits is the return value <= maxNbBits. * * @param huffNode The Huffman tree modified in place to enforce maxNbBits. * @param lastNonNull The symbol with the lowest count in the Huffman tree. * @param maxNbBits The maximum allowed number of bits, which the Huffman tree * may not respect. After this function the Huffman tree will * respect maxNbBits. * @return The maximum number of bits of the Huffman tree after adjustment, * necessarily no more than maxNbBits. */ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) { const U32 largestBits = huffNode[lastNonNull].nbBits; /* early exit : no elt > maxNbBits, so the tree is already valid. */ if (largestBits <= maxNbBits) return largestBits; /* there are several too large elements (at least >= 2) */ { int totalCost = 0; const U32 baseCost = 1 << (largestBits - maxNbBits); int n = (int)lastNonNull; /* Adjust any ranks > maxNbBits to maxNbBits. * Compute totalCost, which is how far over 2^largestBits the sum of the * ranks is after adjusting the offending ranks. 
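 * Worked example (illustrative): with largestBits == 13 and maxNbBits == 11,
 * baseCost == 1 << 2 == 4. Capping a depth-13 node adds 4 - (1<<0) == 3 to
 * totalCost, and capping a depth-12 node adds 4 - (1<<1) == 2, both counted
 * in code-space units of 2^-largestBits.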
*/ while (huffNode[n].nbBits > maxNbBits) { totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); huffNode[n].nbBits = (BYTE)maxNbBits; n--; } /* n stops at huffNode[n].nbBits <= maxNbBits */ assert(huffNode[n].nbBits <= maxNbBits); /* n ends at the index of the smallest symbol using < maxNbBits */ while (huffNode[n].nbBits == maxNbBits) --n; /* renorm totalCost from 2^largestBits to 2^maxNbBits * note : totalCost is necessarily a multiple of baseCost */ assert((totalCost & (baseCost - 1)) == 0); totalCost >>= (largestBits - maxNbBits); assert(totalCost > 0); /* repay normalized cost */ { U32 const noSymbol = 0xF0F0F0F0; U32 rankLast[HUF_TABLELOG_MAX+2]; /* Get pos of last (smallest = lowest cum. count) symbol per rank */ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); { U32 currentNbBits = maxNbBits; int pos; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currentNbBits) continue; currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ rankLast[maxNbBits-currentNbBits] = (U32)pos; } } while (totalCost > 0) { /* Try to reduce the next power of 2 above totalCost because we * gain back half the rank. */ U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1; for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { U32 const highPos = rankLast[nBitsToDecrease]; U32 const lowPos = rankLast[nBitsToDecrease-1]; if (highPos == noSymbol) continue; /* Decrease highPos if no symbols of lowPos or if it is * not cheaper to remove 2 lowPos than highPos. */ if (lowPos == noSymbol) break; { U32 const highTotal = huffNode[highPos].count; U32 const lowTotal = 2 * huffNode[lowPos].count; if (highTotal <= lowTotal) break; } } /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */ assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) nBitsToDecrease++; assert(rankLast[nBitsToDecrease] != noSymbol); /* Increase the number of bits to gain back half the rank cost. */ totalCost -= 1 << (nBitsToDecrease-1); huffNode[rankLast[nBitsToDecrease]].nbBits++; /* Fix up the new rank. * If the new rank was empty, this symbol is now its smallest. * Otherwise, this symbol will be the largest in the new rank so no adjustment. */ if (rankLast[nBitsToDecrease-1] == noSymbol) rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* Fix up the old rank. * If the symbol was at position 0, meaning it was the highest weight symbol in the tree, * it must be the only symbol in its rank, so the old rank now has no symbols. * Otherwise, since the Huffman nodes are sorted by count, the previous position is now * the smallest node in the rank. If the previous position belongs to a different rank, * then the rank is now empty. */ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol; else { rankLast[nBitsToDecrease]--; if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ } } /* while (totalCost > 0) */ /* If we've removed too much weight, then we have to add it back. * To avoid overshooting again, we only adjust the smallest rank. * We take the largest nodes from the lowest rank 0 and move them * to rank 1. There's guaranteed to be enough rank 0 symbols because * TODO. 
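 * (For orientation, illustrative: rankLast[k] tracks the smallest symbol
 * currently coded with maxNbBits-k bits, so rank 0 holds the maxNbBits-bit
 * symbols and rank 1 the (maxNbBits-1)-bit symbols; moving one node from
 * rank 0 to rank 1 hands back exactly one unit of cost, hence totalCost++.)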
*/ while (totalCost < 0) { /* Sometimes, cost correction overshoots */ /* special case : no rank 1 symbol (using maxNbBits-1); * let's create one from largest rank 0 (using maxNbBits). */ if (rankLast[1] == noSymbol) { while (huffNode[n].nbBits == maxNbBits) n--; huffNode[n+1].nbBits--; assert(n >= 0); rankLast[1] = (U32)(n+1); totalCost++; continue; } huffNode[ rankLast[1] + 1 ].nbBits--; rankLast[1]++; totalCost ++; } } /* repay normalized cost */ } /* there are several too large elements (at least >= 2) */ return maxNbBits; } typedef struct { U32 base; U32 curr; } rankPos; typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; #define RANK_POSITION_TABLE_SIZE 32 typedef struct { huffNodeTable huffNodeTbl; rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; } HUF_buildCTable_wksp_tables; /** * HUF_sort(): * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. * * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. * Must have (maxSymbolValue + 1) entries. * @param[in] count Histogram of the symbols. * @param[in] maxSymbolValue Maximum symbol value. * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. */ static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition) { int n; int const maxSymbolValue1 = (int)maxSymbolValue + 1; /* Compute base and set curr to base. * For each symbol s, let lowerRank = BIT_highbit32(count[s]+1) and rank = lowerRank + 1. * Then 2^lowerRank <= count[s]+1 <= 2^rank. * We attribute each symbol to lowerRank's base value, because we want to know where * each rank begins in the output, so for rank R we want to count ranks R+1 and above. */ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); for (n = 0; n < maxSymbolValue1; ++n) { U32 lowerRank = BIT_highbit32(count[n] + 1); rankPosition[lowerRank].base++; } assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { rankPosition[n-1].base += rankPosition[n].base; rankPosition[n-1].curr = rankPosition[n-1].base; } /* Sort */ for (n = 0; n < maxSymbolValue1; ++n) { U32 const c = count[n]; U32 const r = BIT_highbit32(c+1) + 1; U32 pos = rankPosition[r].curr++; /* Insert into the correct position in the rank. * We have at most 256 symbols, so this insertion should be fine. */ while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) { huffNode[pos] = huffNode[pos-1]; pos--; } huffNode[pos].count = c; huffNode[pos].byte = (BYTE)n; } } /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). */ #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) /* HUF_buildTree(): * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. * * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. * @param maxSymbolValue The maximum symbol value. * @return The smallest node in the Huffman tree (by count). 
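 * In outline, this is the textbook Huffman construction: leaves are consumed
 * from the small-count end (lowS) while freshly created parents are consumed
 * in creation order (lowN); each step merges the two smallest remaining
 * nodes into a new parent, and depths are then read back through the
 * parent links.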
*/ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) { nodeElt* const huffNode0 = huffNode - 1; int nonNullRank; int lowS, lowN; int nodeNb = STARTNODE; int n, nodeRoot; /* init for parents */ nonNullRank = (int)maxSymbolValue; while(huffNode[nonNullRank].count == 0) nonNullRank--; lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; nodeNb++; lowS-=2; for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ /* create parents */ while (nodeNb <= nodeRoot) { int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; nodeNb++; } /* distribute weights (unlimited tree height) */ huffNode[nodeRoot].nbBits = 0; for (n=nodeRoot-1; n>=STARTNODE; n--) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; for (n=0; n<=nonNullRank; n++) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; return nonNullRank; } /** * HUF_buildCTableFromTree(): * Build the CTable given the Huffman tree in huffNode. * * @param[out] CTable The output Huffman CTable. * @param huffNode The Huffman tree. * @param nonNullRank The last and smallest node in the Huffman tree. * @param maxSymbolValue The maximum symbol value. * @param maxNbBits The exact maximum number of bits used in the Huffman tree. */ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) { /* fill result into ctable (val, nbBits) */ int n; U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; int const alphabetSize = (int)(maxSymbolValue + 1); for (n=0; n<=nonNullRank; n++) nbPerRank[huffNode[n].nbBits]++; /* determine starting value per rank */ { U16 min = 0; for (n=(int)maxNbBits; n>0; n--) { valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } for (n=0; n<alphabetSize; n++) CTable[n].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ for (n=0; n<alphabetSize; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; /* assign value within rank, symbol order */ } size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) { HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace; nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; nodeElt* const huffNode = huffNode0+1; int nonNullRank; /* safety checks */ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) return ERROR(workSpace_tooSmall); if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); /* sort, decreasing order */ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); /* build tree */ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); /* enforce maxTableLog */ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits); return maxNbBits; } size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { size_t nbBits = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { nbBits += CTable[s].nbBits * count[s]; } return nbBits >> 3; } int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { int bad = 0; int s; for (s = 0; s <= 
(int)maxSymbolValue; ++s) { bad |= (count[s] != 0) & (CTable[s].nbBits == 0); } return !bad; } size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } FORCE_INLINE_TEMPLATE void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) { BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); } #define HUF_FLUSHBITS(s) BIT_flushBits(s) #define HUF_FLUSHBITS_1(stream) \ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) #define HUF_FLUSHBITS_2(stream) \ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { const BYTE* ip = (const BYTE*) src; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; size_t n; BIT_CStream_t bitC; /* init */ if (dstSize < 8) return 0; /* not enough space to compress */ { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op)); if (HUF_isError(initErr)) return 0; } n = srcSize & ~3; /* join to mod 4 */ switch (srcSize & 3) { case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); HUF_FLUSHBITS_2(&bitC); /* fall-through */ case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); HUF_FLUSHBITS_1(&bitC); /* fall-through */ case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); HUF_FLUSHBITS(&bitC); /* fall-through */ case 0 : /* fall-through */ default: break; } for (; n>0; n-=4) { /* note : n&3==0 at this stage */ HUF_encodeSymbol(&bitC, ip[n- 1], CTable); HUF_FLUSHBITS_1(&bitC); HUF_encodeSymbol(&bitC, ip[n- 2], CTable); HUF_FLUSHBITS_2(&bitC); HUF_encodeSymbol(&bitC, ip[n- 3], CTable); HUF_FLUSHBITS_1(&bitC); HUF_encodeSymbol(&bitC, ip[n- 4], CTable); HUF_FLUSHBITS(&bitC); } return BIT_closeCStream(&bitC); } #if DYNAMIC_BMI2 static TARGET_ATTRIBUTE("bmi2") size_t HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } static size_t HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, const int bmi2) { if (bmi2) { return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); } return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); } #else static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, const int bmi2) { (void)bmi2; return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } #endif size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* op = 
ostart; if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ if (srcSize < 12) return 0; /* no saving possible : too small input */ op += 6; /* jumpTable */ assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize==0) return 0; assert(cSize <= 65535); MEM_writeLE16(ostart, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize==0) return 0; assert(cSize <= 65535); MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize==0) return 0; assert(cSize <= 65535); MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); assert(ip <= iend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); if (cSize==0) return 0; op += cSize; } return (size_t)(op-ostart); } size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) { size_t const cSize = (nbStreams==HUF_singleStream) ? HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) : HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; /* check compressibility */ assert(op >= ostart); if ((size_t)(op-ostart) >= srcSize-1) { return 0; } return (size_t)(op-ostart); } typedef struct { unsigned count[HUF_SYMBOLVALUE_MAX + 1]; HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; HUF_buildCTable_wksp_tables buildCTable_wksp; } HUF_compress_tables_t; /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */ static size_t HUF_compress_internal (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void* workSpace_align4, size_t wkspSize, HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, const int bmi2) { HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE); assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */ /* checks & inits */ if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall); if (!srcSize) return 0; /* Uncompressed */ if (!dstSize) return 0; /* cannot fit anything within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); if (!maxSymbolValue) maxSymbolValue = 
HUF_SYMBOLVALUE_MAX; if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; /* Heuristic : If old table is valid, use it for small inputs */ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } /* Scan input and build symbol stats */ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } /* Check validity of previous table */ if ( repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { *repeat = HUF_repeat_none; } /* Heuristic : use existing table for small inputs */ if (preferRepeat && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } /* Build Huffman Tree */ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, maxSymbolValue, huffLog, &table->buildCTable_wksp, sizeof(table->buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; /* Zero unused symbols in CTable, so we can check it for validity */ ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0, sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt))); } /* Write table description header */ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) ); /* Check if using previous huffman table is beneficial */ if (repeat && *repeat != HUF_repeat_none) { size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } } /* Use the new huffman table */ if (hSize + 12ul >= srcSize) { return 0; } op += hSize; if (repeat) { *repeat = HUF_repeat_none; } if (oldHufTable) ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, table->CTable, bmi2); } size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2); } /* HUF_compress4X_wksp(): * compress input using 4 streams. 
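 * (Format note: as implemented by HUF_compress4X_usingCTable_internal()
 * above, the output begins with a 6-byte jump table holding the
 * little-endian 16-bit compressed sizes of streams 1-3; the size of the
 * 4th stream is implied by the total compressed size.)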
* provide workspace to generate compression tables */ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/); } /* HUF_compress4X_repeat(): * compress input using 4 streams. * re-use an existing huffman compression table */ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2); } #ifndef ZSTD_NO_UNUSED_FUNCTIONS /** HUF_buildCTable() : * @return : maxNbBits * Note : count is used before tree is written, so they can safely overlap */ size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits) { HUF_buildCTable_wksp_tables workspace; return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace)); } size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); } #endif /**** ended inlining compress/huf_compress.c ****/ /**** start inlining compress/zstd_compress_literals.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ /**** start inlining zstd_compress_literals.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMPRESS_LITERALS_H #define ZSTD_COMPRESS_LITERALS_H /**** start inlining zstd_compress_internal.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /* This header contains definitions * that shall **only** be used by modules within lib/compress. */ #ifndef ZSTD_COMPRESS_H #define ZSTD_COMPRESS_H /*-************************************* * Dependencies ***************************************/ /**** skipping file: ../common/zstd_internal.h ****/ /**** start inlining ../common/zstd_trace.h ****/ /* * Copyright (c) 2016-2021, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_TRACE_H #define ZSTD_TRACE_H #if defined (__cplusplus) extern "C" { #endif #include <stddef.h> /* weak symbol support */ #if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && defined(__GNUC__) && \ !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \ !defined(__CYGWIN__) # define ZSTD_HAVE_WEAK_SYMBOLS 1 #else # define ZSTD_HAVE_WEAK_SYMBOLS 0 #endif #if ZSTD_HAVE_WEAK_SYMBOLS # define ZSTD_WEAK_ATTR __attribute__((__weak__)) #else # define ZSTD_WEAK_ATTR #endif /* Only enable tracing when weak symbols are available. */ #ifndef ZSTD_TRACE # define ZSTD_TRACE ZSTD_HAVE_WEAK_SYMBOLS #endif #if ZSTD_TRACE struct ZSTD_CCtx_s; struct ZSTD_DCtx_s; struct ZSTD_CCtx_params_s; typedef struct { /** * ZSTD_VERSION_NUMBER * * This is guaranteed to be the first member of ZSTD_trace. * Otherwise, this struct is not stable between versions. If * the version number does not match your expectation, you * should not interpret the rest of the struct. */ unsigned version; /** * Non-zero if streaming (de)compression is used. */ unsigned streaming; /** * The dictionary ID. */ unsigned dictionaryID; /** * Is the dictionary cold? * Only set on decompression. */ unsigned dictionaryIsCold; /** * The dictionary size or zero if no dictionary. */ size_t dictionarySize; /** * The uncompressed size of the data. */ size_t uncompressedSize; /** * The compressed size of the data. */ size_t compressedSize; /** * The fully resolved CCtx parameters (NULL on decompression). */ struct ZSTD_CCtx_params_s const* params; /** * The ZSTD_CCtx pointer (NULL on decompression). */ struct ZSTD_CCtx_s const* cctx; /** * The ZSTD_DCtx pointer (NULL on compression). */ struct ZSTD_DCtx_s const* dctx; } ZSTD_Trace; /** * A tracing context. It must be 0 when tracing is disabled. * Otherwise, any non-zero value returned by a tracing begin() * function is presented to any subsequent calls to end(). * * Any non-zero value is treated as tracing is enabled and not * interpreted by the library. * * Two possible uses are: * * A timestamp for when the begin() function was called. * * A unique key identifying the (de)compression, like the * address of the [dc]ctx pointer if you need to track * more information than just a timestamp. */ typedef unsigned long long ZSTD_TraceCtx; /** * Trace the beginning of a compression call. * @param cctx The cctx pointer for the compression. * It can be used as a key to map begin() to end(). * @returns Non-zero if tracing is enabled. The return value is * passed to ZSTD_trace_compress_end(). */ ZSTD_TraceCtx ZSTD_trace_compress_begin(struct ZSTD_CCtx_s const* cctx); /** * Trace the end of a compression call. * @param ctx The return value of ZSTD_trace_compress_begin(). * @param trace The zstd tracing info. 
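 * Example override (illustrative sketch; assumes weak-symbol support as
 * detected above, plus <stdio.h> and zstd.h for ZSTD_VERSION_NUMBER on the
 * user side):
 * \code
 *   ZSTD_TraceCtx ZSTD_trace_compress_begin(struct ZSTD_CCtx_s const* cctx)
 *   {   (void)cctx; return 1;   }
 *   void ZSTD_trace_compress_end(ZSTD_TraceCtx ctx, ZSTD_Trace const* trace)
 *   {   (void)ctx;
 *       if (trace->version == ZSTD_VERSION_NUMBER)
 *           fprintf(stderr, "zstd: %zu -> %zu bytes\n",
 *                   trace->uncompressedSize, trace->compressedSize);
 *   }
 * \endcode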
*/ void ZSTD_trace_compress_end( ZSTD_TraceCtx ctx, ZSTD_Trace const* trace); /** * Trace the beginning of a decompression call. * @param dctx The dctx pointer for the decompression. * It can be used as a key to map begin() to end(). * @returns Non-zero if tracing is enabled. The return value is * passed to ZSTD_trace_decompress_end(). */ ZSTD_TraceCtx ZSTD_trace_decompress_begin(struct ZSTD_DCtx_s const* dctx); /** * Trace the end of a decompression call. * @param ctx The return value of ZSTD_trace_decompress_begin(). * @param trace The zstd tracing info. */ void ZSTD_trace_decompress_end( ZSTD_TraceCtx ctx, ZSTD_Trace const* trace); #endif /* ZSTD_TRACE */ #if defined (__cplusplus) } #endif #endif /* ZSTD_TRACE_H */ /**** ended inlining ../common/zstd_trace.h ****/ /**** start inlining zstd_cwksp.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_CWKSP_H #define ZSTD_CWKSP_H /*-************************************* * Dependencies ***************************************/ /**** skipping file: ../common/zstd_internal.h ****/ #if defined (__cplusplus) extern "C" { #endif /*-************************************* * Constants ***************************************/ /* Since the workspace is effectively its own little malloc implementation / * arena, when we run under ASAN, we should similarly insert redzones between * each internal element of the workspace, so ASAN will catch overruns that * reach outside an object but that stay inside the workspace. * * This defines the size of that redzone. */ #ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128 #endif /*-************************************* * Structures ***************************************/ typedef enum { ZSTD_cwksp_alloc_objects, ZSTD_cwksp_alloc_buffers, ZSTD_cwksp_alloc_aligned } ZSTD_cwksp_alloc_phase_e; /** * Used to describe whether the workspace is statically allocated (and will not * necessarily ever be freed), or if it's dynamically allocated and we can * expect a well-formed caller to free this. */ typedef enum { ZSTD_cwksp_dynamic_alloc, ZSTD_cwksp_static_alloc } ZSTD_cwksp_static_alloc_e; /** * Zstd fits all its internal datastructures into a single contiguous buffer, * so that it only needs to perform a single OS allocation (or so that a buffer * can be provided to it and it can perform no allocations at all). This buffer * is called the workspace. * * Several optimizations complicate that process of allocating memory ranges * from this workspace for each internal datastructure: * * - These different internal datastructures have different setup requirements: * * - The static objects need to be cleared once and can then be trivially * reused for each compression. * * - Various buffers don't need to be initialized at all--they are always * written into before they're read. * * - The matchstate tables have a unique requirement that they don't need * their memory to be totally cleared, but they do need the memory to have * some bound, i.e., a guarantee that all values in the memory they've been * allocated are less than some maximum value (which is the starting value * for the indices that they will then use for compression). 
When this * guarantee is provided to them, they can use the memory without any setup * work. When it can't, they have to clear the area. * * - These buffers also have different alignment requirements. * * - We would like to reuse the objects in the workspace for multiple * compressions without having to perform any expensive reallocation or * reinitialization work. * * - We would like to be able to efficiently reuse the workspace across * multiple compressions **even when the compression parameters change** and * we need to resize some of the objects (where possible). * * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp * abstraction was created. It works as follows: * * Workspace Layout: * * [ ... workspace ... ] * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers] * * The various objects that live in the workspace are divided into the * following categories, and are allocated separately: * * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, * so that literally everything fits in a single buffer. Note: if present, * this must be the first object in the workspace, since ZSTD_customFree{CCtx, * CDict}() rely on a pointer comparison to see whether one or two frees are * required. * * - Fixed size objects: these are fixed-size, fixed-count objects that are * nonetheless "dynamically" allocated in the workspace so that we can * control how they're initialized separately from the broader ZSTD_CCtx. * Examples: * - Entropy Workspace * - 2 x ZSTD_compressedBlockState_t * - CDict dictionary contents * * - Tables: these are any of several different datastructures (hash tables, * chain tables, binary trees) that all respect a common format: they are * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). * Their sizes depend on the cparams. * * - Aligned: these buffers are used for various purposes that require 4 byte * alignment, but don't require any initialization before they're used. * * - Buffers: these buffers are used for various purposes that don't require * any alignment or initialization before they're used. This means they can * be moved around at no cost for a new compression. * * Allocating Memory: * * The various types of objects must be allocated in order, so they can be * correctly packed into the workspace buffer. That order is: * * 1. Objects * 2. Buffers * 3. Aligned * 4. Tables * * Attempts to reserve objects of different types out of order will fail. */ typedef struct { void* workspace; void* workspaceEnd; void* objectEnd; void* tableEnd; void* tableValidEnd; void* allocStart; BYTE allocFailed; int workspaceOversizedDuration; ZSTD_cwksp_alloc_phase_e phase; ZSTD_cwksp_static_alloc_e isStatic; } ZSTD_cwksp; /*-************************************* * Functions ***************************************/ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { (void)ws; assert(ws->workspace <= ws->objectEnd); assert(ws->objectEnd <= ws->tableEnd); assert(ws->objectEnd <= ws->tableValidEnd); assert(ws->tableEnd <= ws->allocStart); assert(ws->tableValidEnd <= ws->allocStart); assert(ws->allocStart <= ws->workspaceEnd); } /** * Align must be a power of 2. */ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { size_t const mask = align - 1; assert((align & mask) == 0); return (size + mask) & ~mask; } /** * Use this to determine how much space in the workspace we will consume to * allocate this object. 
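 * (Illustrative: with ASAN redzones compiled in, ZSTD_cwksp_alloc_size(64)
 * accounts for 64 + 2*ZSTD_CWKSP_ASAN_REDZONE_SIZE bytes; otherwise it is
 * exactly 64.)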
(Normally it should be exactly the size of the object, * but under special conditions, like ASAN, where we pad each object, it might * be larger.) * * Since tables aren't currently redzoned, you don't need to call through this * to figure out how much space you need for the matchState tables. Everything * else is though. */ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { if (size == 0) return 0; #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; #else return size; #endif } MEM_STATIC void ZSTD_cwksp_internal_advance_phase( ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) { assert(phase >= ws->phase); if (phase > ws->phase) { if (ws->phase < ZSTD_cwksp_alloc_buffers && phase >= ZSTD_cwksp_alloc_buffers) { ws->tableValidEnd = ws->objectEnd; } if (ws->phase < ZSTD_cwksp_alloc_aligned && phase >= ZSTD_cwksp_alloc_aligned) { /* If unaligned allocations down from a too-large top have left us * unaligned, we need to realign our alloc ptr. Technically, this * can consume space that is unaccounted for in the neededSpace * calculation. However, I believe this can only happen when the * workspace is too large, and specifically when it is too large * by a larger margin than the space that will be consumed. */ /* TODO: cleaner, compiler warning friendly way to do this??? */ ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1)); if (ws->allocStart < ws->tableValidEnd) { ws->tableValidEnd = ws->allocStart; } } ws->phase = phase; } } /** * Returns whether this object/buffer/etc was allocated in this workspace. */ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) { return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd); } /** * Internal function. Do not use directly. */ MEM_STATIC void* ZSTD_cwksp_reserve_internal( ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) { void* alloc; void* bottom = ws->tableEnd; ZSTD_cwksp_internal_advance_phase(ws, phase); alloc = (BYTE *)ws->allocStart - bytes; if (bytes == 0) return NULL; #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* over-reserve space */ alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; #endif DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining", alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); ZSTD_cwksp_assert_internal_consistency(ws); assert(alloc >= bottom); if (alloc < bottom) { DEBUGLOG(4, "cwksp: alloc failed!"); ws->allocFailed = 1; return NULL; } if (alloc < ws->tableValidEnd) { ws->tableValidEnd = alloc; } ws->allocStart = alloc; #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on * either side. */ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { __asan_unpoison_memory_region(alloc, bytes); } #endif return alloc; } /** * Reserves and returns unaligned memory. */ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) { return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); } /** * Reserves and returns memory sized on and aligned on sizeof(unsigned). */ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) { assert((bytes & (sizeof(U32)-1)) == 0); return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned); } /** * Aligned on sizeof(unsigned). 
These buffers have the special property that * their values remain constrained, allowing us to re-use them without * memset()-ing them. */ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) { const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned; void* alloc = ws->tableEnd; void* end = (BYTE *)alloc + bytes; void* top = ws->allocStart; DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining", alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); assert((bytes & (sizeof(U32)-1)) == 0); ZSTD_cwksp_internal_advance_phase(ws, phase); ZSTD_cwksp_assert_internal_consistency(ws); assert(end <= top); if (end > top) { DEBUGLOG(4, "cwksp: table alloc failed!"); ws->allocFailed = 1; return NULL; } ws->tableEnd = end; #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { __asan_unpoison_memory_region(alloc, bytes); } #endif return alloc; } /** * Aligned on sizeof(void*). */ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) { size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); void* alloc = ws->objectEnd; void* end = (BYTE*)alloc + roundedBytes; #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* over-reserve space */ end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; #endif DEBUGLOG(5, "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining", alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); assert(((size_t)alloc & (sizeof(void*)-1)) == 0); assert((bytes & (sizeof(void*)-1)) == 0); ZSTD_cwksp_assert_internal_consistency(ws); /* we must be in the first phase, no advance is possible */ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { DEBUGLOG(4, "cwksp: object alloc failed!"); ws->allocFailed = 1; return NULL; } ws->objectEnd = end; ws->tableEnd = end; ws->tableValidEnd = end; #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on * either side. */ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { __asan_unpoison_memory_region(alloc, bytes); } #endif return alloc; } MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty"); #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the table re-use logic is sound, and that we don't * access table space that we haven't cleaned, we re-"poison" the table * space every time we mark it dirty. */ { size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; assert(__msan_test_shadow(ws->objectEnd, size) == -1); __msan_poison(ws->objectEnd, size); } #endif assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); ws->tableValidEnd = ws->objectEnd; ZSTD_cwksp_assert_internal_consistency(ws); } MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean"); assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { ws->tableValidEnd = ws->tableEnd; } ZSTD_cwksp_assert_internal_consistency(ws); } /** * Zero the part of the allocated tables not already marked clean. 
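 * (Illustrative lifecycle: ZSTD_cwksp_mark_tables_dirty() pulls tableValidEnd
 * back to objectEnd, so a subsequent ZSTD_cwksp_clean_tables() zeroes the
 * entire table area; after ZSTD_cwksp_mark_tables_clean() there is nothing
 * left to zero.)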
*/ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables"); assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd); } ZSTD_cwksp_mark_tables_clean(ws); } /** * Invalidates table allocations. * All other allocations remain valid. */ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: clearing tables!"); #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* We don't do this when the workspace is statically allocated, because * when that is the case, we have no capability to hook into the end of the * workspace's lifecycle to unpoison the memory. */ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; __asan_poison_memory_region(ws->objectEnd, size); } #endif ws->tableEnd = ws->objectEnd; ZSTD_cwksp_assert_internal_consistency(ws); } /** * Invalidates all buffer, aligned, and table allocations. * Object allocations remain valid. */ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { DEBUGLOG(4, "cwksp: clearing!"); #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the context re-use logic is sound, and that we don't * access stuff that this compression hasn't initialized, we re-"poison" * the workspace (or at least the non-static, non-table parts of it) * every time we start a new compression. */ { size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd; __msan_poison(ws->tableValidEnd, size); } #endif #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* We don't do this when the workspace is statically allocated, because * when that is the case, we have no capability to hook into the end of the * workspace's lifecycle to unpoison the memory. */ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd; __asan_poison_memory_region(ws->objectEnd, size); } #endif ws->tableEnd = ws->objectEnd; ws->allocStart = ws->workspaceEnd; ws->allocFailed = 0; if (ws->phase > ZSTD_cwksp_alloc_buffers) { ws->phase = ZSTD_cwksp_alloc_buffers; } ZSTD_cwksp_assert_internal_consistency(ws); } /** * The provided workspace takes ownership of the buffer [start, start+size). * Any existing values in the workspace are ignored (the previously managed * buffer, if present, must be separately freed). 
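 * Usage sketch (illustrative; the buffer is typed size_t[] purely to
 * guarantee pointer alignment, and reservations must follow the phase order
 * documented above):
 * \code
 *   static size_t buffer[1 << 13];
 *   ZSTD_cwksp ws;
 *   ZSTD_cwksp_init(&ws, buffer, sizeof(buffer), ZSTD_cwksp_static_alloc);
 *   {   void* const obj = ZSTD_cwksp_reserve_object(&ws, 64);
 *       BYTE* const buf = ZSTD_cwksp_reserve_buffer(&ws, 1024);
 *       U32*  const tbl = (U32*)ZSTD_cwksp_reserve_table(&ws, 256 * sizeof(U32));
 *       (void)obj; (void)buf; (void)tbl;
 *   }
 *   ZSTD_cwksp_clear(&ws);
 * \endcode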
*/ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) { DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size); assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ ws->workspace = start; ws->workspaceEnd = (BYTE*)start + size; ws->objectEnd = ws->workspace; ws->tableValidEnd = ws->objectEnd; ws->phase = ZSTD_cwksp_alloc_objects; ws->isStatic = isStatic; ZSTD_cwksp_clear(ws); ws->workspaceOversizedDuration = 0; ZSTD_cwksp_assert_internal_consistency(ws); } MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) { void* workspace = ZSTD_customMalloc(size, customMem); DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size); RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!"); ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc); return 0; } MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) { void *ptr = ws->workspace; DEBUGLOG(4, "cwksp: freeing workspace"); ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp)); ZSTD_customFree(ptr, customMem); } /** * Moves the management of a workspace from one cwksp to another. The src cwksp * is left in an invalid state (src must be re-init()'ed before it's used again). */ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { *dst = *src; ZSTD_memset(src, 0, sizeof(ZSTD_cwksp)); } MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) { return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); } MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) { return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart); } MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { return ws->allocFailed; } /*-************************************* * Functions Checking Free Space ***************************************/ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) { return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd); } MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) { return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace; } MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) { return ZSTD_cwksp_check_available( ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR); } MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) { return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION; } MEM_STATIC void ZSTD_cwksp_bump_oversized_duration( ZSTD_cwksp* ws, size_t additionalNeededSpace) { if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) { ws->workspaceOversizedDuration++; } else { ws->workspaceOversizedDuration = 0; } } #if defined (__cplusplus) } #endif #endif /* ZSTD_CWKSP_H */ /**** ended inlining zstd_cwksp.h ****/ #ifdef ZSTD_MULTITHREAD /**** start inlining zstdmt_compress.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTDMT_COMPRESS_H #define ZSTDMT_COMPRESS_H #if defined (__cplusplus) extern "C" { #endif /* Note : This is an internal API. * These APIs used to be exposed with ZSTDLIB_API, * because it used to be the only way to invoke MT compression. * Now, you must use ZSTD_compress2 and ZSTD_compressStream2() instead. * * This API requires ZSTD_MULTITHREAD to be defined during compilation, * otherwise ZSTDMT_createCCtx*() will fail. */ /* === Dependencies === */ /**** skipping file: ../common/zstd_deps.h ****/ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ /**** skipping file: ../zstd.h ****/ /* === Constants === */ #ifndef ZSTDMT_NBWORKERS_MAX # define ZSTDMT_NBWORKERS_MAX 200 #endif #ifndef ZSTDMT_JOBSIZE_MIN # define ZSTDMT_JOBSIZE_MIN (1 MB) #endif #define ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30) #define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB)) /* ======================================================== * === Private interface, for use by ZSTD_compress.c === * === Not exposed in libzstd. Never invoke directly === * ======================================================== */ /* === Memory management === */ typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool *pool); size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx); size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx); /* === Streaming functions === */ size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx); /*! ZSTDMT_initCStream_internal() : * Private use only. Init streaming operation. * expects params to be valid. * must receive dict, or cdict, or none, but not both. * @return : 0, or an error code */ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); /*! ZSTDMT_compressStream_generic() : * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream() * depending on flush directive. * @return : minimum amount of data still to be flushed * 0 if fully flushed * or an error code * note : needs to be initialized using any ZSTD_initCStream*() variant */ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp); /*! ZSTDMT_toFlushNow() * Tell how many bytes are ready to be flushed immediately. * Probe the oldest active job (not yet entirely flushed) and check its output buffer. * If it returns 0, it means either there is no active job, * or the oldest job is still active but everything it produced has been flushed so far, * in which case flushing is limited by the speed of the oldest job. */ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx); /*! ZSTDMT_updateCParams_whileCompressing() : * Updates only a selected set of compression parameters, to remain compatible with current frame. * New parameters will be applied to next compression job. */ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams); /*! ZSTDMT_getFrameProgression(): * tells how much data has been consumed (input) and produced (output) for current frame. * able to count progression inside worker threads. 
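 * Polling sketch (illustrative; `mtctx` stands for a hypothetical context with
 * a compression in flight, and field names follow ZSTD_frameProgression as
 * declared in zstd.h):
 * \code
 *   ZSTD_frameProgression const fp = ZSTDMT_getFrameProgression(mtctx);
 *   DEBUGLOG(4, "consumed %u%% of ingested input",
 *            fp.ingested ? (unsigned)((100 * fp.consumed) / fp.ingested) : 0);
 * \endcode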
*/ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx); #if defined (__cplusplus) } #endif #endif /* ZSTDMT_COMPRESS_H */ /**** ended inlining zstdmt_compress.h ****/ #endif #if defined (__cplusplus) extern "C" { #endif /*-************************************* * Constants ***************************************/ #define kSearchStrength 8 #define HASH_READ_SIZE 8 #define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted". It could be confused for a real successor at index "1", if sorted as larger than its predecessor. It's not a big deal though : candidate will just be sorted again. Additionally, candidate position 1 will be lost. But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss. The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy. This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */ /*-************************************* * Context memory management ***************************************/ typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage; typedef struct ZSTD_prefixDict_s { const void* dict; size_t dictSize; ZSTD_dictContentType_e dictContentType; } ZSTD_prefixDict; typedef struct { void* dictBuffer; void const* dict; size_t dictSize; ZSTD_dictContentType_e dictContentType; ZSTD_CDict* cdict; } ZSTD_localDict; typedef struct { HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)]; HUF_repeat repeatMode; } ZSTD_hufCTables_t; typedef struct { FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)]; FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)]; FSE_repeat offcode_repeatMode; FSE_repeat matchlength_repeatMode; FSE_repeat litlength_repeatMode; } ZSTD_fseCTables_t; typedef struct { ZSTD_hufCTables_t huf; ZSTD_fseCTables_t fse; } ZSTD_entropyCTables_t; typedef struct { U32 off; /* Offset code (offset + ZSTD_REP_MOVE) for the match */ U32 len; /* Raw length of match */ } ZSTD_match_t; typedef struct { U32 offset; /* Offset of sequence */ U32 litLength; /* Length of literals prior to match */ U32 matchLength; /* Raw length of match */ } rawSeq; typedef struct { rawSeq* seq; /* The start of the sequences */ size_t pos; /* The index in seq where reading stopped. pos <= size. */ size_t posInSequence; /* The position within the sequence at seq[pos] where reading stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ size_t size; /* The number of sequences. <= capacity. 
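 * note (illustrative) : the expected invariant is
 * 0 <= pos <= size <= capacity, with posInSequence only
 * meaningful while pos < size.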
*/ size_t capacity; /* The capacity starting from `seq` pointer */ } rawSeqStore_t; UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; typedef struct { int price; U32 off; U32 mlen; U32 litlen; U32 rep[ZSTD_REP_NUM]; } ZSTD_optimal_t; typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e; typedef struct { /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */ unsigned* litFreq; /* table of literals statistics, of size 256 */ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */ U32 litSum; /* nb of literals */ U32 litLengthSum; /* nb of litLength codes */ U32 matchLengthSum; /* nb of matchLength codes */ U32 offCodeSum; /* nb of offset codes */ U32 litSumBasePrice; /* to compare to log2(litfreq) */ U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */ U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */ U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ ZSTD_literalCompressionMode_e literalCompressionMode; } optState_t; typedef struct { ZSTD_entropyCTables_t entropy; U32 rep[ZSTD_REP_NUM]; } ZSTD_compressedBlockState_t; typedef struct { BYTE const* nextSrc; /* next block here to continue on current prefix */ BYTE const* base; /* All regular indexes relative to this position */ BYTE const* dictBase; /* extDict indexes relative to this position */ U32 dictLimit; /* below that point, need extDict */ U32 lowLimit; /* below that point, no more valid data */ } ZSTD_window_t; typedef struct ZSTD_matchState_t ZSTD_matchState_t; struct ZSTD_matchState_t { ZSTD_window_t window; /* State for window round buffer management */ U32 loadedDictEnd; /* index of end of dictionary, within context's referential. * When loadedDictEnd != 0, a dictionary is in use, and still valid. * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance. * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity(). * When dict referential is copied into active context (i.e. not attached), * loadedDictEnd == dictSize, since referential starts from zero. */ U32 nextToUpdate; /* index from which to continue table update */ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ U32* hashTable; U32* hashTable3; U32* chainTable; int dedicatedDictSearch; /* Indicates whether this matchState is using the * dedicated dictionary search structure. 
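 * note (illustrative) : with the advanced API, this mode is requested via
 * ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableDedicatedDictSearch, 1)
 * before loading the dictionary ; see zstd.h for the exact contract.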
 */
    optState_t opt;         /* optimal parser state */
    const ZSTD_matchState_t* dictMatchState;
    ZSTD_compressionParameters cParams;
    const rawSeqStore_t* ldmSeqStore;
};

typedef struct {
    ZSTD_compressedBlockState_t* prevCBlock;
    ZSTD_compressedBlockState_t* nextCBlock;
    ZSTD_matchState_t matchState;
} ZSTD_blockState_t;

typedef struct {
    U32 offset;
    U32 checksum;
} ldmEntry_t;

typedef struct {
    BYTE const* split;
    U32 hash;
    U32 checksum;
    ldmEntry_t* bucket;
} ldmMatchCandidate_t;

#define LDM_BATCH_SIZE 64

typedef struct {
    ZSTD_window_t window;   /* State for the window round buffer management */
    ldmEntry_t* hashTable;
    U32 loadedDictEnd;
    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
    size_t splitIndices[LDM_BATCH_SIZE];
    ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
} ldmState_t;

typedef struct {
    U32 enableLdm;          /* 1 if enable long distance matching */
    U32 hashLog;            /* Log size of hashTable */
    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
    U32 minMatchLength;     /* Minimum match length */
    U32 hashRateLog;        /* Log number of entries to skip */
    U32 windowLog;          /* Window log for the LDM */
} ldmParams_t;

typedef struct {
    int collectSequences;
    ZSTD_Sequence* seqStart;
    size_t seqIndex;
    size_t maxSequences;
} SeqCollector;

struct ZSTD_CCtx_params_s {
    ZSTD_format_e format;
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;

    int compressionLevel;
    int forceWindow;           /* force back-references to respect limit of
                                * 1<<wLog. This value defaults to 0 */
    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
                                * No target when targetCBlockSize == 0 */
    int srcSizeHint;           /* User's best guess of source size.
                                * Hint is not valid when srcSizeHint == 0 */

    ZSTD_dictAttachPref_e attachDictPref;
    ZSTD_literalCompressionMode_e literalCompressionMode;

    /* Multithreading: used to pass parameters to mtctx */
    int nbWorkers;
    size_t jobSize;
    int overlapLog;
    int rsyncable;

    /* Long distance matching parameters */
    ldmParams_t ldmParams;

    /* Dedicated dict search algorithm trigger */
    int enableDedicatedDictSearch;

    /* Input/output buffer modes */
    ZSTD_bufferMode_e inBufferMode;
    ZSTD_bufferMode_e outBufferMode;

    /* Sequence compression API */
    ZSTD_sequenceFormat_e blockDelimiters;
    int validateSequences;

    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
    ZSTD_customMem customMem;
};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */

typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;

typedef enum {
    ZSTD_noDict = 0,
    ZSTD_extDict = 1,
    ZSTD_dictMatchState = 2,
    ZSTD_dedicatedDictSearch = 3
} ZSTD_dictMode_e;

typedef enum {
    ZSTD_cpm_noAttachDict = 0,
    ZSTD_cpm_attachDict = 1,
    ZSTD_cpm_createCDict = 2,
    ZSTD_cpm_unknown = 3
} ZSTD_cParamMode_e;

typedef size_t (*ZSTD_blockCompressor) (
        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);


MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
{
    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                       8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 16, 17, 17, 18, 18, 19, 19,
                                      20, 20, 20, 20, 21, 21, 21, 21,
                                      22, 22, 22, 22, 22, 22, 22, 22,
                                      23, 23, 23, 23, 23, 23, 23, 23,
                                      24, 24, 24, 24, 24, 24, 24, 24,
                                      24, 24, 24, 24, 24, 24, 24, 24 };
    static const U32 LL_deltaCode = 19;
    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
}

/* ZSTD_MLcode() :
 * note : mlBase = matchLength - MINMATCH;
 *        because it's the format it's stored in seqStore->sequences */
MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
{
    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
    static const U32 ML_deltaCode = 36;
    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}

typedef struct repcodes_s {
    U32 rep[3];
} repcodes_t;

MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
    repcodes_t newReps;
    if (offset >= ZSTD_REP_NUM) {  /* full offset */
        newReps.rep[2] = rep[1];
        newReps.rep[1] = rep[0];
        newReps.rep[0] = offset - ZSTD_REP_MOVE;
    } else {   /* repcode */
        U32 const repCode = offset + ll0;
        if (repCode > 0) {  /* note : if repCode==0, no change */
            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            newReps.rep[1] = rep[0];
            newReps.rep[0] = currentOffset;
        } else {   /* repCode == 0 */
            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
        }
    }
    return newReps;
}

/* ZSTD_cParam_withinBounds:
 * @return 1 if value is within cParam bounds,
 * 0 otherwise */
MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
{
    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
    if (ZSTD_isError(bounds.error)) return 0;
    if (value < bounds.lowerBound) return 0;
    if (value > bounds.upperBound) return 0;
    return 1;
}

/* ZSTD_noCompressBlock() :
 * Writes uncompressed block to dst buffer from given src.
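 * Worked example (illustrative) : per RFC 8878 the block header is 3 bytes,
 * little-endian : bit 0 = lastBlock, bits 1-2 = blockType, bits 3..23 = size.
 * With srcSize==100 and lastBlock==1 : 1 + (bt_raw<<1) + (100<<3) == 0x321,
 * stored as bytes 21 03 00.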
* Returns the size of the block */ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, dstSize_tooSmall, "dst buf too small for uncompressed block"); MEM_writeLE24(dst, cBlockHeader24); ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); return ZSTD_blockHeaderSize + srcSize; } MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) { BYTE* const op = (BYTE*)dst; U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, ""); MEM_writeLE24(op, cBlockHeader); op[3] = src; return 4; } /* ZSTD_minGain() : * minimum compression required * to generate a compress block or a compressed literals section. * note : use same formula for both situations */ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) { U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6; ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); return (srcSize >> minlog) + 2; } MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams) { switch (cctxParams->literalCompressionMode) { case ZSTD_lcm_huffman: return 0; case ZSTD_lcm_uncompressed: return 1; default: assert(0 /* impossible: pre-validated */); /* fall-through */ case ZSTD_lcm_auto: return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); } } /*! ZSTD_safecopyLiterals() : * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single * large copies. */ static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) { assert(iend > ilimit_w); if (ip <= ilimit_w) { ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap); op += ilimit_w - ip; ip = ilimit_w; } while (ip < iend) *op++ = *ip++; } /*! ZSTD_storeSeq() : * Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t. * `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes). * `mlBase` : matchLength - MINMATCH * Allowed to overread literals up to litLimit. */ HINT_INLINE UNUSED_ATTR void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase) { BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; BYTE const* const litEnd = literals + litLength; #if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6) static const BYTE* g_start = NULL; if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ { U32 const pos = (U32)((const BYTE*)literals - g_start); DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u", pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode); } #endif assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); /* copy Literals */ assert(seqStorePtr->maxNbLit <= 128 KB); assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); assert(literals + litLength <= litLimit); if (litEnd <= litLimit_w) { /* Common case we can use wildcopy. * First copy 16 bytes, because literals are likely short. 
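 * The overread is safe : litEnd <= litLimit_w == litLimit - WILDCOPY_OVERLENGTH,
 * so even reading WILDCOPY_OVERLENGTH bytes past litEnd stays within litLimit.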
*/ assert(WILDCOPY_OVERLENGTH >= 16); ZSTD_copy16(seqStorePtr->lit, literals); if (litLength > 16) { ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); } } else { ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); } seqStorePtr->lit += litLength; /* literal Length */ if (litLength>0xFFFF) { assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } seqStorePtr->sequences[0].litLength = (U16)litLength; /* match offset */ seqStorePtr->sequences[0].offset = offCode + 1; /* match Length */ if (mlBase>0xFFFF) { assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } seqStorePtr->sequences[0].matchLength = (U16)mlBase; seqStorePtr->sequences++; } /*-************************************* * Match length counter ***************************************/ static unsigned ZSTD_NbCommonBytes (size_t val) { if (MEM_isLittleEndian()) { if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) # if STATIC_BMI2 return _tzcnt_u64(val) >> 3; # else unsigned long r = 0; return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0; # endif # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_ctzll((U64)val) >> 3); # else static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r=0; return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0; # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctz((U32)val) >> 3); # else static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; # endif } } else { /* Big Endian CPU */ if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) # if STATIC_BMI2 return _lzcnt_u64(val) >> 3; # else unsigned long r = 0; return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0; # endif # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_clzll(val) >> 3); # else unsigned r; const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } r += (!val); return r; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r = 0; return _BitScanReverse( &r, (unsigned long)val ) ? 
                   (unsigned)(r >> 3) : 0;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#   else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#   endif
    }   }
}

MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
    const BYTE* const pStart = pIn;
    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);

    if (pIn < pInLoopLimit) {
        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
          if (diff) return ZSTD_NbCommonBytes(diff); }
        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
        while (pIn < pInLoopLimit) {
            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
            pIn += ZSTD_NbCommonBytes(diff);
            return (size_t)(pIn - pStart);
    }   }
    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (size_t)(pIn - pStart);
}

/** ZSTD_count_2segments() :
 *  can count match length with `ip` & `match` in 2 different segments.
 *  convention : on reaching mEnd, match count continue on next segment, starting at iStart
 */
MEM_STATIC size_t
ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
{
    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
    size_t const matchLength = ZSTD_count(ip, match, vEnd);
    if (match + matchLength != mEnd) return matchLength;
    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}


/*-*************************************
 *  Hashes
 ***************************************/
static const U32 prime3bytes = 506832829U;
static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }

MEM_STATIC FORCE_INLINE_ATTR
size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
    switch(mls)
    {
    default:
    case 4: return ZSTD_hash4Ptr(p, hBits);
    case 5: return ZSTD_hash5Ptr(p, hBits);
    case 6: return ZSTD_hash6Ptr(p, hBits);
    case 7: return ZSTD_hash7Ptr(p, hBits);
    case 8: return ZSTD_hash8Ptr(p, hBits);
    }
}

/** ZSTD_ipow() :
 * Return base^exponent.
 */
static U64 ZSTD_ipow(U64 base, U64 exponent)
{
    U64 power = 1;
    while (exponent) {
      if (exponent & 1) power *= base;
      exponent >>= 1;
      base *= base;
    }
    return power;
}

#define ZSTD_ROLL_HASH_CHAR_OFFSET 10

/** ZSTD_rollingHash_append() :
 * Add the buffer to the hash value.
 */
static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
{
    BYTE const* istart = (BYTE const*)buf;
    size_t pos;
    for (pos = 0; pos < size; ++pos) {
        hash *= prime8bytes;
        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
    }
    return hash;
}

/** ZSTD_rollingHash_compute() :
 * Compute the rolling hash value of the buffer.
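 * Illustrative closed form, matching ZSTD_rollingHash_append() above :
 * with OFF = ZSTD_ROLL_HASH_CHAR_OFFSET and p = prime8bytes,
 *   H(b_0..b_{n-1}) = sum_i (b_i + OFF) * p^(n-1-i)   (mod 2^64)
 * which is what makes the O(1) rotation below possible :
 *   H' = (H - (b_0 + OFF) * p^(n-1)) * p + (b_n + OFF)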
 */
MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
{
    return ZSTD_rollingHash_append(0, buf, size);
}

/** ZSTD_rollingHash_primePower() :
 * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
 * over a window of length bytes.
 */
MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
{
    return ZSTD_ipow(prime8bytes, length - 1);
}

/** ZSTD_rollingHash_rotate() :
 * Rotate the rolling hash by one byte.
 */
MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
{
    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
    hash *= prime8bytes;
    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
    return hash;
}

/*-*************************************
*  Round buffer management
***************************************/
#if (ZSTD_WINDOWLOG_MAX_64 > 31)
# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
#endif
/* Max current allowed */
#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
/* Maximum chunk size before overflow correction needs to be called again */
#define ZSTD_CHUNKSIZE_MAX                                                     \
    ( ((U32)-1)                  /* Maximum ending current index */            \
    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */

/**
 * ZSTD_window_clear():
 * Clears the window containing the history by simply setting it to empty.
 */
MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
{
    size_t const endT = (size_t)(window->nextSrc - window->base);
    U32 const end = (U32)endT;

    window->lowLimit = end;
    window->dictLimit = end;
}

/**
 * ZSTD_window_hasExtDict():
 * Returns non-zero if the window has a non-empty extDict.
 */
MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
{
    return window.lowLimit < window.dictLimit;
}

/**
 * ZSTD_matchState_dictMode():
 * Inspects the provided matchState and figures out what dictMode should be
 * passed to the compressor.
 */
MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
{
    return ZSTD_window_hasExtDict(ms->window) ?
        ZSTD_extDict :
        ms->dictMatchState != NULL ?
            (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
            ZSTD_noDict;
}

/**
 * ZSTD_window_needOverflowCorrection():
 * Returns non-zero if the indices are getting too large and need overflow
 * protection.
 */
MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
                                                  void const* srcEnd)
{
    U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
    return curr > ZSTD_CURRENT_MAX;
}

/**
 * ZSTD_window_correctOverflow():
 * Reduces the indices to protect from index overflow.
 * Returns the correction made to the indices, which must be applied to every
 * stored index.
 *
 * The least significant cycleLog bits of the indices must remain the same,
 * which may be 0. Every index up to maxDist in the past must be valid.
 * NOTE: (maxDist & cycleMask) must be zero.
 */
MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
                                           U32 maxDist, void const* src)
{
    /* preemptive overflow correction:
     * 1. correction is large enough:
     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
     *
     *    current - newCurrent
     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
     *    > (3<<29) - (1<<chainLog)
     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
     *    > 1<<29
     *
     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
     *    In 32-bit mode we are safe, because (chainLog <= 29), so
     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
     */
    U32 const cycleMask = (1U << cycleLog) - 1;
    U32 const curr = (U32)((BYTE const*)src - window->base);
    U32 const currentCycle0 = curr & cycleMask;
    /* Exclude zero so that newCurrent - maxDist >= 1. */
    U32 const currentCycle1 = currentCycle0 == 0 ?
(1U << cycleLog) : currentCycle0; U32 const newCurrent = currentCycle1 + maxDist; U32 const correction = curr - newCurrent; assert((maxDist & cycleMask) == 0); assert(curr > newCurrent); /* Loose bound, should be around 1<<29 (see above) */ assert(correction > 1<<28); window->base += correction; window->dictBase += correction; if (window->lowLimit <= correction) window->lowLimit = 1; else window->lowLimit -= correction; if (window->dictLimit <= correction) window->dictLimit = 1; else window->dictLimit -= correction; /* Ensure we can still reference the full window. */ assert(newCurrent >= maxDist); assert(newCurrent - maxDist >= 1); /* Ensure that lowLimit and dictLimit didn't underflow. */ assert(window->lowLimit <= newCurrent); assert(window->dictLimit <= newCurrent); DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction, window->lowLimit); return correction; } /** * ZSTD_window_enforceMaxDist(): * Updates lowLimit so that: * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd * * It ensures index is valid as long as index >= lowLimit. * This must be called before a block compression call. * * loadedDictEnd is only defined if a dictionary is in use for current compression. * As the name implies, loadedDictEnd represents the index at end of dictionary. * The value lies within context's referential, it can be directly compared to blockEndIdx. * * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. * This is because dictionaries are allowed to be referenced fully * as long as the last byte of the dictionary is in the window. * Once input has progressed beyond window size, dictionary cannot be referenced anymore. * * In normal dict mode, the dictionary lies between lowLimit and dictLimit. * In dictMatchState mode, lowLimit and dictLimit are the same, * and the dictionary is below them. * forceWindow and dictMatchState are therefore incompatible. */ MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window, const void* blockEnd, U32 maxDist, U32* loadedDictEndPtr, const ZSTD_matchState_t** dictMatchStatePtr) { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); /* - When there is no dictionary : loadedDictEnd == 0. In which case, the test (blockEndIdx > maxDist) is merely to avoid overflowing next operation `newLowLimit = blockEndIdx - maxDist`. - When there is a standard dictionary : Index referential is copied from the dictionary, which means it starts from 0. In which case, loadedDictEnd == dictSize, and it makes sense to compare `blockEndIdx > maxDist + dictSize` since `blockEndIdx` also starts from zero. - When there is an attached dictionary : loadedDictEnd is expressed within the referential of the context, so it can be directly compared against blockEndIdx. 
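 * Numeric sketch (illustrative) : with maxDist==1<<20, loadedDictEnd==0 and
 * blockEndIdx==0x180000, lowLimit is raised to 0x180000 - 0x100000 == 0x80000,
 * so no match may reach further back than one full window.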
 */
    if (blockEndIdx > maxDist + loadedDictEnd) {
        U32 const newLowLimit = blockEndIdx - maxDist;
        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
        if (window->dictLimit < window->lowLimit) {
            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
            window->dictLimit = window->lowLimit;
        }
        /* On reaching window size, dictionaries are invalidated */
        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
    }
}

/* Similar to ZSTD_window_enforceMaxDist(),
 * but only invalidates dictionary
 * when input progresses beyond window size.
 * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
 *              loadedDictEnd uses same referential as window->base
 *              maxDist is the window size */
MEM_STATIC void
ZSTD_checkDictValidity(const ZSTD_window_t* window,
                       const void* blockEnd,
                             U32   maxDist,
                             U32*  loadedDictEndPtr,
                       const ZSTD_matchState_t** dictMatchStatePtr)
{
    assert(loadedDictEndPtr != NULL);
    assert(dictMatchStatePtr != NULL);
    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
        U32 const loadedDictEnd = *loadedDictEndPtr;
        DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
        assert(blockEndIdx >= loadedDictEnd);

        if (blockEndIdx > loadedDictEnd + maxDist) {
            /* On reaching window size, dictionaries are invalidated.
             * For simplification, if window size is reached anywhere within next block,
             * the dictionary is invalidated for the full block.
             */
            DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
            *loadedDictEndPtr = 0;
            *dictMatchStatePtr = NULL;
        } else {
            if (*loadedDictEndPtr != 0) {
                DEBUGLOG(6, "dictionary considered valid for current block");
    }   }   }
}

MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
    ZSTD_memset(window, 0, sizeof(*window));
    window->base = (BYTE const*)"";
    window->dictBase = (BYTE const*)"";
    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
    window->nextSrc = window->base + 1;   /* see issue #1241 */
}

/**
 * ZSTD_window_update():
 * Updates the window by appending [src, src + srcSize) to the window.
 * If it is not contiguous, the current prefix becomes the extDict, and we
 * forget about the old extDict. Handles overlap of the prefix and extDict.
 * Returns non-zero if the segment is contiguous.
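 * Illustrative timeline : after feeding buffer A, feeding a non-adjacent
 * buffer B turns A into the extDict (indexes below dictLimit) while B starts
 * a fresh prefix ; matches may then reference either segment through the
 * two bases (base / dictBase).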
*/ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, void const* src, size_t srcSize) { BYTE const* const ip = (BYTE const*)src; U32 contiguous = 1; DEBUGLOG(5, "ZSTD_window_update"); if (srcSize == 0) return contiguous; assert(window->base != NULL); assert(window->dictBase != NULL); /* Check if blocks follow each other */ if (src != window->nextSrc) { /* not contiguous */ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base); DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit); window->lowLimit = window->dictLimit; assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */ window->dictLimit = (U32)distanceFromBase; window->dictBase = window->base; window->base = ip - distanceFromBase; /* ms->nextToUpdate = window->dictLimit; */ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */ contiguous = 0; } window->nextSrc = ip + srcSize; /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ if ( (ip+srcSize > window->dictBase + window->lowLimit) & (ip < window->dictBase + window->dictLimit)) { ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase; U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; window->lowLimit = lowLimitMax; DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit); } return contiguous; } /** * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. */ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) { U32 const maxDistance = 1U << windowLog; U32 const lowestValid = ms->window.lowLimit; U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; U32 const isDictionary = (ms->loadedDictEnd != 0); /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't * valid for the entire block. So this check is sufficient to find the lowest valid match index. */ U32 const matchLowest = isDictionary ? lowestValid : withinWindow; return matchLowest; } /** * Returns the lowest allowed match index in the prefix. */ MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) { U32 const maxDistance = 1U << windowLog; U32 const lowestValid = ms->window.dictLimit; U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; U32 const isDictionary = (ms->loadedDictEnd != 0); /* When computing the lowest prefix index we need to take the dictionary into account to handle * the edge case where the dictionary and the source are contiguous in memory. */ U32 const matchLowest = isDictionary ? 
lowestValid : withinWindow; return matchLowest; } /* debug functions */ #if (DEBUGLEVEL>=2) MEM_STATIC double ZSTD_fWeight(U32 rawStat) { U32 const fp_accuracy = 8; U32 const fp_multiplier = (1 << fp_accuracy); U32 const newStat = rawStat + 1; U32 const hb = ZSTD_highbit32(newStat); U32 const BWeight = hb * fp_multiplier; U32 const FWeight = (newStat << fp_accuracy) >> hb; U32 const weight = BWeight + FWeight; assert(hb + fp_accuracy < 31); return (double)weight / fp_multiplier; } /* display a table content, * listing each element, its frequency, and its predicted bit cost */ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) { unsigned u, sum; for (u=0, sum=0; u<=max; u++) sum += table[u]; DEBUGLOG(2, "total nb elts: %u", sum); for (u=0; u<=max; u++) { DEBUGLOG(2, "%2u: %5u (%.2f)", u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) ); } } #endif #if defined (__cplusplus) } #endif /* =============================================================== * Shared internal declarations * These prototypes may be called from sources not in lib/compress * =============================================================== */ /* ZSTD_loadCEntropy() : * dict : must point at beginning of a valid zstd dictionary. * return : size of dictionary header (size of magic number + dict ID + entropy tables) * assumptions : magic number supposed already checked * and dictSize >= 8 */ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, const void* const dict, size_t dictSize); void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); /* ============================================================== * Private declarations * These prototypes shall only be called from within lib/compress * ============================================================== */ /* ZSTD_getCParamsFromCCtxParams() : * cParams are built depending on compressionLevel, src size hints, * LDM and manually set compression parameters. * Note: srcSizeHint == 0 means 0! */ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); /*! ZSTD_initCStream_internal() : * Private use only. Init streaming operation. * expects params to be valid. * must receive dict, or cdict, or none, but not both. * @return : 0, or an error code */ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); void ZSTD_resetSeqStore(seqStore_t* ssPtr); /*! ZSTD_getCParamsFromCDict() : * as the name implies */ ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict); /* ZSTD_compressBegin_advanced_internal() : * Private use only. To be called from zstdmt_compress.c. */ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); /* ZSTD_compress_advanced_internal() : * Private use only. To be called from zstdmt_compress.c. 
 */
size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize,
                                 const ZSTD_CCtx_params* params);


/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);


/* ZSTD_referenceExternalSequences() :
 * Must be called before starting a compression operation.
 * seqs must parse a prefix of the source.
 * This cannot be used when long range matching is enabled.
 * Zstd will use these sequences, and pass the literals to a secondary block
 * compressor.
 * @return : An error code on failure.
 * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
 * access and data corruption.
 */
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);

/** ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);

/** ZSTD_CCtx_trace() :
 *  Trace the end of a compression call.
 */
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);

#endif /* ZSTD_COMPRESS_H */
/**** ended inlining zstd_compress_internal.h ****/


size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);

size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);

size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                              ZSTD_hufCTables_t* nextHuf,
                              ZSTD_strategy strategy, int disableLiteralCompression,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                        const int bmi2);

#endif /* ZSTD_COMPRESS_LITERALS_H */
/**** ended inlining zstd_compress_literals.h ****/

size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE*)dst;
    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);

    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    ZSTD_memcpy(ostart + flSize, src, srcSize);
    DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
    return srcSize + flSize;
}

size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE*)dst;
    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);

    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    ostart[flSize] = *(const BYTE*)src;
    DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
    return flSize+1;
}

size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                              ZSTD_hufCTables_t* nextHuf,
                              ZSTD_strategy strategy, int disableLiteralCompression,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                        const int bmi2)
{
    size_t const minGain = ZSTD_minGain(srcSize, strategy);
    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
    BYTE*  const ostart = (BYTE*)dst;
    U32 singleStream = srcSize < 256;
    symbolEncodingType_e hType = set_compressed;
    size_t cLitSize;

    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
                disableLiteralCompression, (U32)srcSize);

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf,
sizeof(*prevHuf)); if (disableLiteralCompression) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); /* small ? don't even attempt compression (speed opt) */ # define COMPRESS_LITERALS_SIZE_MIN 63 { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); { HUF_repeat repeat = prevHuf->repeatMode; int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; cLitSize = singleStream ? HUF_compress1X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) : HUF_compress4X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2); if (repeat != HUF_repeat_none) { /* reused the existing table */ DEBUGLOG(5, "Reusing previous huffman table"); hType = set_repeat; } } if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) { ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } if (cLitSize==1) { ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); } if (hType == set_compressed) { /* using a newly constructed table */ nextHuf->repeatMode = HUF_repeat_check; } /* Build header */ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; } default: /* not possible : lhSize is {3,4,5} */ assert(0); } DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize)); return lhSize+cLitSize; } /**** ended inlining compress/zstd_compress_literals.c ****/ /**** start inlining compress/zstd_compress_sequences.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ /**** start inlining zstd_compress_sequences.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
 */
#ifndef ZSTD_COMPRESS_SEQUENCES_H
#define ZSTD_COMPRESS_SEQUENCES_H

/**** skipping file: ../common/fse.h ****/
/**** skipping file: ../common/zstd_internal.h ****/

typedef enum {
    ZSTD_defaultDisallowed = 0,
    ZSTD_defaultAllowed = 1
} ZSTD_defaultPolicy_e;

symbolEncodingType_e
ZSTD_selectEncodingType(FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
                        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
                        FSE_CTable const* prevCTable,
                        short const* defaultNorm, U32 defaultNormLog,
                        ZSTD_defaultPolicy_e const isDefaultAllowed,
                        ZSTD_strategy const strategy);

size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
                 FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
                 unsigned* count, U32 max,
                 const BYTE* codeTable, size_t nbSeq,
                 const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                 const FSE_CTable* prevCTable, size_t prevCTableSize,
                 void* entropyWorkspace, size_t entropyWorkspaceSize);

size_t ZSTD_encodeSequences(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);

size_t ZSTD_fseBitCost(
    FSE_CTable const* ctable,
    unsigned const* count,
    unsigned const max);

size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
                             unsigned const* count, unsigned const max);

#endif /* ZSTD_COMPRESS_SEQUENCES_H */
/**** ended inlining zstd_compress_sequences.h ****/

/**
 * -log2(x / 256) lookup table for x in [0, 256).
 * If x == 0: Return 0
 * Else: Return floor(-log2(x / 256) * 256)
 */
static unsigned const kInverseProbabilityLog256[256] = {
    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
    5,    4,    2,    1,
};

static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
    void const* ptr = ctable;
    U16 const* u16ptr = (U16 const*)ptr;
    U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
    return maxSymbolValue;
}

/**
 * Returns true if we should use ncount=-1 else we should
 * use ncount=1 for low probability symbols instead.
 */
static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
{
    /* Heuristic: This should cover most blocks <= 16K and
     * start to fade out after 16K to about 32K depending on
     * compressibility.
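     * (Explanatory note, not normative : a count normalized to -1 flags a
     *  symbol as "less probable than 1/tableSize", which changes how the
     *  normalized distribution spends its precision ; the gain only
     *  materializes once a block carries enough sequences, hence the
     *  threshold below.)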
*/ return nbSeq >= 2048; } /** * Returns the cost in bytes of encoding the normalized count header. * Returns an error if any of the helper functions return an error. */ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max, size_t const nbSeq, unsigned const FSELog) { BYTE wksp[FSE_NCOUNTBOUND]; S16 norm[MaxSeq + 1]; const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), ""); return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog); } /** * Returns the cost in bits of encoding the distribution described by count * using the entropy bound. */ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total) { unsigned cost = 0; unsigned s; for (s = 0; s <= max; ++s) { unsigned norm = (unsigned)((256 * count[s]) / total); if (count[s] != 0 && norm == 0) norm = 1; assert(count[s] < total); cost += count[s] * kInverseProbabilityLog256[norm]; } return cost >> 8; } /** * Returns the cost in bits of encoding the distribution in count using ctable. * Returns an error if ctable cannot represent all the symbols in count. */ size_t ZSTD_fseBitCost( FSE_CTable const* ctable, unsigned const* count, unsigned const max) { unsigned const kAccuracyLog = 8; size_t cost = 0; unsigned s; FSE_CState_t cstate; FSE_initCState(&cstate, ctable); if (ZSTD_getFSEMaxSymbolValue(ctable) < max) { DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u", ZSTD_getFSEMaxSymbolValue(ctable), max); return ERROR(GENERIC); } for (s = 0; s <= max; ++s) { unsigned const tableLog = cstate.stateLog; unsigned const badCost = (tableLog + 1) << kAccuracyLog; unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); if (count[s] == 0) continue; if (bitCost >= badCost) { DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s); return ERROR(GENERIC); } cost += (size_t)count[s] * bitCost; } return cost >> kAccuracyLog; } /** * Returns the cost in bits of encoding the distribution in count using the * table described by norm. The max symbol support by norm is assumed >= max. * norm must be valid for every symbol with non-zero probability in count. */ size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, unsigned const* count, unsigned const max) { unsigned const shift = 8 - accuracyLog; size_t cost = 0; unsigned s; assert(accuracyLog <= 8); for (s = 0; s <= max; ++s) { unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1; unsigned const norm256 = normAcc << shift; assert(norm256 > 0); assert(norm256 < 256); cost += count[s] * kInverseProbabilityLog256[norm256]; } return cost >> 8; } symbolEncodingType_e ZSTD_selectEncodingType( FSE_repeat* repeatMode, unsigned const* count, unsigned const max, size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, FSE_CTable const* prevCTable, short const* defaultNorm, U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed, ZSTD_strategy const strategy) { ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); if (mostFrequent == nbSeq) { *repeatMode = FSE_repeat_none; if (isDefaultAllowed && nbSeq <= 2) { /* Prefer set_basic over set_rle when there are 2 or less symbols, * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. * If basic encoding isn't possible, always choose RLE. 
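 * Worked example (illustrative) : with nbSeq==1, set_rle spends a full
 * byte on the repeated symbol, while set_basic spends only the ~5-6 bits
 * the predefined distribution assigns it, so basic is cheaper.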
*/ DEBUGLOG(5, "Selected set_basic"); return set_basic; } DEBUGLOG(5, "Selected set_rle"); return set_rle; } if (strategy < ZSTD_lazy) { if (isDefaultAllowed) { size_t const staticFse_nbSeq_max = 1000; size_t const mult = 10 - strategy; size_t const baseLog = 3; size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */ assert(mult <= 9 && mult >= 7); if ( (*repeatMode == FSE_repeat_valid) && (nbSeq < staticFse_nbSeq_max) ) { DEBUGLOG(5, "Selected set_repeat"); return set_repeat; } if ( (nbSeq < dynamicFse_nbSeq_min) || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) { DEBUGLOG(5, "Selected set_basic"); /* The format allows default tables to be repeated, but it isn't useful. * When using simple heuristics to select encoding type, we don't want * to confuse these tables with dictionaries. When running more careful * analysis, we don't need to waste time checking both repeating tables * and default tables. */ *repeatMode = FSE_repeat_none; return set_basic; } } } else { size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC); size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC); size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); if (isDefaultAllowed) { assert(!ZSTD_isError(basicCost)); assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost))); } assert(!ZSTD_isError(NCountCost)); assert(compressedCost < ERROR(maxCode)); DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u", (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost); if (basicCost <= repeatCost && basicCost <= compressedCost) { DEBUGLOG(5, "Selected set_basic"); assert(isDefaultAllowed); *repeatMode = FSE_repeat_none; return set_basic; } if (repeatCost <= compressedCost) { DEBUGLOG(5, "Selected set_repeat"); assert(!ZSTD_isError(repeatCost)); return set_repeat; } assert(compressedCost < basicCost && compressedCost < repeatCost); } DEBUGLOG(5, "Selected set_compressed"); *repeatMode = FSE_repeat_check; return set_compressed; } size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, unsigned* count, U32 max, const BYTE* codeTable, size_t nbSeq, const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, const FSE_CTable* prevCTable, size_t prevCTableSize, void* entropyWorkspace, size_t entropyWorkspaceSize) { BYTE* op = (BYTE*)dst; const BYTE* const oend = op + dstCapacity; DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity); switch (type) { case set_rle: FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), ""); RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space"); *op = codeTable[0]; return 1; case set_repeat: ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize); return 0; case set_basic: FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */ return 0; case set_compressed: { S16 norm[MaxSeq + 1]; size_t nbSeq_1 = nbSeq; const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); if (count[codeTable[nbSeq-1]] > 1) { count[codeTable[nbSeq-1]]--; nbSeq_1--; } assert(nbSeq_1 > 1); 
        assert(entropyWorkspaceSize >= FSE_BUILD_CTABLE_WORKSPACE_SIZE(MaxSeq, MaxFSELog));
        FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize), "");
            return NCountSize;
        }
    }
    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
    }
}

FORCE_INLINE_TEMPLATE size_t
ZSTD_encodeSequences_body(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    BIT_CStream_t blockStream;
    FSE_CState_t  stateMatchLength;
    FSE_CState_t  stateOffsetBits;
    FSE_CState_t  stateLitLength;

    RETURN_ERROR_IF(
        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
        dstSize_tooSmall, "not enough space remaining");
    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
                (int)(blockStream.endPtr - blockStream.startPtr),
                (unsigned)dstCapacity);

    /* first symbols */
    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    if (longOffsets) {
        U32 const ofBits = ofCodeTable[nbSeq-1];
        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
        if (extraBits) {
            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
            BIT_flushBits(&blockStream);
        }
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
                    ofBits - extraBits);
    } else {
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
    }
    BIT_flushBits(&blockStream);

    {   size_t n;
        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
            BYTE const llCode = llCodeTable[n];
            BYTE const ofCode = ofCodeTable[n];
            BYTE const mlCode = mlCodeTable[n];
            U32  const llBits = LL_bits[llCode];
            U32  const ofBits = ofCode;
            U32  const mlBits = ML_bits[mlCode];
            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                        (unsigned)sequences[n].litLength,
                        (unsigned)sequences[n].matchLength + MINMATCH,
                        (unsigned)sequences[n].offset);
                                                                            /* 32b*/  /* 64b*/
                                                                            /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
                BIT_flushBits(&blockStream);                                /* (7)*/
            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
            if (longOffsets) {
                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                if (extraBits) {
                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
                    BIT_flushBits(&blockStream);                            /* (7)*/
                }
                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
                            ofBits - extraBits);                            /* 31 */
            } else {
                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
            }
            BIT_flushBits(&blockStream);                                    /* (7)*/
            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
    }   }

    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
    FSE_flushCState(&blockStream, &stateMatchLength);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
    FSE_flushCState(&blockStream, &stateOffsetBits);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
    FSE_flushCState(&blockStream,
&stateLitLength); { size_t const streamSize = BIT_closeCStream(&blockStream); RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space"); return streamSize; } } static size_t ZSTD_encodeSequences_default( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #if DYNAMIC_BMI2 static TARGET_ATTRIBUTE("bmi2") size_t ZSTD_encodeSequences_bmi2( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #endif size_t ZSTD_encodeSequences( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) { DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); #if DYNAMIC_BMI2 if (bmi2) { return ZSTD_encodeSequences_bmi2(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } #endif (void)bmi2; return ZSTD_encodeSequences_default(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); } /**** ended inlining compress/zstd_compress_sequences.c ****/ /**** start inlining compress/zstd_compress_superblock.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ /**** start inlining zstd_compress_superblock.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMPRESS_ADVANCED_H #define ZSTD_COMPRESS_ADVANCED_H /*-************************************* * Dependencies ***************************************/ /**** skipping file: ../zstd.h ****/ /*-************************************* * Target Compressed Block Size ***************************************/ /* ZSTD_compressSuperBlock() : * Used to compress a super block when targetCBlockSize is being used. 
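 * (Illustrative : the feature is driven by
 *  ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, n) ;
 *  see zstd.h for the accepted bounds.)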
 * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock);

#endif /* ZSTD_COMPRESS_ADVANCED_H */
/**** ended inlining zstd_compress_superblock.h ****/
/**** skipping file: ../common/zstd_internal.h ****/
/**** skipping file: hist.h ****/
/**** skipping file: zstd_compress_internal.h ****/
/**** skipping file: zstd_compress_sequences.h ****/
/**** skipping file: zstd_compress_literals.h ****/

/*-*************************************
*  Superblock entropy buffer structs
***************************************/
/** ZSTD_hufCTablesMetadata_t :
 *  Stores Literals Block Type for a super-block in hType, and
 *  huffman tree description in hufDesBuffer.
 *  hufDesSize refers to the size of huffman tree description in bytes.
 *  This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
typedef struct {
    symbolEncodingType_e hType;
    BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
    size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;

/** ZSTD_fseCTablesMetadata_t :
 *  Stores symbol compression modes for a super-block in {ll, of, ml}Type, and
 *  fse tables in fseTablesBuffer.
 *  fseTablesSize refers to the size of fse tables in bytes.
 *  This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
typedef struct {
    symbolEncodingType_e llType;
    symbolEncodingType_e ofType;
    symbolEncodingType_e mlType;
    BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
    size_t fseTablesSize;
    size_t lastCountSize; /* This is to account for a bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
} ZSTD_fseCTablesMetadata_t;

typedef struct {
    ZSTD_hufCTablesMetadata_t hufMetadata;
    ZSTD_fseCTablesMetadata_t fseMetadata;
} ZSTD_entropyCTablesMetadata_t;


/** ZSTD_buildSuperBlockEntropy_literal() :
 *  Builds entropy for the super-block literals.
 *  Stores literals block type (raw, rle, compressed, repeat) and
 *  huffman description table to hufMetadata.
 *  @return : size of huffman description table or error code */
static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
                                            const ZSTD_hufCTables_t* prevHuf,
                                                  ZSTD_hufCTables_t* nextHuf,
                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
                                                  const int disableLiteralsCompression,
                                                  void* workspace, size_t wkspSize)
{
    BYTE* const wkspStart = (BYTE*)workspace;
    BYTE* const wkspEnd = wkspStart + wkspSize;
    BYTE* const countWkspStart = wkspStart;
    unsigned* const countWksp = (unsigned*)workspace;
    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
    BYTE* const nodeWksp = countWkspStart + countWkspSize;
    const size_t nodeWkspSize = wkspEnd-nodeWksp;
    unsigned maxSymbolValue = 255;
    unsigned huffLog = HUF_TABLELOG_DEFAULT;
    HUF_repeat repeat = prevHuf->repeatMode;

    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralsCompression) {
        DEBUGLOG(5, "set_basic - disabled");
        hufMetadata->hType = set_basic;
        return 0;
    }

    /* small ? don't even attempt compression (speed opt) */
#   define COMPRESS_LITERALS_SIZE_MIN 63
    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ?
6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) { DEBUGLOG(5, "set_basic - too small"); hufMetadata->hType = set_basic; return 0; } } /* Scan input and build symbol stats */ { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize); FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); if (largest == srcSize) { DEBUGLOG(5, "set_rle"); hufMetadata->hType = set_rle; return 0; } if (largest <= (srcSize >> 7)+4) { DEBUGLOG(5, "set_basic - no gain"); hufMetadata->hType = set_basic; return 0; } } /* Validate the previous Huffman table */ if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { repeat = HUF_repeat_none; } /* Build Huffman Tree */ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); huffLog = (U32)maxBits; { /* Build and write the CTable */ size_t const newCSize = HUF_estimateCompressedSize( (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); size_t const hSize = HUF_writeCTable( hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog); /* Check against repeating the previous CTable */ if (repeat != HUF_repeat_none) { size_t const oldCSize = HUF_estimateCompressedSize( (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { DEBUGLOG(5, "set_repeat - smaller"); ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); hufMetadata->hType = set_repeat; return 0; } } if (newCSize + hSize >= srcSize) { DEBUGLOG(5, "set_basic - no gains"); ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); hufMetadata->hType = set_basic; return 0; } DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); hufMetadata->hType = set_compressed; nextHuf->repeatMode = HUF_repeat_check; return hSize; } } } /** ZSTD_buildSuperBlockEntropy_sequences() : * Builds entropy for the super-block sequences. * Stores symbol compression modes and fse table to fseMetadata. 
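 *
 * (Worked example for the "no gain" heuristic in ZSTD_buildSuperBlockEntropy_literal()
 *  above: for srcSize = 4096, literals are entropy-coded only if the most frequent
 *  byte occurs more than (4096 >> 7) + 4 = 36 times; below that threshold the
 *  distribution is too flat for Huffman coding to plausibly pay for its table, so
 *  the literals are emitted raw, i.e. set_basic.)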
* @return : size of fse tables or error code */ static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr, const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, size_t wkspSize) { BYTE* const wkspStart = (BYTE*)workspace; BYTE* const wkspEnd = wkspStart + wkspSize; BYTE* const countWkspStart = wkspStart; unsigned* const countWksp = (unsigned*)workspace; const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned); BYTE* const cTableWksp = countWkspStart + countWkspSize; const size_t cTableWkspSize = wkspEnd-cTableWksp; ZSTD_strategy const strategy = cctxParams->cParams.strategy; FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable; const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; BYTE* const ostart = fseMetadata->fseTablesBuffer; BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); BYTE* op = ostart; assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE)); DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq); ZSTD_memset(workspace, 0, wkspSize); fseMetadata->lastCountSize = 0; /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* build CTable for Literal Lengths */ { U32 LLtype; unsigned max = MaxLL; size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building LL table"); nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, countWksp, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(set_basic < set_compressed && set_rle < set_compressed); assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable), cTableWksp, cTableWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed"); if (LLtype == set_compressed) fseMetadata->lastCountSize = countSize; op += countSize; fseMetadata->llType = (symbolEncodingType_e) LLtype; } } /* build CTable for Offsets */ { U32 Offtype; unsigned max = MaxOff; size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, countWksp, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable), cTableWksp, cTableWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed"); if (Offtype == set_compressed) fseMetadata->lastCountSize = countSize; op += countSize; fseMetadata->ofType = (symbolEncodingType_e) Offtype; } } /* build CTable for MatchLengths */ { U32 MLtype; unsigned max = MaxML; size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, countWksp, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable), cTableWksp, cTableWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed"); if (MLtype == set_compressed) fseMetadata->lastCountSize = countSize; op += countSize; fseMetadata->mlType = (symbolEncodingType_e) MLtype; } } assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer)); return op-ostart; } /** ZSTD_buildSuperBlockEntropy() : * Builds entropy for the super-block. * @return : 0 on success or error code */ static size_t ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, size_t wkspSize) { size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy"); entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, ZSTD_disableLiteralsCompression(cctxParams), workspace, wkspSize); FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed"); entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr, &prevEntropy->fse, &nextEntropy->fse, cctxParams, &entropyMetadata->fseMetadata, workspace, wkspSize); FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed"); return 0; } /** ZSTD_compressSubBlock_literal() : * Compresses literals section for a sub-block. 
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks' literals sections will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks' literals sections will be RLE_Literals_Block.
 *      If it is set_compressed, the first sub-block's literals section will be
 *      Compressed_Literals_Block, and the following sub-blocks' literals sections
 *      will be Treeless_Literals_Block.
 *      If it is set_repeat, all sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if it is unable to compress.
 *            Or error code */
static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                    const BYTE* literals, size_t litSize,
                                    void* dst, size_t dstSize,
                                    const int bmi2,
                                    int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    (void)bmi2; /* TODO bmi2... */

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    /* TODO bmi2 */
    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size.
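         * (Example: with writeEntropy=1, litSize = 800 selects lhSize = 3, whose
         *  size fields are 10 bits wide and top out below 1 KB; if entropy coding
         *  plus the table description were to expand the literals to 1 KB or more,
         *  a 3-byte header could no longer describe them, which is what the check
         *  below detects before falling back to raw literals.)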
*/ if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) { assert(cLitSize > litSize); DEBUGLOG(5, "Literals expanded beyond allowed header size"); return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize); } /* Build header */ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; } default: /* not possible : lhSize is {3,4,5} */ assert(0); } *entropyWritten = 1; DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart)); return op-ostart; } static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) { const seqDef* const sstart = sequences; const seqDef* const send = sequences + nbSeq; const seqDef* sp = sstart; size_t matchLengthSum = 0; size_t litLengthSum = 0; while (send-sp > 0) { ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); litLengthSum += seqLen.litLength; matchLengthSum += seqLen.matchLength; sp++; } assert(litLengthSum <= litSize); if (!lastSequence) { assert(litLengthSum == litSize); } return matchLengthSum + litSize; } /** ZSTD_compressSubBlock_sequences() : * Compresses sequences section for a sub-block. * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have * symbol compression modes for the super-block. * The first successfully compressed block will have these in its header. * We set entropyWritten=1 when we succeed in compressing the sequences. * The following sub-blocks will always have repeat mode. * @return : compressed size of sequences section of a sub-block * Or 0 if it is unable to compress * Or error code. 
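 *
 * (For reference, the 1-byte symbol-compression-modes field written below packs
 *  the three 2-bit mode values as (LLtype<<6) + (Offtype<<4) + (MLtype<<2); a
 *  repeat-only sub-block, with all three modes set_repeat = 3, therefore writes
 *  (3<<6) + (3<<4) + (3<<2) = 0xFC.)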
*/ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, const ZSTD_fseCTablesMetadata_t* fseMetadata, const seqDef* sequences, size_t nbSeq, const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const int bmi2, int writeEntropy, int* entropyWritten) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; BYTE* seqHead; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets); *entropyWritten = 0; /* Sequences Header */ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, dstSize_tooSmall, ""); if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0) { return op - ostart; } /* seqHead : flags for FSE encoding type */ seqHead = op++; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart)); if (writeEntropy) { const U32 LLtype = fseMetadata->llType; const U32 Offtype = fseMetadata->ofType; const U32 MLtype = fseMetadata->mlType; DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize); *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize); op += fseMetadata->fseTablesSize; } else { const U32 repeat = set_repeat; *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2)); } { size_t const bitstreamSize = ZSTD_encodeSequences( op, oend - op, fseTables->matchlengthCTable, mlCode, fseTables->offcodeCTable, ofCode, fseTables->litlengthCTable, llCode, sequences, nbSeq, longOffsets, bmi2); FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); op += bitstreamSize; /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. */ #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(fseMetadata->lastCountSize + bitstreamSize == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } #endif DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize); } /* zstd versions <= 1.4.0 mistakenly report error when * sequences section body size is less than 3 bytes. * Fixed by https://github.com/facebook/zstd/pull/1664. * This can happen when the previous sequences section block is compressed * with rle mode and the current block's sequences section is compressed * with repeat mode where sequences section body size can be 1 byte. 
*/ #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION if (op-seqHead < 4) { DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting " "an uncompressed block when sequences are < 4 bytes"); return 0; } #endif *entropyWritten = 1; return op - ostart; } /** ZSTD_compressSubBlock() : * Compresses a single sub-block. * @return : compressed size of the sub-block * Or 0 if it failed to compress. */ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, const seqDef* sequences, size_t nbSeq, const BYTE* literals, size_t litSize, const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, const int bmi2, int writeLitEntropy, int writeSeqEntropy, int* litEntropyWritten, int* seqEntropyWritten, U32 lastBlock) { BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart + ZSTD_blockHeaderSize; DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)", litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock); { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable, &entropyMetadata->hufMetadata, literals, litSize, op, oend-op, bmi2, writeLitEntropy, litEntropyWritten); FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed"); if (cLitSize == 0) return 0; op += cLitSize; } { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, &entropyMetadata->fseMetadata, sequences, nbSeq, llCode, mlCode, ofCode, cctxParams, op, oend-op, bmi2, writeSeqEntropy, seqEntropyWritten); FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed"); if (cSeqSize == 0) return 0; op += cSeqSize; } /* Write block header */ { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize; U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(ostart, cBlockHeader24); } return op-ostart; } static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, const ZSTD_hufCTables_t* huf, const ZSTD_hufCTablesMetadata_t* hufMetadata, void* workspace, size_t wkspSize, int writeEntropy) { unsigned* const countWksp = (unsigned*)workspace; unsigned maxSymbolValue = 255; size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ if (hufMetadata->hType == set_basic) return litSize; else if (hufMetadata->hType == set_rle) return 1; else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); if (ZSTD_isError(largest)) return litSize; { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; return cLitSizeEstimate + literalSectionHeaderSize; } } assert(0); /* impossible */ return 0; } static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, const BYTE* codeTable, unsigned maxCode, size_t nbSeq, const FSE_CTable* fseCTable, const U32* additionalBits, short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, void* workspace, size_t wkspSize) { unsigned* const countWksp = (unsigned*)workspace; const BYTE* ctp = codeTable; const BYTE* const ctStart = ctp; const BYTE* const ctEnd = ctStart + nbSeq; size_t cSymbolTypeSizeEstimateInBits = 0; unsigned max = maxCode; HIST_countFast_wksp(countWksp, 
&max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ if (type == set_basic) { /* We selected this encoding type, so it must be valid. */ assert(max <= defaultMax); cSymbolTypeSizeEstimateInBits = max <= defaultMax ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) : ERROR(GENERIC); } else if (type == set_rle) { cSymbolTypeSizeEstimateInBits = 0; } else if (type == set_compressed || type == set_repeat) { cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); } if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10; while (ctp < ctEnd) { if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ ctp++; } return cSymbolTypeSizeEstimateInBits / 8; } static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable, const BYTE* llCodeTable, const BYTE* mlCodeTable, size_t nbSeq, const ZSTD_fseCTables_t* fseTables, const ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, size_t wkspSize, int writeEntropy) { size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ size_t cSeqSizeEstimate = 0; cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff, nbSeq, fseTables->offcodeCTable, NULL, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL, nbSeq, fseTables->litlengthCTable, LL_bits, LL_defaultNorm, LL_defaultNormLog, MaxLL, workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML, nbSeq, fseTables->matchlengthCTable, ML_bits, ML_defaultNorm, ML_defaultNormLog, MaxML, workspace, wkspSize); if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, const BYTE* ofCodeTable, const BYTE* llCodeTable, const BYTE* mlCodeTable, size_t nbSeq, const ZSTD_entropyCTables_t* entropy, const ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, size_t wkspSize, int writeLitEntropy, int writeSeqEntropy) { size_t cSizeEstimate = 0; cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize, &entropy->huf, &entropyMetadata->hufMetadata, workspace, wkspSize, writeLitEntropy); cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy); return cSizeEstimate + ZSTD_blockHeaderSize; } static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata) { if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle) return 1; if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle) return 1; if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle) return 1; return 0; } /** ZSTD_compressSubBlock_multi() : * Breaks super-block into multiple sub-blocks and compresses them. * Entropy will be written to the first block. * The following blocks will use repeat mode to compress. * All sub-blocks are compressed blocks (no raw or rle blocks). * @return : compressed size of the super block (which is multiple ZSTD blocks) * Or 0 if it failed to compress. 
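 *
 * (The per-symbol-type estimator above sums an entropy cost for the code values
 *  -- cross-entropy against the default distribution for set_basic, the FSE
 *  table's bit cost otherwise -- plus the raw additional bits each code carries
 *  (an offset code of 20 contributes 20 extra bits), then divides by 8 to get
 *  bytes; if the cost evaluation fails, it falls back to a pessimistic
 *  10 bytes per sequence.)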
 */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                            const ZSTD_compressedBlockState_t* prevCBlock,
                            ZSTD_compressedBlockState_t* nextCBlock,
                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                            const ZSTD_CCtx_params* cctxParams,
                                  void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const int bmi2, U32 lastBlock,
                            void* workspace, size_t wkspSize)
{
    const seqDef* const sstart = seqStorePtr->sequencesStart;
    const seqDef* const send = seqStorePtr->sequences;
    const seqDef* sp = sstart;
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;
    BYTE const* ip = (BYTE const*)src;
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t targetCBlockSize = cctxParams->targetCBlockSize;
    size_t litSize, seqCount;
    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
    int writeSeqEntropy = 1;
    int lastSequence = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
                (unsigned)(lend-lp), (unsigned)(send-sstart));

    litSize = 0;
    seqCount = 0;
    do {
        size_t cBlockSizeEstimate = 0;
        if (sstart == send) {
            lastSequence = 1;
        } else {
            const seqDef* const sequence = sp + seqCount;
            lastSequence = sequence == send - 1;
            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
            seqCount++;
        }
        if (lastSequence) {
            assert(lp <= lend);
            assert(litSize <= (size_t)(lend - lp));
            litSize = (size_t)(lend - lp);
        }
        /* I think there is an optimization opportunity here.
         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
         * since it recalculates the estimate from scratch.
         * For example, it would recount the literal distribution and symbol codes
         * every time.
         */
        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
                                                       &nextCBlock->entropy, entropyMetadata,
                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
            int litEntropyWritten = 0;
            int seqEntropyWritten = 0;
            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                       sp, seqCount,
                                                       lp, litSize,
                                                       llCodePtr, mlCodePtr, ofCodePtr,
                                                       cctxParams,
                                                       op, oend-op,
                                                       bmi2, writeLitEntropy, writeSeqEntropy,
                                                       &litEntropyWritten, &seqEntropyWritten,
                                                       lastBlock && lastSequence);
            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
            if (cSize > 0 && cSize < decompressedSize) {
                DEBUGLOG(5, "Committed the sub-block");
                assert(ip + decompressedSize <= iend);
                ip += decompressedSize;
                sp += seqCount;
                lp += litSize;
                op += cSize;
                llCodePtr += seqCount;
                mlCodePtr += seqCount;
                ofCodePtr += seqCount;
                litSize = 0;
                seqCount = 0;
                /* Entropy only needs to be written once */
                if (litEntropyWritten) {
                    writeLitEntropy = 0;
                }
                if (seqEntropyWritten) {
                    writeSeqEntropy = 0;
                }
            }
        }
    } while (!lastSequence);
    if (writeLitEntropy) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
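         *
         * (Splitting strategy, in outline: the loop above keeps appending
         *  sequences to a pending sub-block while the running size estimate
         *  stays at or below targetCBlockSize; once the estimate crosses the
         *  target, or the last sequence is reached, it attempts to emit the
         *  pending sub-block, and only advances the input, literal, and code
         *  pointers when the emitted block is strictly smaller than the data
         *  it represents.)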
*/ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten"); return 0; } if (ip < iend) { size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock); DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip)); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); assert(cSize != 0); op += cSize; /* We have to regenerate the repcodes because we've skipped some sequences */ if (sp < send) { seqDef const* seq; repcodes_t rep; ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); } ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); } } DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed"); return op-ostart; } size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, void const* src, size_t srcSize, unsigned lastBlock) { ZSTD_entropyCTablesMetadata_t entropyMetadata; FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, &entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); return ZSTD_compressSubBlock_multi(&zc->seqStore, zc->blockState.prevCBlock, zc->blockState.nextCBlock, &entropyMetadata, &zc->appliedParams, dst, dstCapacity, src, srcSize, zc->bmi2, lastBlock, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */); } /**** ended inlining compress/zstd_compress_superblock.c ****/ /**** start inlining compress/zstd_compress.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** start inlining ../common/cpu.h ****/ /* * Copyright (c) 2018-2021, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
 */

#ifndef ZSTD_COMMON_CPU_H
#define ZSTD_COMMON_CPU_H

/**
 * Implementation taken from folly/CpuId.h
 * https://github.com/facebook/folly/blob/master/folly/CpuId.h
 */

/**** skipping file: mem.h ****/

#ifdef _MSC_VER
#include <intrin.h>
#endif

typedef struct {
    U32 f1c;
    U32 f1d;
    U32 f7b;
    U32 f7c;
} ZSTD_cpuid_t;

MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
    U32 f1c = 0;
    U32 f1d = 0;
    U32 f7b = 0;
    U32 f7c = 0;
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
    int reg[4];
    __cpuid((int*)reg, 0);
    {   int const n = reg[0];
        if (n >= 1) {
            __cpuid((int*)reg, 1);
            f1c = (U32)reg[2];
            f1d = (U32)reg[3];
        }
        if (n >= 7) {
            __cpuidex((int*)reg, 7, 0);
            f7b = (U32)reg[1];
            f7c = (U32)reg[2];
        }
    }
#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
    /* The following block works like the normal cpuid branch below, but gcc
     * reserves ebx for use as its PIC register, so we must specially
     * handle the save and restore to avoid clobbering the register
     */
    U32 n;
    __asm__(
        "pushl %%ebx\n\t"
        "cpuid\n\t"
        "popl %%ebx\n\t"
        : "=a"(n)
        : "a"(0)
        : "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "popl %%ebx\n\t"
            : "=a"(f1a), "=c"(f1c), "=d"(f1d)
            : "a"(1));
    }
    if (n >= 7) {
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "movl %%ebx, %%eax\n\t"
            "popl %%ebx"
            : "=a"(f7b), "=c"(f7c)
            : "a"(7), "c"(0)
            : "edx");
    }
#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
    U32 n;
    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
    }
    if (n >= 7) {
        U32 f7a;
        __asm__("cpuid"
                : "=a"(f7a), "=b"(f7b), "=c"(f7c)
                : "a"(7), "c"(0)
                : "edx");
    }
#endif
    {   ZSTD_cpuid_t cpuid;
        cpuid.f1c = f1c;
        cpuid.f1d = f1d;
        cpuid.f7b = f7b;
        cpuid.f7c = f7c;
        return cpuid;
    }
}

#define X(name, r, bit)                                                \
    MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {       \
        return ((cpuid.r) & (1U << bit)) != 0;                         \
    }

/* cpuid(1): Processor Info and Feature Bits. */
#define C(name, bit) X(name, f1c, bit)
  C(sse3, 0)  C(pclmuldq, 1)  C(dtes64, 2)  C(monitor, 3)  C(dscpl, 4)  C(vmx, 5)
  C(smx, 6)  C(eist, 7)  C(tm2, 8)  C(ssse3, 9)  C(cnxtid, 10)  C(fma, 12)
  C(cx16, 13)  C(xtpr, 14)  C(pdcm, 15)  C(pcid, 17)  C(dca, 18)  C(sse41, 19)
  C(sse42, 20)  C(x2apic, 21)  C(movbe, 22)  C(popcnt, 23)  C(tscdeadline, 24)
  C(aes, 25)  C(xsave, 26)  C(osxsave, 27)  C(avx, 28)  C(f16c, 29)  C(rdrand, 30)
#undef C
#define D(name, bit) X(name, f1d, bit)
  D(fpu, 0)  D(vme, 1)  D(de, 2)  D(pse, 3)  D(tsc, 4)  D(msr, 5)  D(pae, 6)
  D(mce, 7)  D(cx8, 8)  D(apic, 9)  D(sep, 11)  D(mtrr, 12)  D(pge, 13)
  D(mca, 14)  D(cmov, 15)  D(pat, 16)  D(pse36, 17)  D(psn, 18)  D(clfsh, 19)
  D(ds, 21)  D(acpi, 22)  D(mmx, 23)  D(fxsr, 24)  D(sse, 25)  D(sse2, 26)
  D(ss, 27)  D(htt, 28)  D(tm, 29)  D(pbe, 31)
#undef D

/* cpuid(7): Extended Features.
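 *
 * These probes feed zstd's run-time dispatch: ZSTD_cpuid() is evaluated once
 * per context, and predicates such as ZSTD_cpuid_bmi2() (f7b bit 8, generated
 * just below) decide whether the BMI2 code paths may be taken.
 */
#if 0
/* Illustrative sketch only, not compiled: querying the BMI2 flag the same way
 * the library does when it initializes cctx->bmi2. */
static int example_has_bmi2(void)
{
    ZSTD_cpuid_t const cpuid = ZSTD_cpuid();
    return ZSTD_cpuid_bmi2(cpuid);   /* 1 if CPUID.(EAX=7,ECX=0):EBX bit 8 is set */
}
#endif
/*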
*/ #define B(name, bit) X(name, f7b, bit) B(bmi1, 3) B(hle, 4) B(avx2, 5) B(smep, 7) B(bmi2, 8) B(erms, 9) B(invpcid, 10) B(rtm, 11) B(mpx, 14) B(avx512f, 16) B(avx512dq, 17) B(rdseed, 18) B(adx, 19) B(smap, 20) B(avx512ifma, 21) B(pcommit, 22) B(clflushopt, 23) B(clwb, 24) B(avx512pf, 26) B(avx512er, 27) B(avx512cd, 28) B(sha, 29) B(avx512bw, 30) B(avx512vl, 31) #undef B #define C(name, bit) X(name, f7c, bit) C(prefetchwt1, 0) C(avx512vbmi, 1) #undef C #undef X #endif /* ZSTD_COMMON_CPU_H */ /**** ended inlining ../common/cpu.h ****/ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/zstd_trace.h ****/ /**** skipping file: hist.h ****/ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ /**** skipping file: ../common/fse.h ****/ #define HUF_STATIC_LINKING_ONLY /**** skipping file: ../common/huf.h ****/ /**** skipping file: zstd_compress_internal.h ****/ /**** skipping file: zstd_compress_sequences.h ****/ /**** skipping file: zstd_compress_literals.h ****/ /**** start inlining zstd_fast.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_FAST_H #define ZSTD_FAST_H #if defined (__cplusplus) extern "C" { #endif /**** skipping file: ../common/mem.h ****/ /**** skipping file: zstd_compress_internal.h ****/ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm); size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_fast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_FAST_H */ /**** ended inlining zstd_fast.h ****/ /**** start inlining zstd_double_fast.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTD_DOUBLE_FAST_H #define ZSTD_DOUBLE_FAST_H #if defined (__cplusplus) extern "C" { #endif /**** skipping file: ../common/mem.h ****/ /**** skipping file: zstd_compress_internal.h ****/ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm); size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_doubleFast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_DOUBLE_FAST_H */ /**** ended inlining zstd_double_fast.h ****/ /**** start inlining zstd_lazy.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_LAZY_H #define ZSTD_LAZY_H #if defined (__cplusplus) extern "C" { #endif /**** skipping file: zstd_compress_internal.h ****/ /** * Dedicated Dictionary Search Structure bucket log. In the * ZSTD_dedicatedDictSearch mode, the hashTable has * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just * one. */ #define ZSTD_LAZY_DDSS_BUCKET_LOG 2 U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip); void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). 
preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */ size_t ZSTD_compressBlock_btlazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_greedy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btlazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_greedy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_greedy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btlazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); #if defined (__cplusplus) } #endif #endif /* ZSTD_LAZY_H */ /**** ended inlining zstd_lazy.h ****/ /**** start inlining zstd_opt.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTD_OPT_H #define ZSTD_OPT_H #if defined (__cplusplus) extern "C" { #endif /**** skipping file: zstd_compress_internal.h ****/ /* used in ZSTD_loadDictionaryContent() */ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); size_t ZSTD_compressBlock_btopt( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btopt_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btopt_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ #if defined (__cplusplus) } #endif #endif /* ZSTD_OPT_H */ /**** ended inlining zstd_opt.h ****/ /**** start inlining zstd_ldm.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_LDM_H #define ZSTD_LDM_H #if defined (__cplusplus) extern "C" { #endif /**** skipping file: zstd_compress_internal.h ****/ /**** skipping file: ../zstd.h ****/ /*-************************************* * Long distance matching ***************************************/ #define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT void ZSTD_ldm_fillHashTable( ldmState_t* state, const BYTE* ip, const BYTE* iend, ldmParams_t const* params); /** * ZSTD_ldm_generateSequences(): * * Generates the sequences using the long distance match finder. * Generates long range matching sequences in `sequences`, which parse a prefix * of the source. `sequences` must be large enough to store every sequence, * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. * @returns 0 or an error code. * * NOTE: The user must have called ZSTD_window_update() for all of the input * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. * NOTE: This function returns an error if it runs out of space to store * sequences. */ size_t ZSTD_ldm_generateSequences( ldmState_t* ldms, rawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize); /** * ZSTD_ldm_blockCompress(): * * Compresses a block using the predefined sequences, along with a secondary * block compressor. The literals section of every sequence is passed to the * secondary block compressor, and those sequences are interspersed with the * predefined sequences. Returns the length of the last literals. * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. 
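 * (A sketch of the expected call protocol follows; the notes on `rawSeqStore`
 *  continue after it.)
 */
#if 0
/* Illustrative sketch only, not compiled: the driving sequence this header
 * describes. `ldms`, `ms`, `seqStore`, `rep`, `params`, `src` and `srcSize`
 * are assumed to be set up by the surrounding block compressor, and
 * `rawSeqStore` must be sized per ZSTD_ldm_getMaxNbSeq(). */
{
    /* precondition, per the NOTE above: ZSTD_window_update() has already
     * been called for all of the input, even if it arrived in chunks */
    size_t const err = ZSTD_ldm_generateSequences(ldms, &rawSeqStore, params, src, srcSize);
    if (!ZSTD_isError(err)) {
        /* literals between the long-range matches are handed to the
         * secondary (regular) block compressor */
        size_t const lastLLSize = ZSTD_ldm_blockCompress(&rawSeqStore, ms, seqStore, rep, src, srcSize);
        (void)lastLLSize;
    }
}
#endif
/*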
 * `rawSeqStore.seq` may also be updated to split the last sequence between two blocks.
 * @return The length of the last literals.
 *
 * NOTE: The source must be at most the maximum block size, but the predefined
 * sequences can be any size, and may be longer than the block. In the case that
 * they are longer than the block, the last sequences may need to be split into
 * two. We handle that case correctly, and update `rawSeqStore` appropriately.
 * NOTE: This function does not return any errors.
 */
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
            void const* src, size_t srcSize);

/**
 * ZSTD_ldm_skipSequences():
 *
 * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
 * Avoids emitting matches less than `minMatch` bytes.
 * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
 */
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
    U32 const minMatch);

/* ZSTD_ldm_skipRawSeqStoreBytes():
 * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
 * Not to be used in conjunction with ZSTD_ldm_skipSequences().
 * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
 */
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);

/** ZSTD_ldm_getTableSize() :
 *  Estimate the space needed for long distance matching tables or 0 if LDM is
 *  disabled.
 */
size_t ZSTD_ldm_getTableSize(ldmParams_t params);

/** ZSTD_ldm_getMaxNbSeq() :
 *  Return an upper bound on the number of sequences that can be produced by
 *  the long distance matcher, or 0 if LDM is disabled.
 */
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);

/** ZSTD_ldm_adjustParameters() :
 *  If the params->hashRateLog is not set, set it to its default value based on
 *  windowLog and params->hashLog.
 *
 *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
 *  params->hashLog if it is not).
 *
 *  Ensures that the minMatchLength >= targetLength during optimal parsing.
 */
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams);

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LDM_H */
/**** ended inlining zstd_ldm.h ****/
/**** skipping file: zstd_compress_superblock.h ****/

/* ***************************************************************
*  Tuning parameters
*****************************************************************/
/*!
 * ZSTD_COMPRESS_HEAPMODE :
 * Select how the default compression function ZSTD_compress() allocates its context,
 * on stack (0, default), or into heap (1).
 * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
 */
#ifndef ZSTD_COMPRESS_HEAPMODE
#  define ZSTD_COMPRESS_HEAPMODE 0
#endif


/*-*************************************
*  Helper functions
***************************************/
/* ZSTD_compressBound()
 * Note that the result from this function is only compatible with the "normal"
 * full-block strategy.
 * When there are a lot of small blocks due to frequent flush in streaming mode
 * the overhead of headers can make the compressed data larger than the
 * return value of ZSTD_compressBound().
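 *
 * (As defined in zstd.h, ZSTD_COMPRESSBOUND(srcSize) evaluates to
 *  srcSize + (srcSize >> 8), plus a small tapering margin for sources under
 *  128 KB; e.g. a 1 MB input gives 1048576 + 4096 = 1052672 bytes.)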
*/ size_t ZSTD_compressBound(size_t srcSize) { return ZSTD_COMPRESSBOUND(srcSize); } /*-************************************* * Context memory management ***************************************/ struct ZSTD_CDict_s { const void* dictContent; size_t dictContentSize; ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */ U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */ ZSTD_cwksp workspace; ZSTD_matchState_t matchState; ZSTD_compressedBlockState_t cBlockState; ZSTD_customMem customMem; U32 dictID; int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ }; /* typedef'd to ZSTD_CDict within "zstd.h" */ ZSTD_CCtx* ZSTD_createCCtx(void) { return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); } static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) { assert(cctx != NULL); ZSTD_memset(cctx, 0, sizeof(*cctx)); cctx->customMem = memManager; cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters); assert(!ZSTD_isError(err)); (void)err; } } ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) { ZSTD_STATIC_ASSERT(zcss_init==0); ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem); if (!cctx) return NULL; ZSTD_initCCtx(cctx, customMem); return cctx; } } ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) { ZSTD_cwksp ws; ZSTD_CCtx* cctx; if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc); cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx)); if (cctx == NULL) return NULL; ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx)); ZSTD_cwksp_move(&cctx->workspace, &ws); cctx->staticSize = workspaceSize; /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE); cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); return cctx; } /** * Clears and frees all of the dictionaries in the CCtx. */ static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx) { ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem); ZSTD_freeCDict(cctx->localDict.cdict); ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict)); ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); cctx->cdict = NULL; } static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict) { size_t const bufferSize = dict.dictBuffer != NULL ? 
dict.dictSize : 0; size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict); return bufferSize + cdictSize; } static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) { assert(cctx != NULL); assert(cctx->staticSize == 0); ZSTD_clearAllDicts(cctx); #ifdef ZSTD_MULTITHREAD ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; #endif ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); } size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "not compatible with static CCtx"); { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); ZSTD_freeCCtxContent(cctx); if (!cctxInWorkspace) { ZSTD_customFree(cctx, cctx->customMem); } } return 0; } static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD return ZSTDMT_sizeof_CCtx(cctx->mtctx); #else (void)cctx; return 0; #endif } size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support sizeof on NULL */ /* cctx may be in the workspace */ return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx)) + ZSTD_cwksp_sizeof(&cctx->workspace) + ZSTD_sizeof_localDict(cctx->localDict) + ZSTD_sizeof_mtctx(cctx); } size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) { return ZSTD_sizeof_CCtx(zcs); /* same object */ } /* private API call, for dictBuilder only */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } /* Returns 1 if compression parameters are such that we should * enable long distance matching (wlog >= 27, strategy >= btopt). * Returns 0 otherwise. */ static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) { return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27; } static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( ZSTD_compressionParameters cParams) { ZSTD_CCtx_params cctxParams; /* should not matter, as all cParams are presumed properly defined */ ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT); cctxParams.cParams = cParams; if (ZSTD_CParams_shouldEnableLdm(&cParams)) { DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params"); cctxParams.ldmParams.enableLdm = 1; /* LDM is enabled by default for optimal parser and window size >= 128MB */ ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); assert(cctxParams.ldmParams.hashRateLog < 32); } assert(!ZSTD_checkCParams(cParams)); return cctxParams; } static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( ZSTD_customMem customMem) { ZSTD_CCtx_params* params; if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; params = (ZSTD_CCtx_params*)ZSTD_customCalloc( sizeof(ZSTD_CCtx_params), customMem); if (!params) { return NULL; } ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); params->customMem = customMem; return params; } ZSTD_CCtx_params* ZSTD_createCCtxParams(void) { return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); } size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) { if (params == NULL) { return 0; } ZSTD_customFree(params, params->customMem); return 0; } size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params) { return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); } size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) { RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); cctxParams->compressionLevel = compressionLevel; cctxParams->fParams.contentSizeFlag = 1; 
return 0; } #define ZSTD_NO_CLEVEL 0 /** * Initializes the cctxParams from params and compressionLevel. * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. */ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel) { assert(!ZSTD_checkCParams(params->cParams)); ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); cctxParams->cParams = params->cParams; cctxParams->fParams = params->fParams; /* Should not matter, as all cParams are presumed properly defined. * But, set it for tracing anyway. */ cctxParams->compressionLevel = compressionLevel; } size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) { RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL); return 0; } /** * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. * @param params Validated zstd parameters. */ static void ZSTD_CCtxParams_setZstdParams( ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) { assert(!ZSTD_checkCParams(params->cParams)); cctxParams->cParams = params->cParams; cctxParams->fParams = params->fParams; /* Should not matter, as all cParams are presumed properly defined. * But, set it for tracing anyway. */ cctxParams->compressionLevel = ZSTD_NO_CLEVEL; } ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) { ZSTD_bounds bounds = { 0, 0, 0 }; switch(param) { case ZSTD_c_compressionLevel: bounds.lowerBound = ZSTD_minCLevel(); bounds.upperBound = ZSTD_maxCLevel(); return bounds; case ZSTD_c_windowLog: bounds.lowerBound = ZSTD_WINDOWLOG_MIN; bounds.upperBound = ZSTD_WINDOWLOG_MAX; return bounds; case ZSTD_c_hashLog: bounds.lowerBound = ZSTD_HASHLOG_MIN; bounds.upperBound = ZSTD_HASHLOG_MAX; return bounds; case ZSTD_c_chainLog: bounds.lowerBound = ZSTD_CHAINLOG_MIN; bounds.upperBound = ZSTD_CHAINLOG_MAX; return bounds; case ZSTD_c_searchLog: bounds.lowerBound = ZSTD_SEARCHLOG_MIN; bounds.upperBound = ZSTD_SEARCHLOG_MAX; return bounds; case ZSTD_c_minMatch: bounds.lowerBound = ZSTD_MINMATCH_MIN; bounds.upperBound = ZSTD_MINMATCH_MAX; return bounds; case ZSTD_c_targetLength: bounds.lowerBound = ZSTD_TARGETLENGTH_MIN; bounds.upperBound = ZSTD_TARGETLENGTH_MAX; return bounds; case ZSTD_c_strategy: bounds.lowerBound = ZSTD_STRATEGY_MIN; bounds.upperBound = ZSTD_STRATEGY_MAX; return bounds; case ZSTD_c_contentSizeFlag: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_checksumFlag: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_dictIDFlag: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_nbWorkers: bounds.lowerBound = 0; #ifdef ZSTD_MULTITHREAD bounds.upperBound = ZSTDMT_NBWORKERS_MAX; #else bounds.upperBound = 0; #endif return bounds; case ZSTD_c_jobSize: bounds.lowerBound = 0; #ifdef ZSTD_MULTITHREAD bounds.upperBound = ZSTDMT_JOBSIZE_MAX; #else bounds.upperBound = 0; #endif return bounds; case ZSTD_c_overlapLog: #ifdef ZSTD_MULTITHREAD bounds.lowerBound = ZSTD_OVERLAPLOG_MIN; bounds.upperBound = ZSTD_OVERLAPLOG_MAX; #else bounds.lowerBound = 0; bounds.upperBound = 0; #endif return bounds; case ZSTD_c_enableDedicatedDictSearch: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_enableLongDistanceMatching: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_ldmHashLog:
bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN; bounds.upperBound = ZSTD_LDM_HASHLOG_MAX; return bounds; case ZSTD_c_ldmMinMatch: bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN; bounds.upperBound = ZSTD_LDM_MINMATCH_MAX; return bounds; case ZSTD_c_ldmBucketSizeLog: bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN; bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX; return bounds; case ZSTD_c_ldmHashRateLog: bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN; bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX; return bounds; /* experimental parameters */ case ZSTD_c_rsyncable: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_forceMaxWindow : bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_c_format: ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); bounds.lowerBound = ZSTD_f_zstd1; bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */ return bounds; case ZSTD_c_forceAttachDict: ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad); bounds.lowerBound = ZSTD_dictDefaultAttach; bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */ return bounds; case ZSTD_c_literalCompressionMode: ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed); bounds.lowerBound = ZSTD_lcm_auto; bounds.upperBound = ZSTD_lcm_uncompressed; return bounds; case ZSTD_c_targetCBlockSize: bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN; bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX; return bounds; case ZSTD_c_srcSizeHint: bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN; bounds.upperBound = ZSTD_SRCSIZEHINT_MAX; return bounds; case ZSTD_c_stableInBuffer: case ZSTD_c_stableOutBuffer: bounds.lowerBound = (int)ZSTD_bm_buffered; bounds.upperBound = (int)ZSTD_bm_stable; return bounds; case ZSTD_c_blockDelimiters: bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters; bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters; return bounds; case ZSTD_c_validateSequences: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; default: bounds.error = ERROR(parameter_unsupported); return bounds; } } /* ZSTD_cParam_clampBounds: * Clamps the value into the bounded range. 
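 * As an illustration (a minimal sketch, with `lvl` a hypothetical caller
 * variable) : an out-of-range request is silently pulled back to the
 * nearest bound rather than rejected :
 * \code
 *   int lvl = 1 << 20;   // far above ZSTD_maxCLevel()
 *   ZSTD_cParam_clampBounds(ZSTD_c_compressionLevel, &lvl);
 *   assert(lvl == ZSTD_maxCLevel());
 * \endcode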
*/ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) { ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); if (ZSTD_isError(bounds.error)) return bounds.error; if (*value < bounds.lowerBound) *value = bounds.lowerBound; if (*value > bounds.upperBound) *value = bounds.upperBound; return 0; } #define BOUNDCHECK(cParam, val) { \ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ parameter_outOfBound, "Param out of bounds"); \ } static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) { switch(param) { case ZSTD_c_compressionLevel: case ZSTD_c_hashLog: case ZSTD_c_chainLog: case ZSTD_c_searchLog: case ZSTD_c_minMatch: case ZSTD_c_targetLength: case ZSTD_c_strategy: return 1; case ZSTD_c_format: case ZSTD_c_windowLog: case ZSTD_c_contentSizeFlag: case ZSTD_c_checksumFlag: case ZSTD_c_dictIDFlag: case ZSTD_c_forceMaxWindow : case ZSTD_c_nbWorkers: case ZSTD_c_jobSize: case ZSTD_c_overlapLog: case ZSTD_c_rsyncable: case ZSTD_c_enableDedicatedDictSearch: case ZSTD_c_enableLongDistanceMatching: case ZSTD_c_ldmHashLog: case ZSTD_c_ldmMinMatch: case ZSTD_c_ldmBucketSizeLog: case ZSTD_c_ldmHashRateLog: case ZSTD_c_forceAttachDict: case ZSTD_c_literalCompressionMode: case ZSTD_c_targetCBlockSize: case ZSTD_c_srcSizeHint: case ZSTD_c_stableInBuffer: case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: default: return 0; } } size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) { DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value); if (cctx->streamStage != zcss_init) { if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { RETURN_ERROR(stage_wrong, "can only set params in ctx init stage"); } } switch(param) { case ZSTD_c_nbWorkers: RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported, "MT not compatible with static alloc"); break; case ZSTD_c_compressionLevel: case ZSTD_c_windowLog: case ZSTD_c_hashLog: case ZSTD_c_chainLog: case ZSTD_c_searchLog: case ZSTD_c_minMatch: case ZSTD_c_targetLength: case ZSTD_c_strategy: case ZSTD_c_ldmHashRateLog: case ZSTD_c_format: case ZSTD_c_contentSizeFlag: case ZSTD_c_checksumFlag: case ZSTD_c_dictIDFlag: case ZSTD_c_forceMaxWindow: case ZSTD_c_forceAttachDict: case ZSTD_c_literalCompressionMode: case ZSTD_c_jobSize: case ZSTD_c_overlapLog: case ZSTD_c_rsyncable: case ZSTD_c_enableDedicatedDictSearch: case ZSTD_c_enableLongDistanceMatching: case ZSTD_c_ldmHashLog: case ZSTD_c_ldmMinMatch: case ZSTD_c_ldmBucketSizeLog: case ZSTD_c_targetCBlockSize: case ZSTD_c_srcSizeHint: case ZSTD_c_stableInBuffer: case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); } size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int value) { DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value); switch(param) { case ZSTD_c_format : BOUNDCHECK(ZSTD_c_format, value); CCtxParams->format = (ZSTD_format_e)value; return (size_t)CCtxParams->format; case ZSTD_c_compressionLevel : { FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); if (value == 0) CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ else CCtxParams->compressionLevel = value; if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel; return 0; /* return type (size_t) cannot represent negative values 
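 * Note that negative levels are still stored verbatim in CCtxParams,
 * and can be read back exactly through ZSTD_CCtxParams_getParameter(),
 * which reports through an int* out-parameter instead.
 * Sketch, assuming a valid `cctxParams` object :
 * \code
 *   ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_compressionLevel, -5);
 *   { int lvl;
 *     ZSTD_CCtxParams_getParameter(cctxParams, ZSTD_c_compressionLevel, &lvl);
 *     assert(lvl == -5); }
 * \endcode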
*/ } case ZSTD_c_windowLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_windowLog, value); CCtxParams->cParams.windowLog = (U32)value; return CCtxParams->cParams.windowLog; case ZSTD_c_hashLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_hashLog, value); CCtxParams->cParams.hashLog = (U32)value; return CCtxParams->cParams.hashLog; case ZSTD_c_chainLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_chainLog, value); CCtxParams->cParams.chainLog = (U32)value; return CCtxParams->cParams.chainLog; case ZSTD_c_searchLog : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_searchLog, value); CCtxParams->cParams.searchLog = (U32)value; return (size_t)value; case ZSTD_c_minMatch : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_minMatch, value); CCtxParams->cParams.minMatch = value; return CCtxParams->cParams.minMatch; case ZSTD_c_targetLength : BOUNDCHECK(ZSTD_c_targetLength, value); CCtxParams->cParams.targetLength = value; return CCtxParams->cParams.targetLength; case ZSTD_c_strategy : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_strategy, value); CCtxParams->cParams.strategy = (ZSTD_strategy)value; return (size_t)CCtxParams->cParams.strategy; case ZSTD_c_contentSizeFlag : /* Content size written in frame header _when known_ (default:1) */ DEBUGLOG(4, "set content size flag = %u", (value!=0)); CCtxParams->fParams.contentSizeFlag = value != 0; return CCtxParams->fParams.contentSizeFlag; case ZSTD_c_checksumFlag : /* A 32-bit content checksum will be calculated and written at end of frame (default:0) */ CCtxParams->fParams.checksumFlag = value != 0; return CCtxParams->fParams.checksumFlag; case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); CCtxParams->fParams.noDictIDFlag = !value; return !CCtxParams->fParams.noDictIDFlag; case ZSTD_c_forceMaxWindow : CCtxParams->forceWindow = (value != 0); return CCtxParams->forceWindow; case ZSTD_c_forceAttachDict : { const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; BOUNDCHECK(ZSTD_c_forceAttachDict, pref); CCtxParams->attachDictPref = pref; return CCtxParams->attachDictPref; } case ZSTD_c_literalCompressionMode : { const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value; BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; } case ZSTD_c_nbWorkers : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); CCtxParams->nbWorkers = value; return CCtxParams->nbWorkers; #endif case ZSTD_c_jobSize : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else /* Adjust to the minimum non-default value.
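 * (0 keeps its special "use default" meaning ; any other request below
 * ZSTDMT_JOBSIZE_MIN is raised to that floor before the usual clamping)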
*/ if (value != 0 && value < ZSTDMT_JOBSIZE_MIN) value = ZSTDMT_JOBSIZE_MIN; FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); assert(value >= 0); CCtxParams->jobSize = value; return CCtxParams->jobSize; #endif case ZSTD_c_overlapLog : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->overlapLog = value; return CCtxParams->overlapLog; #endif case ZSTD_c_rsyncable : #ifndef ZSTD_MULTITHREAD RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); return 0; #else FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); CCtxParams->rsyncable = value; return CCtxParams->rsyncable; #endif case ZSTD_c_enableDedicatedDictSearch : CCtxParams->enableDedicatedDictSearch = (value!=0); return CCtxParams->enableDedicatedDictSearch; case ZSTD_c_enableLongDistanceMatching : CCtxParams->ldmParams.enableLdm = (value!=0); return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : if (value!=0) /* 0 ==> auto */ BOUNDCHECK(ZSTD_c_ldmHashLog, value); CCtxParams->ldmParams.hashLog = value; return CCtxParams->ldmParams.hashLog; case ZSTD_c_ldmMinMatch : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmMinMatch, value); CCtxParams->ldmParams.minMatchLength = value; return CCtxParams->ldmParams.minMatchLength; case ZSTD_c_ldmBucketSizeLog : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); CCtxParams->ldmParams.bucketSizeLog = value; return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_c_ldmHashRateLog : RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN, parameter_outOfBound, "Param out of bounds!"); CCtxParams->ldmParams.hashRateLog = value; return CCtxParams->ldmParams.hashRateLog; case ZSTD_c_targetCBlockSize : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_targetCBlockSize, value); CCtxParams->targetCBlockSize = value; return CCtxParams->targetCBlockSize; case ZSTD_c_srcSizeHint : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_srcSizeHint, value); CCtxParams->srcSizeHint = value; return CCtxParams->srcSizeHint; case ZSTD_c_stableInBuffer: BOUNDCHECK(ZSTD_c_stableInBuffer, value); CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value; return CCtxParams->inBufferMode; case ZSTD_c_stableOutBuffer: BOUNDCHECK(ZSTD_c_stableOutBuffer, value); CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value; return CCtxParams->outBufferMode; case ZSTD_c_blockDelimiters: BOUNDCHECK(ZSTD_c_blockDelimiters, value); CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; return CCtxParams->blockDelimiters; case ZSTD_c_validateSequences: BOUNDCHECK(ZSTD_c_validateSequences, value); CCtxParams->validateSequences = value; return CCtxParams->validateSequences; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } } size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value) { return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); } size_t ZSTD_CCtxParams_getParameter( ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value) { switch(param) { case ZSTD_c_format : *value = CCtxParams->format; break; case ZSTD_c_compressionLevel : *value = CCtxParams->compressionLevel; break; case ZSTD_c_windowLog : *value = (int)CCtxParams->cParams.windowLog; break; case ZSTD_c_hashLog : *value = (int)CCtxParams->cParams.hashLog; break; case ZSTD_c_chainLog : *value = 
(int)CCtxParams->cParams.chainLog; break; case ZSTD_c_searchLog : *value = CCtxParams->cParams.searchLog; break; case ZSTD_c_minMatch : *value = CCtxParams->cParams.minMatch; break; case ZSTD_c_targetLength : *value = CCtxParams->cParams.targetLength; break; case ZSTD_c_strategy : *value = (unsigned)CCtxParams->cParams.strategy; break; case ZSTD_c_contentSizeFlag : *value = CCtxParams->fParams.contentSizeFlag; break; case ZSTD_c_checksumFlag : *value = CCtxParams->fParams.checksumFlag; break; case ZSTD_c_dictIDFlag : *value = !CCtxParams->fParams.noDictIDFlag; break; case ZSTD_c_forceMaxWindow : *value = CCtxParams->forceWindow; break; case ZSTD_c_forceAttachDict : *value = CCtxParams->attachDictPref; break; case ZSTD_c_literalCompressionMode : *value = CCtxParams->literalCompressionMode; break; case ZSTD_c_nbWorkers : #ifndef ZSTD_MULTITHREAD assert(CCtxParams->nbWorkers == 0); #endif *value = CCtxParams->nbWorkers; break; case ZSTD_c_jobSize : #ifndef ZSTD_MULTITHREAD RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); #else assert(CCtxParams->jobSize <= INT_MAX); *value = (int)CCtxParams->jobSize; break; #endif case ZSTD_c_overlapLog : #ifndef ZSTD_MULTITHREAD RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); #else *value = CCtxParams->overlapLog; break; #endif case ZSTD_c_rsyncable : #ifndef ZSTD_MULTITHREAD RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); #else *value = CCtxParams->rsyncable; break; #endif case ZSTD_c_enableDedicatedDictSearch : *value = CCtxParams->enableDedicatedDictSearch; break; case ZSTD_c_enableLongDistanceMatching : *value = CCtxParams->ldmParams.enableLdm; break; case ZSTD_c_ldmHashLog : *value = CCtxParams->ldmParams.hashLog; break; case ZSTD_c_ldmMinMatch : *value = CCtxParams->ldmParams.minMatchLength; break; case ZSTD_c_ldmBucketSizeLog : *value = CCtxParams->ldmParams.bucketSizeLog; break; case ZSTD_c_ldmHashRateLog : *value = CCtxParams->ldmParams.hashRateLog; break; case ZSTD_c_targetCBlockSize : *value = (int)CCtxParams->targetCBlockSize; break; case ZSTD_c_srcSizeHint : *value = (int)CCtxParams->srcSizeHint; break; case ZSTD_c_stableInBuffer : *value = (int)CCtxParams->inBufferMode; break; case ZSTD_c_stableOutBuffer : *value = (int)CCtxParams->outBufferMode; break; case ZSTD_c_blockDelimiters : *value = (int)CCtxParams->blockDelimiters; break; case ZSTD_c_validateSequences : *value = (int)CCtxParams->validateSequences; break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return 0; } /** ZSTD_CCtx_setParametersUsingCCtxParams() : * just applies `params` into `cctx` * no action is performed, parameters are merely stored. * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. * This is possible even if a compression is ongoing. * In which case, new parameters will be applied on the fly, starting with next compression job. 
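 * Typical calling pattern, as a sketch (public API, error checks elided ;
 * `cctx` is assumed to be an existing compression context) :
 * \code
 *   ZSTD_CCtx_params* const cctxParams = ZSTD_createCCtxParams();
 *   ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_compressionLevel, 19);
 *   ZSTD_CCtxParams_setParameter(cctxParams, ZSTD_c_checksumFlag, 1);
 *   ZSTD_CCtx_setParametersUsingCCtxParams(cctx, cctxParams);
 *   ZSTD_freeCCtxParams(cctxParams);
 * \endcode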
*/ size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) { DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams"); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "The context is in the wrong stage!"); RETURN_ERROR_IF(cctx->cdict, stage_wrong, "Can't override parameters with cdict attached (some must " "be inherited from the cdict)."); cctx->requestedParams = *params; return 0; } ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't set pledgedSrcSize when not in init stage."); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; return 0; } static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams( int const compressionLevel, size_t const dictSize); static int ZSTD_dedicatedDictSearch_isSupported( const ZSTD_compressionParameters* cParams); static void ZSTD_dedicatedDictSearch_revertCParams( ZSTD_compressionParameters* cParams); /** * Initializes the local dict using the requested parameters. * NOTE: This does not use the pledged src size, because it may be used for more * than one compression. */ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) { ZSTD_localDict* const dl = &cctx->localDict; if (dl->dict == NULL) { /* No local dictionary. */ assert(dl->dictBuffer == NULL); assert(dl->cdict == NULL); assert(dl->dictSize == 0); return 0; } if (dl->cdict != NULL) { assert(cctx->cdict == dl->cdict); /* Local dictionary already initialized. */ return 0; } assert(dl->dictSize > 0); assert(cctx->cdict == NULL); assert(cctx->prefixDict.dict == NULL); dl->cdict = ZSTD_createCDict_advanced2( dl->dict, dl->dictSize, ZSTD_dlm_byRef, dl->dictContentType, &cctx->requestedParams, cctx->customMem); RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed"); cctx->cdict = dl->cdict; return 0; } size_t ZSTD_CCtx_loadDictionary_advanced( ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't load a dictionary when ctx is not in init stage."); DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); ZSTD_clearAllDicts(cctx); /* in case one already exists */ if (dict == NULL || dictSize == 0) /* no dictionary mode */ return 0; if (dictLoadMethod == ZSTD_dlm_byRef) { cctx->localDict.dict = dict; } else { void* dictBuffer; RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "no malloc for static CCtx"); dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!"); ZSTD_memcpy(dictBuffer, dict, dictSize); cctx->localDict.dictBuffer = dictBuffer; cctx->localDict.dict = dictBuffer; } cctx->localDict.dictSize = dictSize; cctx->localDict.dictContentType = dictContentType; return 0; } ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); } ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); } size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't 
ref a dict when ctx not in init stage."); /* Free the existing local cdict (if any) to save memory. */ ZSTD_clearAllDicts(cctx); cctx->cdict = cdict; return 0; } size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't ref a pool when ctx not in init stage."); cctx->pool = pool; return 0; } size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) { return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent); } size_t ZSTD_CCtx_refPrefix_advanced( ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't ref a prefix when ctx not in init stage."); ZSTD_clearAllDicts(cctx); if (prefix != NULL && prefixSize > 0) { cctx->prefixDict.dict = prefix; cctx->prefixDict.dictSize = prefixSize; cctx->prefixDict.dictContentType = dictContentType; } return 0; } /*! ZSTD_CCtx_reset() : * Also dumps dictionary */ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) { if ( (reset == ZSTD_reset_session_only) || (reset == ZSTD_reset_session_and_parameters) ) { cctx->streamStage = zcss_init; cctx->pledgedSrcSizePlusOne = 0; } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't reset parameters only when not in init stage."); ZSTD_clearAllDicts(cctx); return ZSTD_CCtxParams_reset(&cctx->requestedParams); } return 0; } /** ZSTD_checkCParams() : control CParam values remain within authorized range. @return : 0, or an error code if one value is beyond authorized range */ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) { BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog); BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog); BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog); BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog); BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch); BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); BOUNDCHECK(ZSTD_c_strategy, cParams.strategy); return 0; } /** ZSTD_clampCParams() : * make CParam values within valid range. * @return : valid CParams */ static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { # define CLAMP_TYPE(cParam, val, type) { \ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \ } # define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) CLAMP(ZSTD_c_windowLog, cParams.windowLog); CLAMP(ZSTD_c_chainLog, cParams.chainLog); CLAMP(ZSTD_c_hashLog, cParams.hashLog); CLAMP(ZSTD_c_searchLog, cParams.searchLog); CLAMP(ZSTD_c_minMatch, cParams.minMatch); CLAMP(ZSTD_c_targetLength,cParams.targetLength); CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy); return cParams; } /** ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) { U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); return hashLog - btScale; } /** ZSTD_dictAndWindowLog() : * Returns an adjusted window log that is large enough to fit the source and the dictionary. * The zstd format says that the entire dictionary is valid if one byte of the dictionary * is within the window. So the hashLog and chainLog should be large enough to reference both * the dictionary and the window.
So we must use this adjusted dictAndWindowLog when downsizing * the hashLog and chainLog. * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN. */ static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize) { const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX; /* No dictionary ==> No change */ if (dictSize == 0) { return windowLog; } assert(windowLog <= ZSTD_WINDOWLOG_MAX); assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */ { U64 const windowSize = 1ULL << windowLog; U64 const dictAndWindowSize = dictSize + windowSize; /* If the window size is already large enough to fit both the source and the dictionary * then just use the window size. Otherwise adjust so that it fits the dictionary and * the window. */ if (windowSize >= dictSize + srcSize) { return windowLog; /* Window size large enough already */ } else if (dictAndWindowSize >= maxWindowSize) { return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */ } else { return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1; } } } /** ZSTD_adjustCParams_internal() : * optimize `cPar` for a specified input (`srcSize` and `dictSize`). * mostly downsize to reduce memory consumption and initialization latency. * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`. * note : `srcSize==0` means 0! * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize, ZSTD_cParamMode_e mode) { const U64 minSrcSize = 513; /* (1<<9) + 1 */ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); assert(ZSTD_checkCParams(cPar)==0); switch (mode) { case ZSTD_cpm_unknown: case ZSTD_cpm_noAttachDict: /* If we don't know the source size, don't make any * assumptions about it. We will already have selected * smaller parameters if a dictionary is in use. */ break; case ZSTD_cpm_createCDict: /* Assume a small source size when creating a dictionary * with an unknown source size. */ if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN) srcSize = minSrcSize; break; case ZSTD_cpm_attachDict: /* Dictionary has its own dedicated parameters which have * already been selected. We are selecting parameters * for only the source. */ dictSize = 0; break; default: assert(0); break; } /* resize windowLog if input is small enough, to use less memory */ if ( (srcSize < maxWindowResize) && (dictSize < maxWindowResize) ) { U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ?
ZSTD_HASHLOG_MIN : ZSTD_highbit32(tSize-1) + 1; if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; } if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) { U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize); U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1; if (cycleLog > dictAndWindowLog) cPar.chainLog -= (cycleLog - dictAndWindowLog); } if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ return cPar; } ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) { cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown); } static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); static void ZSTD_overrideCParams( ZSTD_compressionParameters* cParams, const ZSTD_compressionParameters* overrides) { if (overrides->windowLog) cParams->windowLog = overrides->windowLog; if (overrides->hashLog) cParams->hashLog = overrides->hashLog; if (overrides->chainLog) cParams->chainLog = overrides->chainLog; if (overrides->searchLog) cParams->searchLog = overrides->searchLog; if (overrides->minMatch) cParams->minMatch = overrides->minMatch; if (overrides->targetLength) cParams->targetLength = overrides->targetLength; if (overrides->strategy) cParams->strategy = overrides->strategy; } ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { ZSTD_compressionParameters cParams; if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) { srcSizeHint = CCtxParams->srcSizeHint; } cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode); if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode); } static size_t ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, const U32 forCCtx) { size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); size_t const hSize = ((size_t)1) << cParams->hashLog; U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0; /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't * surrounded by redzones in ASAN. */ size_t const tableSpace = chainSize * sizeof(U32) + hSize * sizeof(U32) + h3Size * sizeof(U32); size_t const optPotentialSpace = ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32)) + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32)) + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32)) + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32)) + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)) + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt)) ?
optPotentialSpace : 0; DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u", (U32)chainSize, (U32)hSize, (U32)h3Size); return tableSpace + optSpace; } static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_compressionParameters* cParams, const ldmParams_t* ldmParams, const int isStatic, const size_t buffInSize, const size_t buffOutSize, const U64 pledgedSrcSize) { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize)); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); U32 const divider = (cParams->minMatch==3) ? 3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE); size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1); size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); size_t const ldmSeqSpace = ldmParams->enableLdm ? ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; size_t const neededSpace = cctxSpace + entropySpace + blockStateSpace + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + bufferSpace; DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); return neededSpace; } size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); /* estimateCCtxSize is for one-shot compression. So no buffers should * be needed. However, we still allocate two 0-sized buffers, which can * take space under ASAN. 
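 * The intended pattern for these estimates, as a sketch (static CCtx,
 * with `workspace` a hypothetical caller-provided buffer) :
 * \code
 *   size_t const cctxSize = ZSTD_estimateCCtxSize(compressionLevel);
 *   void* const workspace = malloc(cctxSize);
 *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, cctxSize);
 * \endcode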
*/ return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN); } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) { ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); return ZSTD_estimateCCtxSize_usingCCtxParams(&params); } static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); return ZSTD_estimateCCtxSize_usingCParams(cParams); } size_t ZSTD_estimateCCtxSize(int compressionLevel) { int level; size_t memBudget = 0; for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { size_t const newMB = ZSTD_estimateCCtxSize_internal(level); if (newMB > memBudget) memBudget = newMB; } return memBudget; } size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) { RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered) ? ((size_t)1 << cParams.windowLog) + blockSize : 0; size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize, ZSTD_CONTENTSIZE_UNKNOWN); } } size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) { ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); return ZSTD_estimateCStreamSize_usingCCtxParams(&params); } static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); return ZSTD_estimateCStreamSize_usingCParams(cParams); } size_t ZSTD_estimateCStreamSize(int compressionLevel) { int level; size_t memBudget = 0; for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { size_t const newMB = ZSTD_estimateCStreamSize_internal(level); if (newMB > memBudget) memBudget = newMB; } return memBudget; } /* ZSTD_getFrameProgression(): * tells how much data has been consumed (input) and produced (output) for current frame. * able to count progression inside worker threads (non-blocking mode). */ ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { return ZSTDMT_getFrameProgression(cctx->mtctx); } #endif { ZSTD_frameProgression fp; size_t const buffered = (cctx->inBuff == NULL) ? 0 : cctx->inBuffPos - cctx->inToCompress; if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress); assert(buffered <= ZSTD_BLOCKSIZE_MAX); fp.ingested = cctx->consumedSrcSize + buffered; fp.consumed = cctx->consumedSrcSize; fp.produced = cctx->producedCSize; fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */ fp.currentJobID = 0; fp.nbActiveWorkers = 0; return fp; } } /*! ZSTD_toFlushNow() * Only useful for multithreading scenarios currently (nbWorkers >= 1).
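 * In that setting, it gives an instant estimation of the amount of data
 * already produced and sitting in internal buffers, which a caller can use
 * to decide whether an extra flush round is worthwhile.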
*/ size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers > 0) { return ZSTDMT_toFlushNow(cctx->mtctx); } #endif (void)cctx; return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */ } static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) { (void)cParams1; (void)cParams2; assert(cParams1.windowLog == cParams2.windowLog); assert(cParams1.chainLog == cParams2.chainLog); assert(cParams1.hashLog == cParams2.hashLog); assert(cParams1.searchLog == cParams2.searchLog); assert(cParams1.minMatch == cParams2.minMatch); assert(cParams1.targetLength == cParams2.targetLength); assert(cParams1.strategy == cParams2.strategy); } void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) { int i; for (i = 0; i < ZSTD_REP_NUM; ++i) bs->rep[i] = repStartValue[i]; bs->entropy.huf.repeatMode = HUF_repeat_none; bs->entropy.fse.offcode_repeatMode = FSE_repeat_none; bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none; bs->entropy.fse.litlength_repeatMode = FSE_repeat_none; } /*! ZSTD_invalidateMatchState() * Invalidate all the matches in the match finder tables. * Requires nextSrc and base to be set (can be NULL). */ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) { ZSTD_window_clear(&ms->window); ms->nextToUpdate = ms->window.dictLimit; ms->loadedDictEnd = 0; ms->opt.litLengthSum = 0; /* force reset of btopt stats */ ms->dictMatchState = NULL; } /** * Controls, for this matchState reset, whether the tables need to be cleared / * prepared for the coming compression (ZSTDcrp_makeClean), or whether the * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a * subsequent operation will overwrite the table space anyways (e.g., copying * the matchState contents in from a CDict). */ typedef enum { ZSTDcrp_makeClean, ZSTDcrp_leaveDirty } ZSTD_compResetPolicy_e; /** * Controls, for this matchState reset, whether indexing can continue where it * left off (ZSTDirp_continue), or whether it needs to be restarted from zero * (ZSTDirp_reset). */ typedef enum { ZSTDirp_continue, ZSTDirp_reset } ZSTD_indexResetPolicy_e; typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e; static size_t ZSTD_reset_matchState(ZSTD_matchState_t* ms, ZSTD_cwksp* ws, const ZSTD_compressionParameters* cParams, const ZSTD_compResetPolicy_e crp, const ZSTD_indexResetPolicy_e forceResetIndex, const ZSTD_resetTarget_e forWho) { size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); size_t const hSize = ((size_t)1) << cParams->hashLog; U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; size_t const h3Size = hashLog3 ? 
((size_t)1) << hashLog3 : 0; DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset); if (forceResetIndex == ZSTDirp_reset) { ZSTD_window_init(&ms->window); ZSTD_cwksp_mark_tables_dirty(ws); } ms->hashLog3 = hashLog3; ZSTD_invalidateMatchState(ms); assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */ ZSTD_cwksp_clear_tables(ws); DEBUGLOG(5, "reserving table space"); /* table Space */ ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32)); ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32)); ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32)); RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, "failed a workspace allocation in ZSTD_reset_matchState"); DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty); if (crp!=ZSTDcrp_leaveDirty) { /* reset tables only */ ZSTD_cwksp_clean_tables(ws); } /* opt parser space */ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { DEBUGLOG(4, "reserving optimal parser space"); ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned)); ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned)); ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned)); ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned)); ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)); ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); } ms->cParams = *cParams; RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, "failed a workspace allocation in ZSTD_reset_matchState"); return 0; } /* ZSTD_indexTooCloseToMax() : * minor optimization : prefer memset() rather than reduceIndex() * which is measurably slow in some circumstances (reported for Visual Studio). * Works when re-using a context for a lot of smallish inputs : * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN, * memset() will be triggered before reduceIndex(). */ #define ZSTD_INDEXOVERFLOW_MARGIN (16 MB) static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) { return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN); } /*! ZSTD_resetCCtx_internal() : note : `params` are assumed fully validated at this stage */ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_CCtx_params params, U64 const pledgedSrcSize, ZSTD_compResetPolicy_e const crp, ZSTD_buffered_policy_e const zbuff) { ZSTD_cwksp* const ws = &zc->workspace; DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u", (U32)pledgedSrcSize, params.cParams.windowLog); assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); zc->isFirstBlock = 1; if (params.ldmParams.enableLdm) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); assert(params.ldmParams.hashRateLog < 32); } { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); U32 const divider = (params.cParams.minMatch==3) ? 3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered) ?
ZSTD_compressBound(blockSize) + 1 : 0; size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered) ? windowSize + blockSize : 0; size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize); int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window); ZSTD_indexResetPolicy_e needsIndexReset = (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset; size_t const neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( &params.cParams, &params.ldmParams, zc->staticSize != 0, buffInSize, buffOutSize, pledgedSrcSize); FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0); /* Check if workspace is large enough, alloc a new one if needed */ { int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace; int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); DEBUGLOG(4, "Need %zu B workspace", neededSpace); DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); if (workspaceTooSmall || workspaceWasteful) { DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB", ZSTD_cwksp_sizeof(ws) >> 10, neededSpace >> 10); RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize"); needsIndexReset = ZSTDirp_reset; ZSTD_cwksp_free(ws, zc->customMem); FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), ""); DEBUGLOG(5, "reserving object space"); /* Statically sized space. * entropyWorkspace never moves, * though prev/next block swap places */ assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t))); zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock"); zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE); RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); } } ZSTD_cwksp_clear(ws); /* init params */ zc->appliedParams = params; zc->blockState.matchState.cParams = params.cParams; zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; zc->producedCSize = 0; if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) zc->appliedParams.fParams.contentSizeFlag = 0; DEBUGLOG(4, "pledged content size : %u ; flag : %u", (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); zc->blockSize = blockSize; XXH64_reset(&zc->xxhState, 0); zc->stage = ZSTDcs_init; zc->dictID = 0; zc->dictContentSize = 0; ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); /* ZSTD_wildcopy() is used to copy into the literals buffer, * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes. */ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); zc->seqStore.maxNbLit = blockSize; /* buffers */ zc->bufferedPolicy = zbuff; zc->inBuffSize = buffInSize; zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); zc->outBuffSize = buffOutSize; zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); /* ldm bucketOffsets table */ if (params.ldmParams.enableLdm) { /* TODO: avoid memset?
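 * (the table below holds one BYTE per bucket, i.e. 2^(hashLog -
 * bucketSizeLog) bytes, so this memset stays small next to the ldm
 * hash table reserved further down)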
*/ size_t const numBuckets = ((size_t)1) << (params.ldmParams.hashLog - params.ldmParams.bucketSizeLog); zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets); ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets); } /* sequences storage */ ZSTD_referenceExternalSequences(zc, NULL, 0); zc->seqStore.maxNbSeq = maxNbSeq; zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); FORWARD_IF_ERROR(ZSTD_reset_matchState( &zc->blockState.matchState, ws, &params.cParams, crp, needsIndexReset, ZSTD_resetTarget_CCtx), ""); /* ldm hash table */ if (params.ldmParams.enableLdm) { /* TODO: avoid memset? */ size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog; zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); zc->maxNbLdmSequences = maxNbLdmSeq; ZSTD_window_init(&zc->ldmState.window); ZSTD_window_clear(&zc->ldmState.window); zc->ldmState.loadedDictEnd = 0; } /* Due to alignment, when reusing a workspace, we can actually consume * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h */ assert(ZSTD_cwksp_used(ws) >= neededSpace && ZSTD_cwksp_used(ws) <= neededSpace + 3); DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); zc->initialized = 1; return 0; } } /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; * do not use with extDict variant ! */ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0; assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); } /* These are the approximate sizes for each strategy past which copying the * dictionary tables into the working context is faster than using them * in-place. */ static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = { 8 KB, /* unused */ 8 KB, /* ZSTD_fast */ 16 KB, /* ZSTD_dfast */ 32 KB, /* ZSTD_greedy */ 32 KB, /* ZSTD_lazy */ 32 KB, /* ZSTD_lazy2 */ 32 KB, /* ZSTD_btlazy2 */ 32 KB, /* ZSTD_btopt */ 8 KB, /* ZSTD_btultra */ 8 KB /* ZSTD_btultra2 */ }; static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, U64 pledgedSrcSize) { size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy]; int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch; return dedicatedDictSearch || ( ( pledgedSrcSize <= cutoff || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN || params->attachDictPref == ZSTD_dictForceAttach ) && params->attachDictPref != ZSTD_dictForceCopy && !params->forceWindow ); /* dictMatchState isn't correctly * handled in _enforceMaxDist */ } static size_t ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { { ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams; unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Resize working context table params for input only, since the dict * has its own tables.
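 * The attach-vs-copy decision itself is made by the caller
 * (ZSTD_resetCCtx_usingCDict, below), via ZSTD_shouldAttachDict()
 * and the attachDictSizeCutoffs table above.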
*/ /* pledgedSrcSize == 0 means 0! */ if (cdict->matchState.dedicatedDictSearch) { ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams); } params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, cdict->dictContentSize, ZSTD_cpm_attachDict); params.cParams.windowLog = windowLog; FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_makeClean, zbuff), ""); assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy); } { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc - cdict->matchState.window.base); const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit; if (cdictLen == 0) { /* don't even attach dictionaries with no contents */ DEBUGLOG(4, "skipping attaching empty dictionary"); } else { DEBUGLOG(4, "attaching dictionary into context"); cctx->blockState.matchState.dictMatchState = &cdict->matchState; /* prep working match state so dict matches never have negative indices * when they are translated to the working context's index space. */ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) { cctx->blockState.matchState.window.nextSrc = cctx->blockState.matchState.window.base + cdictEnd; ZSTD_window_clear(&cctx->blockState.matchState.window); } /* loadedDictEnd is expressed within the referential of the active context */ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit; } } cctx->dictID = cdict->dictID; cctx->dictContentSize = cdict->dictContentSize; /* copy block state */ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams; assert(!cdict->matchState.dedicatedDictSearch); DEBUGLOG(4, "copying dictionary into context"); { unsigned const windowLog = params.cParams.windowLog; assert(windowLog != 0); /* Copy only compression parameters related to tables. */ params.cParams = *cdict_cParams; params.cParams.windowLog = windowLog; FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_leaveDirty, zbuff), ""); assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog); assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog); } ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); /* copy tables */ { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog); size_t const hSize = (size_t)1 << cdict_cParams->hashLog; ZSTD_memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, hSize * sizeof(U32)); ZSTD_memcpy(cctx->blockState.matchState.chainTable, cdict->matchState.chainTable, chainSize * sizeof(U32)); } /* Zero the hashTable3, since the cdict never fills it */ { int const h3log = cctx->blockState.matchState.hashLog3; size_t const h3Size = h3log ? 
((size_t)1 << h3log) : 0; assert(cdict->matchState.hashLog3 == 0); ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); } ZSTD_cwksp_mark_tables_clean(&cctx->workspace); /* copy dictionary offsets */ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState; ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } cctx->dictID = cdict->dictID; cctx->dictContentSize = cdict->dictContentSize; /* copy block state */ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); return 0; } /* We have a choice between copying the dictionary context into the working * context, or referencing the dictionary context from the working context * in-place. We decide here which strategy to use. */ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, const ZSTD_CCtx_params* params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (unsigned)pledgedSrcSize); if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) { return ZSTD_resetCCtx_byAttachingCDict( cctx, cdict, *params, pledgedSrcSize, zbuff); } else { return ZSTD_resetCCtx_byCopyingCDict( cctx, cdict, *params, pledgedSrcSize, zbuff); } } /*! ZSTD_copyCCtx_internal() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * The "context", in this case, refers to the hash and chain tables, * entropy tables, and dictionary references. * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. * @return : 0, or an error code */ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, ZSTD_frameParameters fParams, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(5, "ZSTD_copyCCtx_internal"); RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong, "Can't copy a ctx that's not in init stage."); ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); { ZSTD_CCtx_params params = dstCCtx->requestedParams; /* Copy only compression parameters related to tables. */ params.cParams = srcCCtx->appliedParams.cParams; params.fParams = fParams; ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, ZSTDcrp_leaveDirty, zbuff); assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog); assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy); assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog); assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog); assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3); } ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace); /* copy tables */ { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog); size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; int const h3log = srcCCtx->blockState.matchState.hashLog3; size_t const h3Size = h3log ? 
((size_t)1 << h3log) : 0; ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, hSize * sizeof(U32)); ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable, srcCCtx->blockState.matchState.chainTable, chainSize * sizeof(U32)); ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3, srcCCtx->blockState.matchState.hashTable3, h3Size * sizeof(U32)); } ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace); /* copy dictionary offsets */ { const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; dstMatchState->window = srcMatchState->window; dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; } dstCCtx->dictID = srcCCtx->dictID; dstCCtx->dictContentSize = srcCCtx->dictContentSize; /* copy block state */ ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock)); return 0; } /*! ZSTD_copyCCtx() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * pledgedSrcSize==0 means "unknown". * @return : 0, or an error code */ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) { ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy; ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1); if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN); return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize, zbuff); } #define ZSTD_ROWSIZE 16 /*! ZSTD_reduceTable() : * reduce table indexes by `reducerValue`, or squash to zero. * PreserveMark preserves "unsorted mark" for btlazy2 strategy. * It must be set to a clear 0/1 value, to remove branch during inlining. * Presume table size is a multiple of ZSTD_ROWSIZE * to help auto-vectorization */ FORCE_INLINE_TEMPLATE void ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark) { int const nbRows = (int)size / ZSTD_ROWSIZE; int cellNb = 0; int rowNb; assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ assert(size < (1U<<31)); /* can be casted to int */ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the table re-use logic is sound, and that we don't * access table space that we haven't cleaned, we re-"poison" the table * space every time we mark it dirty. * * This function however is intended to operate on those dirty tables and * re-clean them. So when this function is used correctly, we can unpoison * the memory it operated on. This introduces a blind spot though, since * if we now try to operate on __actually__ poisoned memory, we will not * detect that. 
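 *
 * (For context : the workspace follows a mark-dirty / operate / mark-clean
 * protocol. ZSTD_overflowCorrectIfNeeded(), later in this file, is a
 * representative caller of this function :
 * \code
 * ZSTD_cwksp_mark_tables_dirty(ws);
 * ZSTD_reduceIndex(ms, params, correction);
 * ZSTD_cwksp_mark_tables_clean(ws);
 * \endcode
 * The sequence above is quoted from that caller purely to illustrate when
 * tables get re-poisoned and when they are unpoisoned again.)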
*/
    __msan_unpoison(table, size * sizeof(U32));
#endif

    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            if (preserveMark) {
                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
                table[cellNb] += adder;
            }
            if (table[cellNb] < reducerValue) table[cellNb] = 0;
            else table[cellNb] -= reducerValue;
            cellNb++;
    }   }
}

static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}

static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}

/*! ZSTD_reduceIndex() :
*   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
    }
    if (params->cParams.strategy != ZSTD_fast) {
        U32 const chainSize = (U32)1 << params->cParams.chainLog;
        if (params->cParams.strategy == ZSTD_btlazy2)
            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
        else
            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
    }
    if (ms->hashLog3) {
        U32 const h3Size = (U32)1 << ms->hashLog3;
        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
    }
}


/*-*******************************************************
*  Block entropic compression
*********************************************************/

/* See doc/zstd_compression_format.md for detailed format description */

void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    BYTE* const llCodeTable = seqStorePtr->llCode;
    BYTE* const ofCodeTable = seqStorePtr->ofCode;
    BYTE* const mlCodeTable = seqStorePtr->mlCode;
    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    U32 u;
    assert(nbSeq <= seqStorePtr->maxNbSeq);
    for (u=0; u<nbSeq; u++) {
        U32 const llv = sequences[u].litLength;
        U32 const mlv = sequences[u].matchLength;
        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
    }
    if (seqStorePtr->longLengthID==1)
        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
    if (seqStorePtr->longLengthID==2)
        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}

/* ZSTD_useTargetCBlockSize():
 * Returns if target compressed block size param is being used.
 * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
 * Returns 1 if true, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
{
    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
    return (cctxParams->targetCBlockSize != 0);
}

/* ZSTD_entropyCompressSequences_internal():
 * actually compresses both literals and sequences */
MEM_STATIC size_t
ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* entropyWorkspace, size_t entropyWkspSize,
                          const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    unsigned* count = (unsigned*)entropyWorkspace;
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    BYTE* seqHead;
    BYTE* lastNCount = NULL;

    entropyWorkspace = count + (MaxSeq + 1);
    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);

    DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
cctxParams->cParams.strategy, ZSTD_disableLiteralsCompression(cctxParams), op, dstCapacity, literals, litSize, entropyWorkspace, entropyWkspSize, bmi2); FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); assert(cSize <= dstCapacity); op += cSize; } /* Sequences Header */ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, dstSize_tooSmall, "Can't fit seq hdr in output buf!"); if (nbSeq < 128) { *op++ = (BYTE)nbSeq; } else if (nbSeq < LONGNBSEQ) { op[0] = (BYTE)((nbSeq>>8) + 0x80); op[1] = (BYTE)nbSeq; op+=2; } else { op[0]=0xFF; MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)); op+=3; } assert(op <= oend); if (nbSeq==0) { /* Copy the old tables over as if we repeated them */ ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); return (size_t)(op - ostart); } /* seqHead : flags for FSE encoding type */ seqHead = op++; assert(op <= oend); /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* build CTable for Literal Lengths */ { unsigned max = MaxLL; size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ DEBUGLOG(5, "Building LL table"); nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode; LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(set_basic < set_compressed && set_rle < set_compressed); assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable), entropyWorkspace, entropyWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed"); if (LLtype == set_compressed) lastNCount = op; op += countSize; assert(op <= oend); } } /* build CTable for Offsets */ { unsigned max = MaxOff; size_t const mostFrequent = HIST_countFast_wksp( count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode; Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable), entropyWorkspace, entropyWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed"); if (Offtype == set_compressed) lastNCount = op; op += countSize; assert(op <= oend); } } /* build CTable for MatchLengths */ { unsigned max = MaxML; size_t const mostFrequent = HIST_countFast_wksp( count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode; MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable( op, (size_t)(oend - op), CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable), entropyWorkspace, entropyWkspSize); FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed"); if (MLtype == set_compressed) lastNCount = op; op += countSize; assert(op <= oend); } } *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); { size_t const bitstreamSize = ZSTD_encodeSequences( op, (size_t)(oend - op), CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2); FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); op += bitstreamSize; assert(op <= oend); /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() receives a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. 
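 * Concretely : a set_compressed NCount occupies at least 2 bytes and the
 * FSE bitstream at least 1 byte, so when the last NCount immediately
 * precedes the bitstream, (op - lastNCount) can be exactly 3 ; the assert
 * below relies on this.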
*/ if (lastNCount && (op - lastNCount) < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(op - lastNCount == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } } DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart)); return (size_t)(op - ostart); } MEM_STATIC size_t ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, const ZSTD_CCtx_params* cctxParams, void* dst, size_t dstCapacity, size_t srcSize, void* entropyWorkspace, size_t entropyWkspSize, int bmi2) { size_t const cSize = ZSTD_entropyCompressSequences_internal( seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity, entropyWorkspace, entropyWkspSize, bmi2); if (cSize == 0) return 0; /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. */ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) return 0; /* block not compressed */ FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed"); /* Check compressibility */ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize); return cSize; } /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode) { static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra, ZSTD_compressBlock_btultra2 }, { ZSTD_compressBlock_fast_extDict /* default for 0 */, ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict, ZSTD_compressBlock_btultra_extDict }, { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */, ZSTD_compressBlock_fast_dictMatchState, ZSTD_compressBlock_doubleFast_dictMatchState, ZSTD_compressBlock_greedy_dictMatchState, ZSTD_compressBlock_lazy_dictMatchState, ZSTD_compressBlock_lazy2_dictMatchState, ZSTD_compressBlock_btlazy2_dictMatchState, ZSTD_compressBlock_btopt_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState }, { NULL /* default for 0 */, NULL, NULL, ZSTD_compressBlock_greedy_dedicatedDictSearch, ZSTD_compressBlock_lazy_dedicatedDictSearch, ZSTD_compressBlock_lazy2_dedicatedDictSearch, NULL, NULL, NULL, NULL } }; ZSTD_blockCompressor selectedCompressor; ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; assert(selectedCompressor != NULL); return selectedCompressor; } static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, const BYTE* anchor, size_t lastLLSize) { ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize); 
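    /* Sequences only cover the matched portion of the block : whatever
     * trails the last match is carried as raw literals, appended here. */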
seqStorePtr->lit += lastLLSize; } void ZSTD_resetSeqStore(seqStore_t* ssPtr) { ssPtr->lit = ssPtr->litStart; ssPtr->sequences = ssPtr->sequencesStart; ssPtr->longLengthID = 0; } typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) { ZSTD_matchState_t* const ms = &zc->blockState.matchState; DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize); assert(srcSize <= ZSTD_BLOCKSIZE_MAX); /* Assert that we have correctly flushed the ctx params into the ms's copy */ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) { ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); } else { ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); } return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */ } ZSTD_resetSeqStore(&(zc->seqStore)); /* required for optimal parser to read stats from dictionary */ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; /* tell the optimal parser how we expect to compress literals */ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; /* a gap between an attached dict and the current window is not safe, * they must remain adjacent, * and when that stops being the case, the dict must be unset */ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit); /* limited update after a very long match */ { const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const U32 curr = (U32)(istart-base); if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */ if (curr > ms->nextToUpdate + 384) ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384)); } /* select and store sequences */ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms); size_t lastLLSize; { int i; for (i = 0; i < ZSTD_REP_NUM; ++i) zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; } if (zc->externSeqStore.pos < zc->externSeqStore.size) { assert(!zc->appliedParams.ldmParams.enableLdm); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); } else if (zc->appliedParams.ldmParams.enableLdm) { rawSeqStore_t ldmSeqStore = kNullRawSeqStore; ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; /* Updates ldmSeqStore.size */ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, &zc->appliedParams.ldmParams, src, srcSize), ""); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&ldmSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); } else { /* not long range mode */ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode); ms->ldmSeqStore = NULL; lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); } { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); } } return ZSTDbss_compress; } static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) { const seqStore_t* seqStore = ZSTD_getSeqStore(zc); const 
seqDef* seqStoreSeqs = seqStore->sequencesStart; size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs; size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart); size_t literalsRead = 0; size_t lastLLSize; ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex]; size_t i; repcodes_t updatedRepcodes; assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences); /* Ensure we have enough space for last literals "sequence" */ assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1); ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); for (i = 0; i < seqStoreSeqSize; ++i) { U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM; outSeqs[i].litLength = seqStoreSeqs[i].litLength; outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH; outSeqs[i].rep = 0; if (i == seqStore->longLengthPos) { if (seqStore->longLengthID == 1) { outSeqs[i].litLength += 0x10000; } else if (seqStore->longLengthID == 2) { outSeqs[i].matchLength += 0x10000; } } if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) { /* Derive the correct offset corresponding to a repcode */ outSeqs[i].rep = seqStoreSeqs[i].offset; if (outSeqs[i].litLength != 0) { rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1]; } else { if (outSeqs[i].rep == 3) { rawOffset = updatedRepcodes.rep[0] - 1; } else { rawOffset = updatedRepcodes.rep[outSeqs[i].rep]; } } } outSeqs[i].offset = rawOffset; /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode so we provide seqStoreSeqs[i].offset - 1 */ updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, seqStoreSeqs[i].offset - 1, seqStoreSeqs[i].litLength == 0); literalsRead += outSeqs[i].litLength; } /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0. * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker * for the block boundary, according to the API. */ assert(seqStoreLiteralsSize >= literalsRead); lastLLSize = seqStoreLiteralsSize - literalsRead; outSeqs[i].litLength = (U32)lastLLSize; outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0; seqStoreSeqSize++; zc->seqCollector.seqIndex += seqStoreSeqSize; } size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize) { const size_t dstCapacity = ZSTD_compressBound(srcSize); void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); SeqCollector seqCollector; RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!"); seqCollector.collectSequences = 1; seqCollector.seqStart = outSeqs; seqCollector.seqIndex = 0; seqCollector.maxSequences = outSeqsSize; zc->seqCollector = seqCollector; ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); ZSTD_customFree(dst, ZSTD_defaultCMem); return zc->seqCollector.seqIndex; } size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) { size_t in = 0; size_t out = 0; for (; in < seqsSize; ++in) { if (sequences[in].offset == 0 && sequences[in].matchLength == 0) { if (in != seqsSize - 1) { sequences[in+1].litLength += sequences[in].litLength; } } else { sequences[out] = sequences[in]; ++out; } } return out; } /* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. 
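 *
 * The core trick : replicate the first byte across a machine word
 * (value * 0x0101010101010101), then compare word-sized chunks of input
 * against that pattern. A minimal stand-alone sketch of the idea
 * (illustrative only ; the real function below additionally handles the
 * unaligned prefix and unrolls by four words) :
 * \code
 * #include <stddef.h>
 * #include <string.h>
 * static int is_rle_words(const unsigned char* p, size_t n)
 * {
 *     size_t pattern, i, w;
 *     if (n == 0) return 1;
 *     pattern = (size_t)p[0] * (size_t)0x0101010101010101ULL;
 *     for (i = 0; i + sizeof(w) <= n; i += sizeof(w)) {
 *         memcpy(&w, p + i, sizeof(w));   \* safe unaligned read *\
 *         if (w != pattern) return 0;
 *     }
 *     for (; i < n; i++) if (p[i] != p[0]) return 0;
 *     return 1;
 * }
 * \endcode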
*/
static int ZSTD_isRLE(const BYTE* src, size_t length) {
    const BYTE* ip = src;
    const BYTE value = ip[0];
    const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
    const size_t unrollSize = sizeof(size_t) * 4;
    const size_t unrollMask = unrollSize - 1;
    const size_t prefixLength = length & unrollMask;
    size_t i;
    size_t u;
    if (length == 1) return 1;
    /* Check if prefix is RLE first before using unrolled loop */
    if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
        return 0;
    }
    for (i = prefixLength; i != length; i += unrollSize) {
        for (u = 0; u < unrollSize; u += sizeof(size_t)) {
            if (MEM_readST(ip + i + u) != valueST) {
                return 0;
    }   }   }
    return 1;
}

/* Returns true if the given block may be RLE.
 * This is just a heuristic based on the compressibility.
 * It may return both false positives and false negatives.
 */
static int ZSTD_maybeRLE(seqStore_t const* seqStore)
{
    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
    return nbSeqs < 4 && nbLits < 10;
}

static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
{
    ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
    zc->blockState.prevCBlock = zc->blockState.nextCBlock;
    zc->blockState.nextCBlock = tmp;
}

static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize, U32 frame)
{
    /* This is the upper bound for the length of an rle block.
     * This isn't the actual upper bound. Finding the real threshold
     * needs further investigation.
     */
    const U32 rleMaxLength = 25;
    size_t cSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
                (unsigned)zc->blockState.matchState.nextToUpdate);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
    }

    if (zc->seqCollector.collectSequences) {
        ZSTD_copyBlockSequences(zc);
        ZSTD_confirmRepcodesAndEntropyTables(zc);
        return 0;
    }

    /* encode sequences and literals */
    cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
            &zc->appliedParams,
            dst, dstCapacity,
            srcSize,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
            zc->bmi2);

    if (zc->seqCollector.collectSequences) {
        ZSTD_copyBlockSequences(zc);
        return 0;
    }

    if (frame &&
        /* We don't want to emit our first block as a RLE even if it qualifies because
         * doing so will cause the decoder (cli only) to throw a "should consume all input error."
         * This is only an issue for zstd <= v1.4.3
         */
        !zc->isFirstBlock &&
        cSize < rleMaxLength &&
        ZSTD_isRLE(ip, srcSize))
    {
        cSize = 1;
        op[0] = ip[0];
    }

out:
    if (!ZSTD_isError(cSize) && cSize > 1) {
        ZSTD_confirmRepcodesAndEntropyTables(zc);
    }
    /* We check that dictionaries have offset codes available for the first
     * block. After the first block, the offcode table might not have large
     * enough codes to represent the offsets in the data.
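     * Hence the demotion below : FSE_repeat_valid (reuse without rechecking)
     * becomes FSE_repeat_check (revalidate before reuse) once the first
     * block has been emitted.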
*/ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; return cSize; } static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const size_t bss, U32 lastBlock) { DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()"); if (bss == ZSTDbss_compress) { if (/* We don't want to emit our first block as a RLE even if it qualifies because * doing so will cause the decoder (cli only) to throw a "should consume all input error." * This is only an issue for zstd <= v1.4.3 */ !zc->isFirstBlock && ZSTD_maybeRLE(&zc->seqStore) && ZSTD_isRLE((BYTE const*)src, srcSize)) { return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock); } /* Attempt superblock compression. * * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the * standard ZSTD_compressBound(). This is a problem, because even if we have * space now, taking an extra byte now could cause us to run out of space later * and violate ZSTD_compressBound(). * * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize. * * In order to respect ZSTD_compressBound() we must attempt to emit a raw * uncompressed block in these cases: * * cSize == 0: Return code for an uncompressed block. * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize). * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of * output space. * * cSize >= blockBound(srcSize): We have expanded the block too much so * emit an uncompressed block. */ { size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); if (cSize != ERROR(dstSize_tooSmall)) { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { ZSTD_confirmRepcodesAndEntropyTables(zc); return cSize; } } } } DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); /* Superblock compression failed, attempt to emit a single no compress block. * The decoder will be able to stream this block since it is uncompressed. 
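 * For reference, a raw block costs only a 3-byte header ahead of the source
 * bytes : the 24-bit little-endian header packs the lastBlock flag in bit 0,
 * the block type in bits 1-2 and the block size in bits 3-23, as in this
 * sketch (the same layout is used by the frame loop further below) :
 * \code
 * U32 const cBlockHeader = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
 * MEM_writeLE24(dst, cBlockHeader);
 * \endcode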
*/ return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); } static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { size_t cSize = 0; const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed"); if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; return cSize; } static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, void const* ip, void const* iend) { if (ZSTD_window_needOverflowCorrection(ms->window, iend)) { U32 const maxDist = (U32)1 << params->cParams.windowLog; U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy); U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); ZSTD_cwksp_mark_tables_dirty(ws); ZSTD_reduceIndex(ms, params, correction); ZSTD_cwksp_mark_tables_clean(ws); if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; else ms->nextToUpdate -= correction; /* invalidate dictionaries on overflow correction */ ms->loadedDictEnd = 0; ms->dictMatchState = NULL; } } /*! ZSTD_compress_frameChunk() : * Compress a chunk of data into one or multiple blocks. * All blocks will be terminated, all input will be consumed. * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. 
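 * Callers can always avoid that error by allocating the worst-case size
 * for `dst` via the public bound, e.g. :
 * \code
 * size_t const dstCapacity = ZSTD_compressBound(srcSize);
 * \endcode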
* Frame is supposed already started (header already produced) * @return : compressed size, or an error code */ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastFrameChunk) { size_t blockSize = cctx->blockSize; size_t remaining = srcSize; const BYTE* ip = (const BYTE*)src; BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); if (cctx->appliedParams.fParams.checksumFlag && srcSize) XXH64_update(&cctx->xxhState, src, srcSize); while (remaining) { ZSTD_matchState_t* const ms = &cctx->blockState.matchState; U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, dstSize_tooSmall, "not enough space to store compressed block"); if (remaining < blockSize) blockSize = remaining; ZSTD_overflowCorrectIfNeeded( ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; { size_t cSize; if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) { cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed"); assert(cSize > 0); assert(cSize <= blockSize + ZSTD_blockHeaderSize); } else { cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize, 1 /* frame */); FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed"); if (cSize == 0) { /* block is not compressible */ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); } else { U32 const cBlockHeader = cSize == 1 ? lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } } ip += blockSize; assert(remaining >= blockSize); remaining -= blockSize; op += cSize; assert(dstCapacity >= cSize); dstCapacity -= cSize; cctx->isFirstBlock = 0; DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", (unsigned)cSize); } } if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; return (size_t)(op-ostart); } static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID) { BYTE* const op = (BYTE*)dst; U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ U32 const checksumFlag = params->fParams.checksumFlag>0; U32 const windowSize = (U32)1 << params->cParams.windowLog; U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); U32 const fcsCode = params->fParams.contentSizeFlag ? 
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
                    "dst buf is too small to fit worst-case frame header size.");
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
    if (params->format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:  assert(0); /* impossible */
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:  assert(0); /* impossible */
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}

/* ZSTD_writeSkippableFrame() :
 * Writes out a skippable frame with the specified magic number variant (16 are supported),
 * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
 *
 * Returns the total number of bytes written, or a ZSTD error code.
 */
size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize, unsigned magicVariant) {
    BYTE* op = (BYTE*)dst;
    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
                    dstSize_tooSmall, "Not enough room for skippable frame");
    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");

    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
    MEM_writeLE32(op+4, (U32)srcSize);
    ZSTD_memcpy(op+8, src, srcSize);
    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}

/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
                    "dst buf is too small to write frame trailer empty block.");
    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
        MEM_writeLE24(dst, cBlockHeader24);
        return ZSTD_blockHeaderSize;
    }
}

size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
                    "wrong cctx stage");
    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
                    parameter_unsupported,
                    "incompatible with ldm");
    cctx->externSeqStore.seq = seq;
    cctx->externSeqStore.size = nbSeq;
    cctx->externSeqStore.capacity = nbSeq;
    cctx->externSeqStore.pos = 0;
    cctx->externSeqStore.posInSequence = 0;
    return 0;
}

static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                               U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
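                                       /* pledgedSrcSizePlusOne stores pledgedSrcSize+1,
                                        * so that 0 can mean "unknown / not set" ;
                                        * hence the -1 on the next argument */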
cctx->pledgedSrcSizePlusOne-1, cctx->dictID); FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); assert(fhSize <= dstCapacity); dstCapacity -= fhSize; dst = (char*)dst + fhSize; cctx->stage = ZSTDcs_ongoing; } if (!srcSize) return fhSize; /* do not generate an empty block if no input */ if (!ZSTD_window_update(&ms->window, src, srcSize)) { ms->nextToUpdate = ms->window.dictLimit; } if (cctx->appliedParams.ldmParams.enableLdm) { ZSTD_window_update(&cctx->ldmState.window, src, srcSize); } if (!frame) { /* overflow check and correction for block mode */ ZSTD_overflowCorrectIfNeeded( ms, &cctx->workspace, &cctx->appliedParams, src, (BYTE const*)src + srcSize); } DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize); { size_t const cSize = frame ? ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */); FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed"); cctx->consumedSrcSize += srcSize; cctx->producedCSize += (cSize + fhSize); assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); RETURN_ERROR_IF( cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne, srcSize_wrong, "error : pledgedSrcSize = %u, while realSrcSize >= %u", (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize); } return cSize + fhSize; } } size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); } size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) { ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; assert(!ZSTD_checkCParams(cParams)); return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); } size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); } /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, const void* src, size_t srcSize, ZSTD_dictTableLoadMethod_e dtlm) { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; ZSTD_window_update(&ms->window, src, srcSize); ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); if (params->ldmParams.enableLdm && ls != NULL) { ZSTD_window_update(&ls->window, src, srcSize); ls->loadedDictEnd = params->forceWindow ? 
0 : (U32)(iend - ls->window.base);
    }

    /* Assert that the ms params match the params we're being given */
    ZSTD_assertEqualCParams(params->cParams, ms->cParams);

    if (srcSize <= HASH_READ_SIZE) return 0;

    while (iend - ip > HASH_READ_SIZE) {
        size_t const remaining = (size_t)(iend - ip);
        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
        const BYTE* const ichunk = ip + chunk;

        ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);

        if (params->ldmParams.enableLdm && ls != NULL)
            ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);

        switch(params->cParams.strategy)
        {
        case ZSTD_fast:
            ZSTD_fillHashTable(ms, ichunk, dtlm);
            break;
        case ZSTD_dfast:
            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
            break;

        case ZSTD_greedy:
        case ZSTD_lazy:
        case ZSTD_lazy2:
            if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
                assert(chunk == remaining); /* must load everything in one go */
                ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
            } else if (chunk >= HASH_READ_SIZE) {
                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
            }
            break;

        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
        case ZSTD_btopt:
        case ZSTD_btultra:
        case ZSTD_btultra2:
            if (chunk >= HASH_READ_SIZE)
                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
            break;

        default:
            assert(0);  /* not possible : not a valid strategy id */
        }

        ip = ichunk;
    }

    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}


/* Dictionaries that assign zero probability to symbols that show up cause problems
 * during FSE encoding. Mark dictionaries with zero-probability symbols as FSE_repeat_check;
 * only dictionaries whose symbols are 100% valid can be assumed valid. */
static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
    U32 s;
    if (dictMaxSymbolValue < maxSymbolValue) {
        return FSE_repeat_check;
    }
    for (s = 0; s <= maxSymbolValue; ++s) {
        if (normalizedCounter[s] == 0) {
            return FSE_repeat_check;
    }   }
    return FSE_repeat_valid;
}

size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
                         const void* const dict, size_t dictSize)
{
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
    const BYTE* const dictEnd = dictPtr + dictSize;
    dictPtr += 8;
    bs->entropy.huf.repeatMode = HUF_repeat_check;

    {   unsigned maxSymbolValue = 255;
        unsigned hasZeroWeights = 1;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
            dictEnd-dictPtr, &hasZeroWeights);

        /* We only set the loaded table as valid if it contains all non-zero
         * weights.
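         * (A zero weight gives a symbol no Huffman code at all, so a block
         * containing that symbol could not be encoded with the loaded table.)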
Otherwise, we set it to check */ if (!hasZeroWeights) bs->entropy.huf.repeatMode = HUF_repeat_valid; RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, ""); dictPtr += hufHeaderSize; } { unsigned offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); /* fill all offset symbols to avoid garbage at end of table */ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( bs->entropy.fse.offcodeCTable, offcodeNCount, MaxOff, offcodeLog, workspace, HUF_WORKSPACE_SIZE)), dictionary_corrupted, ""); /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE)), dictionary_corrupted, ""); bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML); dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE)), dictionary_corrupted, ""); bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL); dictPtr += litlengthHeaderSize; } RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); bs->rep[0] = MEM_readLE32(dictPtr+0); bs->rep[1] = MEM_readLE32(dictPtr+4); bs->rep[2] = MEM_readLE32(dictPtr+8); dictPtr += 12; { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); U32 offcodeMax = MaxOff; if (dictContentSize <= ((U32)-1) - 128 KB) { U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ } /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */ bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)); /* All repCodes must be <= dictContentSize and != 0 */ { U32 u; for (u=0; u<3; u++) { RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, ""); RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, ""); } } } return dictPtr - (const BYTE*)dict; } /* Dictionary format : * See : * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format */ /*! 
ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed >= 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_cwksp* ws,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    size_t dictID;
    size_t eSize;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize >= 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr + 4 /* skip magic number */ );
    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
    dictPtr += eSize;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
    }
    return dictID;
}

/** ZSTD_compress_insertDictionary() :
*   @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
                               ZSTD_matchState_t* ms,
                               ldmState_t* ls,
                               ZSTD_cwksp* ws,
                         const ZSTD_CCtx_params* params,
                         const void* dict, size_t dictSize,
                               ZSTD_dictContentType_e dictContentType,
                               ZSTD_dictTableLoadMethod_e dtlm,
                               void* workspace)
{
    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
    if ((dict==NULL) || (dictSize<8)) {
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        return 0;
    }

    ZSTD_reset_compressedBlockState(bs);

    /* dict restricted modes */
    if (dictContentType == ZSTD_dct_rawContent)
        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);

    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
        if (dictContentType == ZSTD_dct_auto) {
            DEBUGLOG(4, "raw content dictionary detected");
            return ZSTD_loadDictionaryContent(
                ms, ls, ws, params, dict, dictSize, dtlm);
        }
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
        assert(0);   /* impossible */
    }

    /* dict as full zstd dictionary */
    return ZSTD_loadZstdDictionary(
        bs, ms, ws, params, dict, dictSize, dtlm, workspace);
}

#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)

/*!
ZSTD_compressBegin_internal() :
 * @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
                                    ZSTD_buffered_policy_e zbuff)
{
#if ZSTD_TRACE
    cctx->traceCtx = ZSTD_trace_compress_begin(cctx);
#endif
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if ( (cdict)
      && (cdict->dictContentSize > 0)
      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
        || cdict->compressionLevel == 0)
      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
    }

    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
                                     ZSTDcrp_makeClean, zbuff) , "");
    {   size_t const dictID = cdict ?
                ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
                        cdict->dictContentSize, cdict->dictContentType, dtlm,
                        cctx->entropyWorkspace)
              : ZSTD_compress_insertDictionary(
                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
                        dictContentType, dtlm, cctx->entropyWorkspace);
        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
        assert(dictID <= UINT_MAX);
        cctx->dictID = (U32)dictID;
        cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
    }
    return 0;
}

size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params,
                                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
    /* compression parameters verification and optimization */
    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
    return ZSTD_compressBegin_internal(cctx,
                                       dict, dictSize, dictContentType, dtlm,
                                       cdict,
                                       params, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/*! ZSTD_compressBegin_advanced() :
*   @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
                             const void* dict, size_t dictSize,
                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compressBegin_advanced_internal(cctx,
                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                            NULL /*cdict*/,
                                            &cctxParams, pledgedSrcSize);
}

size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_CCtx_params cctxParams;
    {   ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ?
ZSTD_CLEVEL_DEFAULT : compressionLevel); } DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); } size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); } /*! ZSTD_writeEpilogue() : * Ends a frame. * @return : nb of bytes written into dst (or an error code) */ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) { BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; size_t fhSize = 0; DEBUGLOG(4, "ZSTD_writeEpilogue"); RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing"); /* special case : empty frame */ if (cctx->stage == ZSTDcs_init) { fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); dstCapacity -= fhSize; op += fhSize; cctx->stage = ZSTDcs_ongoing; } if (cctx->stage != ZSTDcs_ending) { /* write one last empty block, make it the "last" block */ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue"); MEM_writeLE32(op, cBlockHeader24); op += ZSTD_blockHeaderSize; dstCapacity -= ZSTD_blockHeaderSize; } if (cctx->appliedParams.fParams.checksumFlag) { U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum); MEM_writeLE32(op, checksum); op += 4; } cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ return op-ostart; } void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) { #if ZSTD_TRACE if (cctx->traceCtx) { int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0; ZSTD_Trace trace; ZSTD_memset(&trace, 0, sizeof(trace)); trace.version = ZSTD_VERSION_NUMBER; trace.streaming = streaming; trace.dictionaryID = cctx->dictID; trace.dictionarySize = cctx->dictContentSize; trace.uncompressedSize = cctx->consumedSrcSize; trace.compressedSize = cctx->producedCSize + extraCSize; trace.params = &cctx->appliedParams; trace.cctx = cctx; ZSTD_trace_compress_end(cctx->traceCtx, &trace); } cctx->traceCtx = 0; #else (void)cctx; (void)extraCSize; #endif } size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t endResult; size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 1 /* last chunk */); FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed"); endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed"); assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); DEBUGLOG(4, "end of frame : controlling src size"); RETURN_ERROR_IF( cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1, srcSize_wrong, "error : pledgedSrcSize = %u, while realSrcSize = %u", (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize); } ZSTD_CCtx_trace(cctx, endResult); return cSize + endResult; } size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, void* 
dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const void* dict, size_t dictSize,
                               ZSTD_parameters params)
{
    ZSTD_CCtx_params cctxParams;
    DEBUGLOG(4, "ZSTD_compress_advanced");
    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
    return ZSTD_compress_advanced_internal(cctx,
                                           dst, dstCapacity,
                                           src, srcSize,
                                           dict, dictSize,
                                           &cctxParams);
}

/* Internal */
size_t ZSTD_compress_advanced_internal(
        ZSTD_CCtx* cctx,
        void* dst, size_t dstCapacity,
        const void* src, size_t srcSize,
        const void* dict, size_t dictSize,
        const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                         params, srcSize, ZSTDb_not_buffered) , "");
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         const void* dict, size_t dictSize,
                               int compressionLevel)
{
    ZSTD_CCtx_params cctxParams;
    {   ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
        assert(params.fParams.contentSizeFlag == 1);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
    }
    DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
}

size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize,
                         int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
    assert(cctx != NULL);
    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}

size_t ZSTD_compress(void* dst, size_t dstCapacity,
               const void* src, size_t srcSize,
                     int compressionLevel)
{
    size_t result;
#if ZSTD_COMPRESS_HEAPMODE
    ZSTD_CCtx* cctx = ZSTD_createCCtx();
    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_freeCCtx(cctx);
#else
    ZSTD_CCtx ctxBody;
    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
#endif
    return result;
}


/* =====  Dictionary API  ===== */

/*! ZSTD_estimateCDictSize_advanced() :
 *  Estimate amount of memory that will be needed to create a dictionary with the following arguments */
size_t ZSTD_estimateCDictSize_advanced(
        size_t dictSize, ZSTD_compressionParameters cParams,
        ZSTD_dictLoadMethod_e dictLoadMethod)
{
    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
         + (dictLoadMethod == ZSTD_dlm_byRef ?
/* ===== Dictionary API ===== */

/*! ZSTD_estimateCDictSize_advanced() :
 *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
size_t ZSTD_estimateCDictSize_advanced(
        size_t dictSize, ZSTD_compressionParameters cParams,
        ZSTD_dictLoadMethod_e dictLoadMethod)
{
    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}

size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}

size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support sizeof on NULL */
    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
    /* cdict may be in the workspace */
    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
         + ZSTD_cwksp_sizeof(&cdict->workspace);
}

static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_CCtx_params params)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(params.cParams));
    cdict->matchState.cParams = params.cParams;
    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
    if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
        cdict->matchState.dedicatedDictSearch = 0;
    }
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictContent = dictBuffer;
    } else {
        void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
        cdict->dictContent = internalBuffer;
        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;
    cdict->dictContentType = dictContentType;

    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    FORWARD_IF_ERROR(ZSTD_reset_matchState(
        &cdict->matchState,
        &cdict->workspace,
        &params.cParams,
        ZSTDcrp_makeClean,
        ZSTDirp_reset,
        ZSTD_resetTarget_CDict), "");
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is < 8 bytes.
     */
    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
                    &params, cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}

static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_customMem customMem)
{
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;

    {   size_t const workspaceSize =
            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
            ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
            (dictLoadMethod == ZSTD_dlm_byRef ?
                0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
        ZSTD_cwksp ws;
        ZSTD_CDict* cdict;

        if (!workspace) {
            ZSTD_customFree(workspace, customMem);
            return NULL;
        }

        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);

        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        assert(cdict != NULL);
        ZSTD_cwksp_move(&cdict->workspace, &ws);
        cdict->customMem = customMem;
        cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */

        return cdict;
    }
}

ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams,
                                      ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams;
    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
    ZSTD_CCtxParams_init(&cctxParams, 0);
    cctxParams.cParams = cParams;
    cctxParams.customMem = customMem;
    return ZSTD_createCDict_advanced2(
        dictBuffer, dictSize,
        dictLoadMethod, dictContentType,
        &cctxParams, customMem);
}

ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
        const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod,
        ZSTD_dictContentType_e dictContentType,
        const ZSTD_CCtx_params* originalCctxParams,
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params cctxParams = *originalCctxParams;
    ZSTD_compressionParameters cParams;
    ZSTD_CDict* cdict;

    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    if (cctxParams.enableDedicatedDictSearch) {
        cParams = ZSTD_dedicatedDictSearch_getCParams(
            cctxParams.compressionLevel, dictSize);
        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
    } else {
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
        /* Fall back to non-DDSS params */
        cctxParams.enableDedicatedDictSearch = 0;
        cParams = ZSTD_getCParamsFromCCtxParams(
            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    }

    cctxParams.cParams = cParams;

    cdict = ZSTD_createCDict_advanced_internal(dictSize,
                        dictLoadMethod, cctxParams.cParams,
                        customMem);

    if (!cdict   /* workspace allocation failed : don't dereference a NULL cdict */
      || ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                    dict, dictSize,
                                    dictLoadMethod, dictContentType,
                                    cctxParams) )) {
        ZSTD_freeCDict(cdict);
        return NULL;
    }

    return cdict;
}

ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                                  cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}

ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
                                     cParams, ZSTD_defaultCMem);
    if (cdict)
        cdict->compressionLevel = (compressionLevel == 0) ?
            ZSTD_CLEVEL_DEFAULT : compressionLevel;
    return cdict;
}

size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = cdict->customMem;
        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
        ZSTD_cwksp_free(&cdict->workspace, cMem);
        if (!cdictInWorkspace) {
            ZSTD_customFree(cdict, cMem);
        }
        return 0;
    }
}

/*! ZSTD_initStaticCDict() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             Provided pointer must be 8-bytes aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevant cParams.
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
                            + matchStateSize;
    ZSTD_CDict* cdict;
    ZSTD_CCtx_params params;

    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */

    {
        ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
        if (cdict == NULL) return NULL;
        ZSTD_cwksp_move(&cdict->workspace, &ws);
    }

    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    ZSTD_CCtxParams_init(&params, 0);
    params.cParams = cParams;

    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              dictLoadMethod, dictContentType,
                                              params) ))
        return NULL;

    return cdict;
}

ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
    assert(cdict != NULL);
    return cdict->matchState.cParams;
}

/*! ZSTD_getDictID_fromCDict() :
 *  Provides the dictID of the dictionary loaded into `cdict`.
 *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;
    return cdict->dictID;
}
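/* Illustrative static-allocation sketch (not part of the library;
 * `dictBuffer`/`dictSize` are assumed to hold a prepared dictionary,
 * and error handling is elided) :
 * \code
 * ZSTD_compressionParameters const cParams =
 *     ZSTD_getCParams(3, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
 * size_t const wkspSize = ZSTD_estimateCDictSize(dictSize, 3);
 * void*  const wksp     = malloc(wkspSize);   // malloc() result is suitably aligned
 * const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(wksp, wkspSize,
 *                                     dictBuffer, dictSize,
 *                                     ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
 * \endcode
 */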
/* ZSTD_compressBegin_usingCDict_advanced() :
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    ZSTD_CCtx_params cctxParams;
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
    /* Initialize the cctxParams from the cdict */
    {   ZSTD_parameters params;
        params.fParams = fParams;
        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
                        || cdict->compressionLevel == 0 ) ?
                ZSTD_getCParamsFromCDict(cdict)
              : ZSTD_getCParams(cdict->compressionLevel,
                                pledgedSrcSize,
                                cdict->dictContentSize);
        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
    }
    /* Increase window log to fit the entire dictionary and source if the
     * source size is known. Limit the increase to 19, which is the
     * window log for compression level 1 with the largest source size.
     */
    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
    }
    return ZSTD_compressBegin_internal(cctx,
                                       NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                       cdict,
                                       &cctxParams, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/* ZSTD_compressBegin_usingCDict() :
 * pledgedSrcSize=0 means "unknown"
 * if pledgedSrcSize>0, it will enable contentSizeFlag */
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}

size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), "");   /* will check if cdict != NULL */
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

/*! ZSTD_compress_usingCDict() :
 *  Compression using a digested Dictionary.
 *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
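 *  Illustrative sketch (not part of the library; `dictBuffer`/`dictSize` are
 *  assumed to hold a prepared dictionary, error handling elided) :
 *  \code
 *  ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
 *  size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
 *                                                src, srcSize, cdict);
 *  ZSTD_freeCDict(cdict);
 *  \endcode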
* Note that compression parameters are decided at CDict creation time * while frame parameters are hardcoded */ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); } /* ****************************************************************** * Streaming ********************************************************************/ ZSTD_CStream* ZSTD_createCStream(void) { DEBUGLOG(3, "ZSTD_createCStream"); return ZSTD_createCStream_advanced(ZSTD_defaultCMem); } ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticCCtx(workspace, workspaceSize); } ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) { /* CStream and CCtx are now same object */ return ZSTD_createCCtx_advanced(customMem); } size_t ZSTD_freeCStream(ZSTD_CStream* zcs) { return ZSTD_freeCCtx(zcs); /* same object */ } /*====== Initialization ======*/ size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; } static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) { if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) return ZSTD_cpm_attachDict; else return ZSTD_cpm_noAttachDict; } /* ZSTD_resetCStream(): * pledgedSrcSize == 0 means "unknown" */ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss) { /* temporary : 0 interpreted as "unknown" during transition period. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. * 0 will be interpreted as "empty" in the future. */ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); return 0; } /*! ZSTD_initCStream_internal() : * Note : for lib/compress only. Used by zstdmt_compress.c. 
 *  Assumption 1 : params are valid
 *  Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    const ZSTD_CCtx_params* params,
                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
    zcs->requestedParams = *params;
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
    if (dict) {
        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    } else {
        /* Dictionary is cleared if !cdict */
        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    }
    return 0;
}

/* ZSTD_initCStream_usingCDict_advanced() :
 * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
                                            const ZSTD_CDict* cdict,
                                            ZSTD_frameParameters fParams,
                                            unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    zcs->requestedParams.fParams = fParams;
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    return 0;
}

/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
    return 0;
}

/* ZSTD_initCStream_advanced() :
 * pledgedSrcSize must be exact.
 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
 * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                 const void* dict, size_t dictSize,
                                 ZSTD_parameters params, unsigned long long pss)
{
    /* for compatibility with older programs relying on this behavior.
     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
     * This line will be removed in the future.
     */
    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    DEBUGLOG(4, "ZSTD_initCStream_advanced");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    return 0;
}

size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
    return 0;
}

size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period.
     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
     * 0 will be interpreted as "empty" in the future.
     */
    U64 const pledgedSrcSize = (pss==0) ?
ZSTD_CONTENTSIZE_UNKNOWN : pss; DEBUGLOG(4, "ZSTD_initCStream_srcSize"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); return 0; } size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) { DEBUGLOG(4, "ZSTD_initCStream"); FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); return 0; } /*====== Compression ======*/ static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; if (hintInSize==0) hintInSize = cctx->blockSize; return hintInSize; } /** ZSTD_compressStream_generic(): * internal function for all *compressStream*() variants * non-static, because can be called from zstdmt_compress.c * @return : hint size for next input */ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective const flushMode) { const char* const istart = (const char*)input->src; const char* const iend = input->size != 0 ? istart + input->size : istart; const char* ip = input->pos != 0 ? istart + input->pos : istart; char* const ostart = (char*)output->dst; char* const oend = output->size != 0 ? ostart + output->size : ostart; char* op = output->pos != 0 ? ostart + output->pos : ostart; U32 someMoreWork = 1; /* check expectations */ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode); if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { assert(zcs->inBuff != NULL); assert(zcs->inBuffSize > 0); } if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) { assert(zcs->outBuff != NULL); assert(zcs->outBuffSize > 0); } assert(output->pos <= output->size); assert(input->pos <= input->size); assert((U32)flushMode <= (U32)ZSTD_e_end); while (someMoreWork) { switch(zcs->streamStage) { case zcss_init: RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!"); case zcss_load: if ( (flushMode == ZSTD_e_end) && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */ && (zcs->inBuffPos == 0) ) { /* shortcut to compression pass directly into output buffer */ size_t const cSize = ZSTD_compressEnd(zcs, op, oend-op, ip, iend-ip); DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); ip = iend; op += cSize; zcs->frameEnded = 1; ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); someMoreWork = 0; break; } /* complete loading into inBuffer in buffered mode */ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; size_t const loaded = ZSTD_limitCopy( zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); zcs->inBuffPos += loaded; if (loaded != 0) ip += loaded; if ( (flushMode == ZSTD_e_continue) && (zcs->inBuffPos < zcs->inBuffTarget) ) { /* not enough input to fill full block : stop here */ someMoreWork = 0; break; } if ( (flushMode == ZSTD_e_flush) && (zcs->inBuffPos == zcs->inToCompress) ) { /* empty */ someMoreWork = 0; break; } } /* compress current block (note : this stage cannot be stopped in the middle) */ 
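            /* Destination choice for this block : if the caller's buffer can
             * hold a worst-case compressed block (or the output buffer is
             * declared stable), compress straight into it and skip the flush
             * stage; otherwise compress into the internal outBuff and fall
             * through to zcss_flush. */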
DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered); void* cDst; size_t cSize; size_t oSize = oend-op; size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress : MIN((size_t)(iend - ip), zcs->blockSize); if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) cDst = op; /* compress into output buffer, to skip flush stage */ else cDst = zcs->outBuff, oSize = zcs->outBuffSize; if (inputBuffered) { unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); cSize = lastBlock ? ZSTD_compressEnd(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : ZSTD_compressContinue(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; /* prepare next block */ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; if (zcs->inBuffTarget > zcs->inBuffSize) zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); if (!lastBlock) assert(zcs->inBuffTarget <= zcs->inBuffSize); zcs->inToCompress = zcs->inBuffPos; } else { unsigned const lastBlock = (ip + iSize == iend); assert(flushMode == ZSTD_e_end /* Already validated */); cSize = lastBlock ? ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) : ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize); /* Consume the input prior to error checking to mirror buffered mode. */ if (iSize > 0) ip += iSize; FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; if (lastBlock) assert(ip == iend); } if (cDst == op) { /* no need to flush */ op += cSize; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed directly in outBuffer"); someMoreWork = 0; ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); } break; } zcs->outBuffContentSize = cSize; zcs->outBuffFlushedSize = 0; zcs->streamStage = zcss_flush; /* pass-through to flush stage */ } /* fall-through */ case zcss_flush: DEBUGLOG(5, "flush stage"); assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered); { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op), zcs->outBuff + zcs->outBuffFlushedSize, toFlush); DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed); if (flushed) op += flushed; zcs->outBuffFlushedSize += flushed; if (toFlush!=flushed) { /* flush not fully completed, presumably because dst is too small */ assert(op==oend); someMoreWork = 0; break; } zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed on flush"); someMoreWork = 0; ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); break; } zcs->streamStage = zcss_load; break; } default: /* impossible */ assert(0); } } input->pos = ip - istart; output->pos = op - ostart; if (zcs->frameEnded) return 0; return ZSTD_nextInputSizeHint(zcs); } static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx) { #ifdef ZSTD_MULTITHREAD if (cctx->appliedParams.nbWorkers >= 1) { assert(cctx->mtctx != NULL); return ZSTDMT_nextInputSizeHint(cctx->mtctx); } #endif return ZSTD_nextInputSizeHint(cctx); } size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { FORWARD_IF_ERROR( 
ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , ""); return ZSTD_nextInputSizeHint_MTorST(zcs); } /* After a compression call set the expected input/output buffer. * This is validated at the start of the next compression call. */ static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input) { if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { cctx->expectedInBuffer = *input; } if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { cctx->expectedOutBufferSize = output->size - output->pos; } } /* Validate that the input/output buffers match the expectations set by * ZSTD_setBufferExpectations. */ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input, ZSTD_EndDirective endOp) { if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { ZSTD_inBuffer const expect = cctx->expectedInBuffer; if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size) RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!"); if (endOp != ZSTD_e_end) RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!"); } if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { size_t const outBufferSize = output->size - output->pos; if (cctx->expectedOutBufferSize != outBufferSize) RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!"); } return 0; } static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, ZSTD_EndDirective endOp, size_t inSize) { ZSTD_CCtx_params params = cctx->requestedParams; ZSTD_prefixDict const prefixDict = cctx->prefixDict; FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ if (cctx->cdict) params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */ { size_t const dictSize = prefixDict.dict ? prefixDict.dictSize : (cctx->cdict ? 
                                                     cctx->cdict->dictContentSize : 0);
        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &params, cctx->pledgedSrcSizePlusOne-1,
                dictSize, mode);
    }

    if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
        /* Enable LDM by default for optimal parser and window size >= 128MB */
        DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
        params.ldmParams.enableLdm = 1;
    }

#ifdef ZSTD_MULTITHREAD
    if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
        params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
    }
    if (params.nbWorkers > 0) {
#if ZSTD_TRACE
        cctx->traceCtx = ZSTD_trace_compress_begin(cctx);
#endif
        /* mt context creation */
        if (cctx->mtctx == NULL) {
            DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                        params.nbWorkers);
            cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem, cctx->pool);
            RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!");
        }
        /* mt compression */
        DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
        FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
                    cctx->mtctx,
                    prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                    cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , "");
        cctx->dictID = cctx->cdict ? cctx->cdict->dictID : 0;
        cctx->dictContentSize = cctx->cdict ? cctx->cdict->dictContentSize : prefixDict.dictSize;
        cctx->consumedSrcSize = 0;
        cctx->producedCSize = 0;
        cctx->streamStage = zcss_load;
        cctx->appliedParams = params;
    } else
#endif
    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
                cctx->cdict,
                &params, pledgedSrcSize,
                ZSTDb_buffered) , "");
        assert(cctx->appliedParams.nbWorkers == 0);
        cctx->inToCompress = 0;
        cctx->inBuffPos = 0;
        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
            /* for small input: avoid automatic flush on reaching end of block, since
             * it would require to add a 3-bytes null block to end frame
             */
            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
        } else {
            cctx->inBuffTarget = 0;
        }
        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
        cctx->streamStage = zcss_load;
        cctx->frameEnded = 0;
    }
    return 0;
}

size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                             ZSTD_outBuffer* output,
                             ZSTD_inBuffer* input,
                             ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
    /* check conditions */
    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
    RETURN_ERROR_IF(input->pos  > input->size, srcSize_wrong, "invalid input buffer");
    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
    assert(cctx != NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */
    }
    /* end of transparent initialization stage */

    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        size_t flushMin;
        if (cctx->cParamsChanged) {
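            /* compression parameters were updated mid-stream :
             * forward the new parameters to the worker threads */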
ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); cctx->cParamsChanged = 0; } for (;;) { size_t const ipos = input->pos; size_t const opos = output->pos; flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); cctx->consumedSrcSize += (U64)(input->pos - ipos); cctx->producedCSize += (U64)(output->pos - opos); if ( ZSTD_isError(flushMin) || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ if (flushMin == 0) ZSTD_CCtx_trace(cctx, 0); ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); } FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed"); if (endOp == ZSTD_e_continue) { /* We only require some progress with ZSTD_e_continue, not maximal progress. * We're done if we've consumed or produced any bytes, or either buffer is * full. */ if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size) break; } else { assert(endOp == ZSTD_e_flush || endOp == ZSTD_e_end); /* We require maximal progress. We're done when the flush is complete or the * output buffer is full. */ if (flushMin == 0 || output->pos == output->size) break; } } DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic"); /* Either we don't require maximum forward progress, we've finished the * flush, or we are out of output space. */ assert(endOp == ZSTD_e_continue || flushMin == 0 || output->pos == output->size); ZSTD_setBufferExpectations(cctx, output, input); return flushMin; } #endif FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); DEBUGLOG(5, "completed ZSTD_compressStream2"); ZSTD_setBufferExpectations(cctx, output, input); return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ } size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; ZSTD_inBuffer input = { src, srcSize, *srcPos }; /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); *dstPos = output.pos; *srcPos = input.pos; return cErr; } size_t ZSTD_compress2(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode; ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode; DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize); ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); /* Enable stable input/output buffers. */ cctx->requestedParams.inBufferMode = ZSTD_bm_stable; cctx->requestedParams.outBufferMode = ZSTD_bm_stable; { size_t oPos = 0; size_t iPos = 0; size_t const result = ZSTD_compressStream2_simpleArgs(cctx, dst, dstCapacity, &oPos, src, srcSize, &iPos, ZSTD_e_end); /* Reset to the original values. 
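     * Buffer modes were forced to ZSTD_bm_stable above for this one-shot call;
     * restoring the caller's settings keeps the context reusable afterwards.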
         */
        cctx->requestedParams.inBufferMode = originalInBufferMode;
        cctx->requestedParams.outBufferMode = originalOutBufferMode;

        FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
        if (result != 0) {  /* compression not completed, due to lack of output space */
            assert(oPos == dstCapacity);
            RETURN_ERROR(dstSize_tooSmall, "");
        }
        assert(iPos == srcSize);   /* all input is expected consumed */
        return oPos;
    }
}

typedef struct {
    U32 idx;             /* Index in array of ZSTD_Sequence */
    U32 posInSequence;   /* Position within sequence at idx */
    size_t posInSrc;     /* Number of bytes given by sequences provided so far */
} ZSTD_sequencePosition;

/* Returns a ZSTD error code if sequence is not valid */
static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
                                    size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch)
{
    size_t offsetBound;
    U32 windowSize = 1u << windowLog;   /* 1u : avoids undefined behavior when windowLog == 31 */
    /* posInSrc represents the amount of data the decoder would decode up to this point.
     * As long as the amount of data decoded is less than or equal to window size, offsets may be
     * larger than the total length of output decoded in order to reference the dict, even larger than
     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
     */
    offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
    RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
    RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
    return 0;
}

/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
{
    U32 offCode = rawOffset + ZSTD_REP_MOVE;
    U32 repCode = 0;

    if (!ll0 && rawOffset == rep[0]) {
        repCode = 1;
    } else if (rawOffset == rep[1]) {
        repCode = 2 - ll0;
    } else if (rawOffset == rep[2]) {
        repCode = 3 - ll0;
    } else if (ll0 && rawOffset == rep[0] - 1) {
        repCode = 3;
    }
    if (repCode) {
        /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
        offCode = repCode - 1;
    }
    return offCode;
}

/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
 * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
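 * A block delimiter is a ZSTD_Sequence with offset == 0 and matchLength == 0;
 * its litLength field carries the block's trailing literals, if any.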
 */
static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                                             const void* src, size_t blockSize)
{
    U32 idx = seqPos->idx;
    BYTE const* ip = (BYTE const*)(src);
    const BYTE* const iend = ip + blockSize;
    repcodes_t updatedRepcodes;
    U32 dictSize;
    U32 litLength;
    U32 matchLength;
    U32 ll0;
    U32 offCode;

    if (cctx->cdict) {
        dictSize = (U32)cctx->cdict->dictContentSize;
    } else if (cctx->prefixDict.dict) {
        dictSize = (U32)cctx->prefixDict.dictSize;
    } else {
        dictSize = 0;
    }
    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
    /* check idx bounds first, so the delimiter test never reads past the end of inSeqs */
    for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
        litLength = inSeqs[idx].litLength;
        matchLength = inSeqs[idx].matchLength;
        ll0 = litLength == 0;
        offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);

        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
        if (cctx->appliedParams.validateSequences) {
            seqPos->posInSrc += litLength + matchLength;
            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
                                                cctx->appliedParams.cParams.windowLog, dictSize,
                                                cctx->appliedParams.cParams.minMatch),
                                                "Sequence validation failed");
        }
        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
        ip += matchLength + litLength;
    }
    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));

    if (inSeqs[idx].litLength) {
        DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
        ip += inSeqs[idx].litLength;
        seqPos->posInSrc += inSeqs[idx].litLength;
    }
    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
    seqPos->idx = idx+1;
    return 0;
}

/* Returns the number of bytes to move the current read position back by. Only non-zero
 * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
 * went wrong.
 *
 * This function will attempt to scan through blockSize bytes represented by the sequences
 * in inSeqs, storing any (partial) sequences.
 *
 * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
 * avoid splitting a match, or to avoid splitting a match such that it would produce a match
 * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
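 * For example, if the final match of the block would be split so that its second
 * half is shorter than minMatch, the split point is moved back (or the match is
 * left unsplit entirely), and the bytes left unread are re-consumed with the next block.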
*/ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, const void* src, size_t blockSize) { U32 idx = seqPos->idx; U32 startPosInSequence = seqPos->posInSequence; U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize; size_t dictSize; BYTE const* ip = (BYTE const*)(src); BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ repcodes_t updatedRepcodes; U32 bytesAdjustment = 0; U32 finalMatchSplit = 0; U32 litLength; U32 matchLength; U32 rawOffset; U32 offCode; if (cctx->cdict) { dictSize = cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { dictSize = cctx->prefixDict.dictSize; } else { dictSize = 0; } DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { const ZSTD_Sequence currSeq = inSeqs[idx]; litLength = currSeq.litLength; matchLength = currSeq.matchLength; rawOffset = currSeq.offset; /* Modify the sequence depending on where endPosInSequence lies */ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { if (startPosInSequence >= litLength) { startPosInSequence -= litLength; litLength = 0; matchLength -= startPosInSequence; } else { litLength -= startPosInSequence; } /* Move to the next sequence */ endPosInSequence -= currSeq.litLength + currSeq.matchLength; startPosInSequence = 0; idx++; } else { /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence does not reach the end of the match. So, we have to split the sequence */ DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u", currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence); if (endPosInSequence > litLength) { U32 firstHalfMatchLength; litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence; firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength; if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) { /* Only ever split the match if it is larger than the block size */ U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence; if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) { /* Move the endPosInSequence backward so that it creates match of minMatch length */ endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; firstHalfMatchLength -= bytesAdjustment; } matchLength = firstHalfMatchLength; /* Flag that we split the last match - after storing the sequence, exit the loop, but keep the value of endPosInSequence */ finalMatchSplit = 1; } else { /* Move the position in sequence backwards so that we don't split match, and break to store * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence * should go. 
We prefer to do this whenever it is not necessary to split the match, or if doing so * would cause the first half of the match to be too small */ bytesAdjustment = endPosInSequence - currSeq.litLength; endPosInSequence = currSeq.litLength; break; } } else { /* This sequence ends inside the literals, break to store the last literals */ break; } } /* Check if this offset can be represented with a repcode */ { U32 ll0 = (litLength == 0); offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0); updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); } if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.cParams.minMatch), "Sequence validation failed"); } DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH); ip += matchLength + litLength; } DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); seqPos->idx = idx; seqPos->posInSequence = endPosInSequence; ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); iend -= bytesAdjustment; if (ip != iend) { /* Store any last literals */ U32 lastLLSize = (U32)(iend - ip); assert(ip <= iend); DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize); ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); seqPos->posInSrc += lastLLSize; } return bytesAdjustment; } typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, const void* src, size_t blockSize); static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) { ZSTD_sequenceCopier sequenceCopier = NULL; assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode)); if (mode == ZSTD_sf_explicitBlockDelimiters) { return ZSTD_copySequencesToSeqStoreExplicitBlockDelim; } else if (mode == ZSTD_sf_noBlockDelimiters) { return ZSTD_copySequencesToSeqStoreNoBlockDelim; } assert(sequenceCopier != NULL); return sequenceCopier; } /* Compress, block-by-block, all of the sequences given. * * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error. 
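 * An empty frame (srcSize == 0) is emitted as a single empty "last" raw block,
 * since a frame must contain at least one block.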
 */
static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
                                              void* dst, size_t dstCapacity,
                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                                              const void* src, size_t srcSize)
{
    size_t cSize = 0;
    U32 lastBlock;
    size_t blockSize;
    size_t compressedSeqsSize;
    size_t remaining = srcSize;
    ZSTD_sequencePosition seqPos = {0, 0, 0};

    BYTE const* ip = (BYTE const*)src;
    BYTE* op = (BYTE*)dst;
    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);

    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
    /* Special case: empty frame */
    if (remaining == 0) {
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
        cSize += ZSTD_blockHeaderSize;
    }

    while (remaining) {
        size_t cBlockSize;
        size_t additionalByteAdjustment;
        lastBlock = remaining <= cctx->blockSize;
        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
        ZSTD_resetSeqStore(&cctx->seqStore);
        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);

        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
        blockSize -= additionalByteAdjustment;

        /* If blocks are too small, emit as a nocompress block */
        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
            cSize += cBlockSize;
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            continue;
        }

        compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
                                &cctx->appliedParams,
                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
                                blockSize,
                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
                                cctx->bmi2);
        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);

        if (!cctx->isFirstBlock &&
            ZSTD_maybeRLE(&cctx->seqStore) &&
            ZSTD_isRLE((BYTE const*)src, srcSize)) {
            /* We don't want to emit our first block as a RLE even if it qualifies because
             * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
             * This is only an issue for zstd <= v1.4.3. */
            compressedSeqsSize = 1;
        }

        if (compressedSeqsSize == 0) {
            /* ZSTD_noCompressBlock writes the block header as well */
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
        } else if (compressedSeqsSize == 1) {
            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
        } else {
            U32 cBlockHeader;
            /* Error checking and repcodes update */
            ZSTD_confirmRepcodesAndEntropyTables(cctx);
            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

            /* Write block header into beginning of block */
            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
            MEM_writeLE24(op, cBlockHeader);
            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
        }

        cSize += cBlockSize;
        DEBUGLOG(4, "cSize running total: %zu", cSize);

        if (lastBlock) {
            break;
        } else {
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            cctx->isFirstBlock = 0;
        }
    }

    return cSize;
}

size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
                              const void* src, size_t srcSize)
{
    BYTE* op = (BYTE*)dst;
    size_t cSize = 0;
    size_t compressedBlocksSize = 0;
    size_t frameHeaderSize = 0;

    /* Transparent initialization stage, same as compressStream2() */
    DEBUGLOG(3, "ZSTD_compressSequences()");
    assert(cctx != NULL);
    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
    /* Begin writing output, starting with frame header */
    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
    FORWARD_IF_ERROR(frameHeaderSize, "ZSTD_writeFrameHeader failed");
    op += frameHeaderSize;
    dstCapacity -= frameHeaderSize;
    cSize += frameHeaderSize;
    if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
        XXH64_update(&cctx->xxhState, src, srcSize);
    }
    /* cSize includes block header size and compressed sequences size */
    compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
                                                           op, dstCapacity,
                                                           inSeqs, inSeqsSize,
                                                           src, srcSize);
    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
    cSize += compressedBlocksSize;
    dstCapacity -= compressedBlocksSize;

    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
        DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32((char*)dst + cSize, checksum);
        cSize += 4;
    }

    DEBUGLOG(3, "Final compressed size: %zu", cSize);
    return cSize;
}

/*====== Finalize ======*/
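/* Illustrative drain loop for the finalize functions below (not part of the
 * library; error checks elided) : after the last input has been passed, call
 * ZSTD_endStream() repeatedly until it reports 0 bytes remaining to flush :
 * \code
 * size_t remaining;
 * do {
 *     ZSTD_outBuffer output = { dst, dstCapacity, 0 };
 *     remaining = ZSTD_endStream(zcs, &output);
 *     // write output.pos bytes from dst to the destination
 * } while (remaining != 0);
 * \endcode
 */

/*!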
ZSTD_flushStream() : * @return : amount of data remaining to flush */ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); } size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed"); if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ /* single thread mode : attempt to calculate remaining to flush more precisely */ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4); size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize; DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush); return toFlush; } } /*-===== Pre-defined compression levels =====-*/ #define ZSTD_MAX_CLEVEL 22 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { { /* "default" - for any srcSize > 256 KB */ /* W, C, H, S, L, TL, strat */ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */ { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */ { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */ { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ }, { /* for srcSize <= 256 KB */ /* W, C, H, S, L, T, strat */ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/ { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ { 18, 19, 19, 6, 3,128, 
ZSTD_btultra }, /* level 16.*/ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ }, { /* for srcSize <= 128 KB */ /* W, C, H, S, L, T, strat */ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */ { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ }, { /* for srcSize <= 16 KB */ /* W, C, H, S, L, T, strat */ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ }, }; static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize) { ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict); switch (cParams.strategy) { case ZSTD_fast: case ZSTD_dfast: break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG; break; case ZSTD_btlazy2: case ZSTD_btopt: case ZSTD_btultra: case ZSTD_btultra2: break; } return cParams; } static int ZSTD_dedicatedDictSearch_isSupported( ZSTD_compressionParameters 
const* cParams) { return (cParams->strategy >= ZSTD_greedy) && (cParams->strategy <= ZSTD_lazy2); } /** * Reverses the adjustment applied to cparams when enabling dedicated dict * search. This is used to recover the params set to be used in the working * context. (Otherwise, those tables would also grow.) */ static void ZSTD_dedicatedDictSearch_revertCParams( ZSTD_compressionParameters* cParams) { switch (cParams->strategy) { case ZSTD_fast: case ZSTD_dfast: break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG; break; case ZSTD_btlazy2: case ZSTD_btopt: case ZSTD_btultra: case ZSTD_btultra2: break; } } static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { switch (mode) { case ZSTD_cpm_unknown: case ZSTD_cpm_noAttachDict: case ZSTD_cpm_createCDict: break; case ZSTD_cpm_attachDict: dictSize = 0; break; default: assert(0); break; } { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN; size_t const addedSize = unknown && dictSize > 0 ? 500 : 0; return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize; } } /*! ZSTD_getCParams_internal() : * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. * Use dictSize == 0 for unknown or unused. * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); int row; DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel); /* row */ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; else row = compressionLevel; { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; /* acceleration factor */ if (compressionLevel < 0) { int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel); cp.targetLength = (unsigned)(-clampedCompressionLevel); } /* refine parameters based on srcSize & dictSize */ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode); } } /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. * Size values are optional, provide 0 if not known or unused */ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); } /*! ZSTD_getParams() : * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). 
* Fields of `ZSTD_frameParameters` are set to default values */ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { ZSTD_parameters params; ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode); DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); ZSTD_memset(&params, 0, sizeof(params)); params.cParams = cParams; params.fParams.contentSizeFlag = 1; return params; } /*! ZSTD_getParams() : * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). * Fields of `ZSTD_frameParameters` are set to default values */ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); } /**** ended inlining compress/zstd_compress.c ****/ /**** start inlining compress/zstd_double_fast.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /**** skipping file: zstd_compress_internal.h ****/ /**** skipping file: zstd_double_fast.h ****/ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashLarge = ms->hashTable; U32 const hBitsL = cParams->hashLog; U32 const mls = cParams->minMatch; U32* const hashSmall = ms->chainTable; U32 const hBitsS = cParams->chainLog; const BYTE* const base = ms->window.base; const BYTE* ip = base + ms->nextToUpdate; const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; /* Always insert every fastHashFillStep position into the hash tables. * Insert the other positions into the large hash table if their entry * is empty.
*/ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { U32 const curr = (U32)(ip - base); U32 i; for (i = 0; i < fastHashFillStep; ++i) { size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); if (i == 0) hashSmall[smHash] = curr + i; if (i == 0 || hashLarge[lgHash] == 0) hashLarge[lgHash] = curr + i; /* Only load extra positions for ZSTD_dtlm_full */ if (dtlm == ZSTD_dtlm_fast) break; } } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_doubleFast_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */, ZSTD_dictMode_e const dictMode) { ZSTD_compressionParameters const* cParams = &ms->cParams; U32* const hashLong = ms->hashTable; const U32 hBitsL = cParams->hashLog; U32* const hashSmall = ms->chainTable; const U32 hBitsS = cParams->chainLog; const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); /* presumes that, if there is a dictionary, it must be using Attach mode */ const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixLowest = base + prefixLowestIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ? dms->hashTable : NULL; const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ? dms->chainTable : NULL; const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ? dms->window.dictLimit : 0; const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ? dictBase + dictStartIndex : NULL; const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL; const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? prefixLowestIndex - (U32)(dictEnd - dictBase) : 0; const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ? dictCParams->hashLog : hBitsL; const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ? dictCParams->chainLog : hBitsS; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic"); assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState); /* if a dictionary is attached, it must be within window range */ if (dictMode == ZSTD_dictMatchState) { assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); } /* init */ ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { U32 const curr = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; } if (dictMode == ZSTD_dictMatchState) { /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. 
*/ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); } /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ size_t mLength; U32 offset; size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8); size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls); U32 const curr = (U32)(ip-base); U32 const matchIndexL = hashLong[h2]; U32 matchIndexS = hashSmall[h]; const BYTE* matchLong = base + matchIndexL; const BYTE* match = base + matchIndexS; const U32 repIndex = curr + 1 - offset_1; const BYTE* repMatch = (dictMode == ZSTD_dictMatchState && repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ /* check dictMatchState repcode */ if (dictMode == ZSTD_dictMatchState && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); goto _match_stored; } /* check noDict repcode */ if ( dictMode == ZSTD_noDict && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); goto _match_stored; } if (matchIndexL > prefixLowestIndex) { /* check prefix long match */ if (MEM_read64(matchLong) == MEM_read64(ip)) { mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; offset = (U32)(ip-matchLong); while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ goto _match_found; } } else if (dictMode == ZSTD_dictMatchState) { /* check dictMatchState long match */ U32 const dictMatchIndexL = dictHashLong[dictHL]; const BYTE* dictMatchL = dictBase + dictMatchIndexL; assert(dictMatchL < dictEnd); if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8; offset = (U32)(curr - dictMatchIndexL - dictIndexDelta); while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */ goto _match_found; } } if (matchIndexS > prefixLowestIndex) { /* check prefix short match */ if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } } else if (dictMode == ZSTD_dictMatchState) { /* check dictMatchState short match */ U32 const dictMatchIndexS = dictHashSmall[dictHS]; match = dictBase + dictMatchIndexS; matchIndexS = dictMatchIndexS + dictIndexDelta; if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } } ip += ((ip-anchor) >> kSearchStrength) + 1; #if defined(__aarch64__) PREFETCH_L1(ip+256); #endif continue; _search_next_long: { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8); U32 const matchIndexL3 = hashLong[hl3]; const BYTE* matchL3 = base + matchIndexL3; hashLong[hl3] = curr + 1; /* check prefix long +1 match */ if (matchIndexL3 > prefixLowestIndex) { if (MEM_read64(matchL3) == MEM_read64(ip+1)) { mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; ip++; offset = 
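/* About _search_next_long, entered just above : once a 4-byte ("short")
 * candidate has been found via the small hash table, the code first tries
 * to trade up to an 8-byte ("long") match one position later (ip+1) before
 * committing, because a longer match starting one byte further usually
 * costs fewer bits overall than a short match now. Only if no long match
 * exists at ip+1 does the code fall back to extending the short candidate.
 */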
(U32)(ip-matchL3); while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ goto _match_found; } } else if (dictMode == ZSTD_dictMatchState) { /* check dict long +1 match */ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8; ip++; offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta); while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */ goto _match_found; } } } /* if no long +1 match, explore the short match we found */ if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) { mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4; offset = (U32)(curr - matchIndexS); while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } else { mLength = ZSTD_count(ip+4, match+4, iend) + 4; offset = (U32)(ip - match); while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } /* fall-through */ _match_found: offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); _match_stored: /* match found */ ip += mLength; anchor = ip; if (ip <= ilimit) { /* Complementary insertion */ /* done after iLimit test, as candidates could be > iend-8 */ { U32 const indexToInsert = curr+2; hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); } /* check immediate repcode */ if (dictMode == ZSTD_dictMatchState) { while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState && repIndex2 < prefixLowestIndex ? dictBase + repIndex2 - dictIndexDelta : base + repIndex2; if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; anchor = ip; continue; } break; } } if (dictMode == ZSTD_noDict) { while ( (ip <= ilimit) && ( (offset_2>0) & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { /* store sequence */ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH); ip += rLength; anchor = ip; continue; /* faster when present ... (?) 
*/ } } } } /* while (ip < ilimit) */ /* save reps for next block */ rep[0] = offset_1 ? offset_1 : offsetSaved; rep[1] = offset_2 ? offset_2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict); case 5 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict); case 6 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict); case 7 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict); } } size_t ZSTD_compressBlock_doubleFast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const U32 mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState); case 5 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState); case 6 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState); case 7 : return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState); } } static size_t ZSTD_compressBlock_doubleFast_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) { ZSTD_compressionParameters const* cParams = &ms->cParams; U32* const hashLong = ms->hashTable; U32 const hBitsL = cParams->hashLog; U32* const hashSmall = ms->chainTable; U32 const hBitsS = cParams->chainLog; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); const U32 dictStartIndex = lowLimit; const U32 dictLimit = ms->window.dictLimit; const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dictBase + prefixStartIndex; U32 offset_1=rep[0], offset_2=rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize); /* if extDict is invalidated due to maxDistance, switch to "regular" variant */ if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); const U32 matchIndex = hashSmall[hSmall]; const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; const BYTE* match = matchBase + matchIndex; const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); const U32 matchLongIndex = hashLong[hLong]; const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? 
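/* In the extDict variants, match indices form one continuous index space
 * but resolve into two separate buffers : indices below prefixStartIndex
 * belong to the old content reachable through dictBase[], indices at or
 * above it belong to the current segment at base[]. That is what the base
 * selection right here implements. E.g. with prefixStartIndex == 1000,
 * index 700 resolves to dictBase+700 while index 1200 resolves to base+1200.
 */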
dictBase : base; const BYTE* matchLong = matchLongBase + matchLongIndex; const U32 curr = (U32)(ip-base); const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; size_t mLength; hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ & (repIndex > dictStartIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); } else { if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; U32 offset; mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8; offset = curr - matchLongIndex; while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); U32 const matchIndex3 = hashLong[h3]; const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base; const BYTE* match3 = match3Base + matchIndex3; U32 offset; hashLong[h3] = curr + 1; if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart; mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8; ip++; offset = curr+1 - matchIndex3; while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ } else { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? 
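/* A match found in the old segment may extend past its end and continue
 * into the current prefix : ZSTD_count_2segments() therefore counts up to
 * dictEnd first and, if the match reaches dictEnd, keeps counting from
 * prefixStart. Likewise, lowMatchPtr (selected right here) is the lowest
 * address the backward "catch up" loop may walk to for this candidate.
 */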
dictStart : prefixStart; mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; offset = curr - matchIndex; while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } else { ip += ((ip-anchor) >> kSearchStrength) + 1; continue; } } /* move to next sequence start */ ip += mLength; anchor = ip; if (ip <= ilimit) { /* Complementary insertion */ /* done after iLimit test, as candidates could be > iend-8 */ { U32 const indexToInsert = curr+2; hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); } /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ & (repIndex2 > dictStartIndex)) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1; rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_doubleFast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); } } /**** ended inlining compress/zstd_double_fast.c ****/ /**** start inlining compress/zstd_fast.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /**** skipping file: zstd_compress_internal.h ****/ /**** skipping file: zstd_fast.h ****/ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hBits = cParams->hashLog; U32 const mls = cParams->minMatch; const BYTE* const base = ms->window.base; const BYTE* ip = base + ms->nextToUpdate; const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; /* Always insert every fastHashFillStep position into the hash table. * Insert the other positions if their hash entry is empty. */ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { U32 const curr = (U32)(ip - base); size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls); hashTable[hash0] = curr; if (dtlm == ZSTD_dtlm_fast) continue; /* Only load extra positions for ZSTD_dtlm_full */ { U32 p; for (p = 1; p < fastHashFillStep; ++p) { size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls); if (hashTable[hash] == 0) { /* not yet filled */ hashTable[hash] = curr + p; } } } } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */ const BYTE* ip0 = istart; const BYTE* ip1; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; U32 offsetSaved = 0; /* init */ DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); ip0 += (ip0 == prefixStart); ip1 = ip0 + 1; { U32 const curr = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; } /* Main Search Loop */ #ifdef __INTEL_COMPILER /* From intel 'The vector pragma indicates that the loop should be * vectorized if it is legal to do so'. 
Can be used together with * #pragma ivdep (but have opted to exclude that because intel * warns against using it).*/ #pragma vector always #endif while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */ size_t mLength; BYTE const* ip2 = ip0 + 2; size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls); U32 const val0 = MEM_read32(ip0); size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls); U32 const val1 = MEM_read32(ip1); U32 const current0 = (U32)(ip0-base); U32 const current1 = (U32)(ip1-base); U32 const matchIndex0 = hashTable[h0]; U32 const matchIndex1 = hashTable[h1]; BYTE const* repMatch = ip2 - offset_1; const BYTE* match0 = base + matchIndex0; const BYTE* match1 = base + matchIndex1; U32 offcode; #if defined(__aarch64__) PREFETCH_L1(ip0+256); #endif hashTable[h0] = current0; /* update hash table */ hashTable[h1] = current1; /* update hash table */ assert(ip0 + 1 == ip1); if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) { mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0; ip0 = ip2 - mLength; match0 = repMatch - mLength; mLength += 4; offcode = 0; goto _match; } if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) { /* found a regular match */ goto _offset; } if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) { /* found a regular match after one literal */ ip0 = ip1; match0 = match1; goto _offset; } { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize; assert(step >= 2); ip0 += step; ip1 += step; continue; } _offset: /* Requires: ip0, match0 */ /* Compute the offset code */ offset_2 = offset_1; offset_1 = (U32)(ip0-match0); offcode = offset_1 + ZSTD_REP_MOVE; mLength = 4; /* Count the backwards match length */ while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */ _match: /* Requires: ip0, match0, offcode */ /* Count the forward length */ mLength += ZSTD_count(ip0+mLength, match0+mLength, iend); ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH); /* match found */ ip0 += mLength; anchor = ip0; if (ip0 <= ilimit) { /* Fill Table */ assert(base+current0+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) { /* store sequence */ size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4; { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); ip0 += rLength; ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH); anchor = ip0; continue; /* faster when present (confirmed on gcc-8) ... (?) */ } } } ip1 = ip0 + 1; } /* save reps for next block */ rep[0] = offset_1 ? offset_1 : offsetSaved; rep[1] = offset_2 ? 
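/* At block start, offset_1/offset_2 were zeroed when they exceeded maxRep
 * (they would have referenced data before the valid window), with the
 * original value parked in offsetSaved. An offset of 0 means "repcode
 * disabled" inside the loop; when handing the history to the next block
 * (right here) the saved value is restored, so a legitimate rep distance is
 * never lost. E.g. with offset_1 == 0 and offsetSaved == 42, the next block
 * starts with rep[0] == 42.
 */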
offset_2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState == NULL); switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7); } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_dictMatchState_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 prefixStartIndex = ms->window.dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; const U32* const dictHashTable = dms->hashTable; const U32 dictStartIndex = dms->window.dictLimit; const BYTE* const dictBase = dms->window.base; const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart); const U32 dictHLog = dictCParams->hashLog; /* if a dictionary is still attached, it necessarily means that * it is within window size. So we just check it. */ const U32 maxDistance = 1U << cParams->windowLog; const U32 endIndex = (U32)((size_t)(ip - base) + srcSize); assert(endIndex - prefixStartIndex <= maxDistance); (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ /* ensure there will be no underflow * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); /* init */ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); ip += (dictAndPrefixLength == 0); /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ size_t mLength; size_t const h = ZSTD_hashPtr(ip, hlog, mls); U32 const curr = (U32)(ip-base); U32 const matchIndex = hashTable[h]; const BYTE* match = base + matchIndex; const U32 repIndex = curr + 1 - offset_1; const BYTE* repMatch = (repIndex < prefixStartIndex) ? 
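/* dictMatchState keeps the dictionary in its own buffer, logically glued
 * right below prefixStartIndex. dictIndexDelta = prefixStartIndex -
 * (dictEnd - dictBase) translates a local index into that buffer, as done
 * right here. Worked example : prefixStartIndex == 1000 and a 500-byte
 * dictionary give dictIndexDelta == 500, so local index 700 maps to
 * dictBase + 200, i.e. 300 bytes before the end of the dictionary, matching
 * its distance below prefixStartIndex.
 */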
dictBase + (repIndex - dictIndexDelta) : base + repIndex; hashTable[h] = curr; /* update hash table */ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); } else if ( (matchIndex <= prefixStartIndex) ) { size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); U32 const dictMatchIndex = dictHashTable[dictHash]; const BYTE* dictMatch = dictBase + dictMatchIndex; if (dictMatchIndex <= dictStartIndex || MEM_read32(dictMatch) != MEM_read32(ip)) { assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } else { /* found a dict match */ U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta); mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4; while (((ip>anchor) & (dictMatch>dictStart)) && (ip[-1] == dictMatch[-1])) { ip--; dictMatch--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } } else if (MEM_read32(match) != MEM_read32(ip)) { /* it's not a match, and we're not going to check the dictionary */ assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } else { /* found a regular match */ U32 const offset = (U32)(ip-match); mLength = ZSTD_count(ip+4, match+4, iend) + 4; while (((ip>anchor) & (match>prefixStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); } /* match found */ ip += mLength; anchor = ip; if (ip <= ilimit) { /* Fill Table */ assert(base+curr+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : base + repIndex2; if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1 ? offset_1 : offsetSaved; rep[1] = offset_2 ? 
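/* About the "(U32)((prefixStartIndex-1) - repIndex) >= 3" guards used in
 * the loop above : the subtraction intentionally wraps. For repIndex <=
 * prefixStartIndex-4 the result is a small value >= 3 (accept : the 4-byte
 * read lies fully on the dictionary side); for repIndex >= prefixStartIndex
 * it wraps to a huge value (accept : fully inside the prefix); only
 * repIndex in [prefixStartIndex-3, prefixStartIndex-1] yields 0..2
 * (reject), because a 4-byte read at such a position would straddle the
 * dict/prefix boundary.
 */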
offset_2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_fast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState != NULL); switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); } } static size_t ZSTD_compressBlock_fast_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); const U32 dictStartIndex = lowLimit; const BYTE* const dictStart = dictBase + dictStartIndex; const U32 dictLimit = ms->window.dictLimit; const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const dictEnd = dictBase + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1); /* switch to "regular" variant if extDict is invalidated due to maxDistance */ if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ const size_t h = ZSTD_hashPtr(ip, hlog, mls); const U32 matchIndex = hashTable[h]; const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; const BYTE* match = matchBase + matchIndex; const U32 curr = (U32)(ip-base); const U32 repIndex = curr + 1 - offset_1; const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; hashTable[h] = curr; /* update hash table */ DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr); assert(offset_1 <= curr +1); /* check repIndex */ if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex)) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
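/* Note on "stepSize = cParams->targetLength + !(cParams->targetLength)"
 * above : adding !x turns a possible 0 into 1 without a branch, since !x is
 * 1 exactly when x is 0 (so 0 -> 1, and any n > 0 stays n). This guarantees
 * the search always advances even when targetLength is configured as 0.
 */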
dictEnd : iend; size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH); ip += rLength; anchor = ip; } else { if ( (matchIndex < dictStartIndex) || (MEM_read32(match) != MEM_read32(ip)) ) { assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; U32 const offset = curr - matchIndex; size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; /* update offset history */ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); ip += mLength; anchor = ip; } } if (ip <= ilimit) { /* Fill Table */ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1; rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_fast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); case 5 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); case 6 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); case 7 : return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); } } /**** ended inlining compress/zstd_fast.c ****/ /**** start inlining compress/zstd_lazy.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /**** skipping file: zstd_compress_internal.h ****/ /**** skipping file: zstd_lazy.h ****/ /*-************************************* * Binary Tree search ***************************************/ static void ZSTD_updateDUBT(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend, U32 mls) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hashLog = cParams->hashLog; U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32 idx = ms->nextToUpdate; if (idx != target) DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)", idx, target, ms->window.dictLimit); assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */ (void)iend; assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */ for ( ; idx < target ; idx++) { size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */ U32 const matchIndex = hashTable[h]; U32* const nextCandidatePtr = bt + 2*(idx&btMask); U32* const sortMarkPtr = nextCandidatePtr + 1; DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx); hashTable[h] = idx; /* Update Hash Table */ *nextCandidatePtr = matchIndex; /* update BT like a chain */ *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK; } ms->nextToUpdate = target; } /** ZSTD_insertDUBT1() : * sort one already inserted but unsorted position * assumption : curr >= btlow == (curr - btmask) * doesn't fail */ static void ZSTD_insertDUBT1(ZSTD_matchState_t* ms, U32 curr, const BYTE* inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; size_t commonLengthSmaller=0, commonLengthLarger=0; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr; const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* match; U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = smallerPtr + 1; U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ U32 dummy32; /* to be nullified at the end */ U32 const windowValid = ms->window.lowLimit; U32 const maxDistance = 1U << cParams->windowLog; U32 const windowLow = (curr - windowValid > maxDistance) ? 
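/* The expression selected right here clamps the lowest usable match index :
 * candidates must be both inside the validated window (>= windowValid) and
 * within maxDistance = 1 << windowLog of the current position. E.g. with
 * curr == 5,000,000, windowLog == 21 (maxDistance == 2,097,152) and
 * windowValid == 0, only indices >= 2,902,848 may be matched against.
 */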
curr - maxDistance : windowValid; DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)", curr, dictLimit, windowLow); assert(curr >= btLow); assert(ip < iend); /* condition for ZSTD_count */ while (nbCompares-- && (matchIndex > windowLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < curr); /* note : all candidates are now supposed sorted, * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */ if ( (dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit) /* both in current segment*/ || (curr < dictLimit) /* both in extDict */) { const BYTE* const mBase = ( (dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) ? base : dictBase; assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */ || (curr < dictLimit) ); match = mBase + matchIndex; matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); } else { match = dictBase + matchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* preparation for next read of match[matchLength] */ } DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ", curr, matchIndex, (U32)matchLength); if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ } if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ /* match is smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u", matchIndex, btLow, nextPtr[1]); smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ } else { /* match is larger than current */ *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u", matchIndex, btLow, nextPtr[0]); largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; } static size_t ZSTD_DUBT_findBetterDictMatch ( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, size_t bestLength, U32 nbCompares, U32 const mls, const ZSTD_dictMode_e dictMode) { const ZSTD_matchState_t * const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dmsCParams = &dms->cParams; const U32 * const dictHashTable = dms->hashTable; U32 const hashLog = dmsCParams->hashLog; size_t const h = ZSTD_hashPtr(ip, hashLog, mls); U32 dictMatchIndex = dictHashTable[h]; const BYTE* const base = ms->window.base; const BYTE* const prefixStart = base + ms->window.dictLimit; U32 const curr = (U32)(ip-base); const BYTE* const dictBase = dms->window.base; const BYTE* const dictEnd = dms->window.nextSrc; U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base); U32 
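/* Reminder of the DUBT layout used by ZSTD_insertDUBT1() above : the chain
 * table doubles as a binary tree, position idx owning the pair
 * bt[2*(idx&btMask)] (head of its "smaller" subtree) and
 * bt[2*(idx&btMask)+1] (head of its "larger" subtree). Freshly inserted
 * positions are first kept as a plain chain with the second slot set to
 * ZSTD_DUBT_UNSORTED_MARK, and are only sorted into the tree lazily, when a
 * search actually reaches them.
 */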
const dictLowLimit = dms->window.lowLimit; U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit; U32* const dictBt = dms->chainTable; U32 const btLog = dmsCParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask; size_t commonLengthSmaller=0, commonLengthLarger=0; (void)dictMode; assert(dictMode == ZSTD_dictMatchState); while (nbCompares-- && (dictMatchIndex > dictLowLimit)) { U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dictBase + dictMatchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (dictMatchIndex+matchLength >= dictHighLimit) match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */ if (matchLength > bestLength) { U32 matchIndex = dictMatchIndex + dictIndexDelta; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex); bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; } if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ break; /* drop, to guarantee consistency (miss a little bit of compression) */ } } if (match[matchLength] < ip[matchLength]) { if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ } else { /* match is larger than current */ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ commonLengthLarger = matchLength; dictMatchIndex = nextPtr[0]; } } if (bestLength >= MINMATCH) { U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", curr, (U32)bestLength, (U32)*offsetPtr, mIndex); } return bestLength; } static size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, U32 const mls, const ZSTD_dictMode_e dictMode) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hashLog = cParams->hashLog; size_t const h = ZSTD_hashPtr(ip, hashLog, mls); U32 matchIndex = hashTable[h]; const BYTE* const base = ms->window.base; U32 const curr = (U32)(ip-base); U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; U32 const btLow = (btMask >= curr) ? 
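/* btLow, computed right here, is the oldest position still addressable by
 * the tree : node slots are recycled modulo btMask+1 positions, so anything
 * older than curr - btMask has had its links overwritten and the descent
 * must stop there (the "beyond tree size" breaks in the search loops).
 */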
0 : curr - btMask; U32 const unsortLimit = MAX(btLow, windowLow); U32* nextCandidate = bt + 2*(matchIndex&btMask); U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1; U32 nbCompares = 1U << cParams->searchLog; U32 nbCandidates = nbCompares; U32 previousCandidate = 0; DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr); assert(ip <= iend-8); /* required for h calculation */ assert(dictMode != ZSTD_dedicatedDictSearch); /* reach end of unsorted candidates list */ while ( (matchIndex > unsortLimit) && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK) && (nbCandidates > 1) ) { DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted", matchIndex); *unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, to move up back to original position */ previousCandidate = matchIndex; matchIndex = *nextCandidate; nextCandidate = bt + 2*(matchIndex&btMask); unsortedMark = bt + 2*(matchIndex&btMask) + 1; nbCandidates --; } /* nullify last candidate if it's still unsorted * simplification, detrimental to compression ratio, beneficial for speed */ if ( (matchIndex > unsortLimit) && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) { DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u", matchIndex); *nextCandidate = *unsortedMark = 0; } /* batch sort stacked candidates */ matchIndex = previousCandidate; while (matchIndex) { /* will end on matchIndex == 0 */ U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1; U32 const nextCandidateIdx = *nextCandidateIdxPtr; ZSTD_insertDUBT1(ms, matchIndex, iend, nbCandidates, unsortLimit, dictMode); matchIndex = nextCandidateIdx; nbCandidates++; } /* find longest match */ { size_t commonLengthSmaller = 0, commonLengthLarger = 0; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = bt + 2*(curr&btMask) + 1; U32 matchEndIdx = curr + 8 + 1; U32 dummy32; /* to be nullified at the end */ size_t bestLength = 0; matchIndex = hashTable[h]; hashTable[h] = curr; /* Update Hash Table */ while (nbCompares-- && (matchIndex > windowLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match; if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) { match = base + matchIndex; matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); } else { match = dictBase + matchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ } if (matchLength > bestLength) { if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ if (dictMode == ZSTD_dictMatchState) { nbCompares = 0; /* in addition to avoiding checking any * further in this loop, make sure we * skip checking in the dictionary. 
*/ } break; /* drop, to guarantee consistency (miss a little bit of compression) */ } } if (match[matchLength] < ip[matchLength]) { /* match is smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ } else { /* match is larger than current */ *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; if (dictMode == ZSTD_dictMatchState && nbCompares) { bestLength = ZSTD_DUBT_findBetterDictMatch( ms, ip, iend, offsetPtr, bestLength, nbCompares, mls, dictMode); } assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ if (bestLength >= MINMATCH) { U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", curr, (U32)bestLength, (U32)*offsetPtr, mIndex); } return bestLength; } } /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ FORCE_INLINE_TEMPLATE size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, const U32 mls /* template */, const ZSTD_dictMode_e dictMode) { DEBUGLOG(7, "ZSTD_BtFindBestMatch"); if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ ZSTD_updateDUBT(ms, ip, iLimit, mls); return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode); } static size_t ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); case 7 : case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); } } static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); case 7 : case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); } } static size_t ZSTD_BtFindBestMatch_extDict_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); case 7 : case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); } } /* ********************************* * Hash Chain ***********************************/ #define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)] /* Update chains up to ip (excluded) Assumption : always within prefix (i.e. 
not within extDict) */ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( ZSTD_matchState_t* ms, const ZSTD_compressionParameters* const cParams, const BYTE* ip, U32 const mls) { U32* const hashTable = ms->hashTable; const U32 hashLog = cParams->hashLog; U32* const chainTable = ms->chainTable; const U32 chainMask = (1 << cParams->chainLog) - 1; const BYTE* const base = ms->window.base; const U32 target = (U32)(ip - base); U32 idx = ms->nextToUpdate; while(idx < target) { /* catch up */ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; hashTable[h] = idx; idx++; } ms->nextToUpdate = target; return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; } U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { const ZSTD_compressionParameters* const cParams = &ms->cParams; return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch); } void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip) { const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32* const hashTable = ms->hashTable; U32* const chainTable = ms->chainTable; U32 const chainSize = 1 << ms->cParams.chainLog; U32 idx = ms->nextToUpdate; U32 const minChain = chainSize < target ? target - chainSize : idx; U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG; U32 const cacheSize = bucketSize - 1; U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize; U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts; /* We know the hashtable is oversized by a factor of `bucketSize`. * We are going to temporarily pretend `bucketSize == 1`, keeping only a * single entry. We will use the rest of the space to construct a temporary * chaintable. */ U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; U32* const tmpHashTable = hashTable; U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog); U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog; U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx; U32 hashIdx; assert(ms->cParams.chainLog <= 24); assert(ms->cParams.hashLog >= ms->cParams.chainLog); assert(idx != 0); assert(tmpMinChain <= minChain); /* fill conventional hash table and conventional chain table */ for ( ; idx < target; idx++) { U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch); if (idx >= tmpMinChain) { tmpChainTable[idx - tmpMinChain] = hashTable[h]; } tmpHashTable[h] = idx; } /* sort chains into ddss chain table */ { U32 chainPos = 0; for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) { U32 count; U32 countBeyondMinChain = 0; U32 i = tmpHashTable[hashIdx]; for (count = 0; i >= tmpMinChain && count < cacheSize; count++) { /* skip through the chain to the first position that won't be * in the hash cache bucket */ if (i < minChain) { countBeyondMinChain++; } i = tmpChainTable[i - tmpMinChain]; } if (count == cacheSize) { for (count = 0; count < chainLimit;) { if (i < minChain) { if (!i || countBeyondMinChain++ > cacheSize) { /* only allow pulling `cacheSize` number of entries * into the cache or chainTable beyond `minChain`, * to replace the entries pulled out of the * chainTable into the cache. This lets us reach * back further without increasing the total number * of entries in the chainTable, guaranteeing the * DDSS chain table will fit into the space * allocated for the regular one. 
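 *
 * For reference, once a bucket's chain is laid out, its last slot stores a
 * packed pointer into the DDSS chain table: the chain's start index in the
 * upper 24 bits and its length in the low 8 bits (hence the 255 cap on
 * chainLimit). A minimal sketch of the packing, using hypothetical helper
 * names for illustration only:
 * \code
 * static U32 ddss_packChain(U32 chainStart, U32 chainLength)
 * {
 *     assert(chainLength <= 255);   // length must fit in the low byte
 *     return (chainStart << 8) + chainLength;
 * }
 * static void ddss_unpackChain(U32 packed, U32* chainStart, U32* chainLength)
 * {
 *     *chainStart  = packed >> 8;   // upper bits : index into chainTable
 *     *chainLength = packed & 0xFF; // low byte : number of chain entries
 * }
 * \endcode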
*/ break; } } chainTable[chainPos++] = i; count++; if (i < tmpMinChain) { break; } i = tmpChainTable[i - tmpMinChain]; } } else { count = 0; } if (count) { tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count; } else { tmpHashTable[hashIdx] = 0; } } assert(chainPos <= chainSize); /* I believe this is guaranteed... */ } /* move chain pointers into the last entry of each hash bucket */ for (hashIdx = (1 << hashLog); hashIdx; ) { U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG; U32 const chainPackedPointer = tmpHashTable[hashIdx]; U32 i; for (i = 0; i < cacheSize; i++) { hashTable[bucketIdx + i] = 0; } hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer; } /* fill the buckets of the hash table */ for (idx = ms->nextToUpdate; idx < target; idx++) { U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch) << ZSTD_LAZY_DDSS_BUCKET_LOG; U32 i; /* Shift hash cache down 1. */ for (i = cacheSize - 1; i; i--) hashTable[h + i] = hashTable[h + i - 1]; hashTable[h] = idx; } ms->nextToUpdate = target; } /* inlining is important to hardwire a hot branch (template emulation) */ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_generic ( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, const U32 mls, const ZSTD_dictMode_e dictMode) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const chainTable = ms->chainTable; const U32 chainSize = (1 << cParams->chainLog); const U32 chainMask = chainSize-1; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const U32 curr = (U32)(ip-base); const U32 maxDistance = 1U << cParams->windowLog; const U32 lowestValid = ms->window.lowLimit; const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; const U32 isDictionary = (ms->loadedDictEnd != 0); const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance; const U32 minChain = curr > chainSize ? curr - chainSize : 0; U32 nbAttempts = 1U << cParams->searchLog; size_t ml=4-1; const ZSTD_matchState_t* const dms = ms->dictMatchState; const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0; const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch ? 
ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0; U32 matchIndex; if (dictMode == ZSTD_dedicatedDictSearch) { const U32* entry = &dms->hashTable[ddsIdx]; PREFETCH_L1(entry); } /* HC4 match finder */ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls); for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { const BYTE* const match = base + matchIndex; assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ if (match[ml] == ip[ml]) /* potentially better */ currentMl = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex; assert(match+4 <= dictEnd); if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; } /* save best solution */ if (currentMl > ml) { ml = currentMl; *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } if (matchIndex <= minChain) break; matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); } if (dictMode == ZSTD_dedicatedDictSearch) { const U32 ddsLowestIndex = dms->window.dictLimit; const BYTE* const ddsBase = dms->window.base; const BYTE* const ddsEnd = dms->window.nextSrc; const U32 ddsSize = (U32)(ddsEnd - ddsBase); const U32 ddsIndexDelta = dictLimit - ddsSize; const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG); const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1; U32 ddsAttempt; for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) { PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); } { U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; U32 const chainIndex = chainPackedPointer >> 8; PREFETCH_L1(&dms->chainTable[chainIndex]); } for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) { size_t currentMl=0; const BYTE* match; matchIndex = dms->hashTable[ddsIdx + ddsAttempt]; match = ddsBase + matchIndex; if (!matchIndex) { return ml; } /* guaranteed by table construction */ (void)ddsLowestIndex; assert(matchIndex >= ddsLowestIndex); assert(match+4 <= ddsEnd); if (MEM_read32(match) == MEM_read32(ip)) { /* assumption : matchIndex <= dictLimit-4 (by table construction) */ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4; } /* save best solution */ if (currentMl > ml) { ml = currentMl; *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) { /* best possible, avoids read overflow on next attempt */ return ml; } } } { U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; U32 chainIndex = chainPackedPointer >> 8; U32 const chainLength = chainPackedPointer & 0xFF; U32 const chainAttempts = nbAttempts - ddsAttempt; U32 const chainLimit = chainAttempts > chainLength ? 
chainLength : chainAttempts; U32 chainAttempt; for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) { PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]); } for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) { size_t currentMl=0; const BYTE* match; matchIndex = dms->chainTable[chainIndex]; match = ddsBase + matchIndex; /* guaranteed by table construction */ assert(matchIndex >= ddsLowestIndex); assert(match+4 <= ddsEnd); if (MEM_read32(match) == MEM_read32(ip)) { /* assumption : matchIndex <= dictLimit-4 (by table construction) */ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4; } /* save best solution */ if (currentMl > ml) { ml = currentMl; *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } } } } else if (dictMode == ZSTD_dictMatchState) { const U32* const dmsChainTable = dms->chainTable; const U32 dmsChainSize = (1 << dms->cParams.chainLog); const U32 dmsChainMask = dmsChainSize - 1; const U32 dmsLowestIndex = dms->window.dictLimit; const BYTE* const dmsBase = dms->window.base; const BYTE* const dmsEnd = dms->window.nextSrc; const U32 dmsSize = (U32)(dmsEnd - dmsBase); const U32 dmsIndexDelta = dictLimit - dmsSize; const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0; matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; const BYTE* const match = dmsBase + matchIndex; assert(match+4 <= dmsEnd); if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4; /* save best solution */ if (currentMl > ml) { ml = currentMl; *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } if (matchIndex <= dmsMinChain) break; matchIndex = dmsChainTable[matchIndex & dmsChainMask]; } } return ml; } FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); case 7 : case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); } } static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); case 7 : case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); } } static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch); case 5 : return ZSTD_HcFindBestMatch_generic(ms, 
ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch); case 7 : case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch); } } FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit, size_t* offsetPtr) { switch(ms->cParams.minMatch) { default : /* includes case 3 */ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); case 7 : case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); } } /* ******************************* * Common parser - lazy strategy *********************************/ typedef enum { search_hashChain, search_binaryTree } searchMethod_e; FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth, ZSTD_dictMode_e const dictMode) { const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const U32 prefixLowestIndex = ms->window.dictLimit; const BYTE* const prefixLowest = base + prefixLowestIndex; typedef size_t (*searchMax_f)( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); /** * This table is indexed first by the four ZSTD_dictMode_e values, and then * by the two searchMethod_e values. NULLs are placed for configurations * that should never occur (extDict modes go to the other implementation * below and there is no DDSS for binary tree search yet). */ const searchMax_f searchFuncs[4][2] = { { ZSTD_HcFindBestMatch_selectMLS, ZSTD_BtFindBestMatch_selectMLS }, { NULL, NULL }, { ZSTD_HcFindBestMatch_dictMatchState_selectMLS, ZSTD_BtFindBestMatch_dictMatchState_selectMLS }, { ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS, NULL } }; searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree]; U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; const int isDMS = dictMode == ZSTD_dictMatchState; const int isDDS = dictMode == ZSTD_dedicatedDictSearch; const int isDxS = isDMS || isDDS; const ZSTD_matchState_t* const dms = ms->dictMatchState; const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0; const BYTE* const dictBase = isDxS ? dms->window.base : NULL; const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL; const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL; const U32 dictIndexDelta = isDxS ? prefixLowestIndex - (U32)(dictEnd - dictBase) : 0; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); assert(searchMax != NULL); DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode); /* init */ ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { U32 const curr = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); U32 const maxRep = curr - windowLow; if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; } if (isDxS) { /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. 
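 * In the noDict path above, a repcode larger than maxRep would reach beyond
 * windowLow, so it is stashed in savedOffset and temporarily disabled
 * (set to 0); the dictionary paths rely on the asserts below instead.
 * A worked restatement of that clamp, with illustrative numbers:
 * \code
 * U32 const maxRep = curr - windowLow;   // e.g. 100 - 90 == 10
 * if (offset_1 > maxRep) {               // a repcode of 25 would reach
 *     savedOffset = offset_1;            // before windowLow, so disable
 *     offset_1 = 0;                      // it and remember it for later
 * }
 * \endcode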
*/ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); } /* Match Loop */ #if defined(__GNUC__) && defined(__x86_64__) /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the * code alignment is perturbed. To fix the instability, align the loop on 32 bytes. */ __asm__(".p2align 5"); #endif while (ip < ilimit) { size_t matchLength=0; size_t offset=0; const BYTE* start=ip+1; /* check repCode */ if (isDxS) { const U32 repIndex = (U32)(ip - base) + 1 - offset_1; const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch) && repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; if (depth==0) goto _storeSequence; } } if ( dictMode == ZSTD_noDict && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; if (depth==0) goto _storeSequence; } /* first search (depth 0) */ { size_t offsetFound = 999999999; size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); if (ml2 > matchLength) matchLength = ml2, start = ip, offset=offsetFound; } if (matchLength < 4) { ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ continue; } /* let's try to find a better solution */ if (depth>=1) while (ip<ilimit) { ip ++; if ( (dictMode == ZSTD_noDict) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 3); int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 3); int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } } { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; /* search a better one */ } } /* let's find an even better one */ if ((depth==2) && (ip<ilimit)) { ip ++; if ( (dictMode == ZSTD_noDict) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 4); int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) : base + repIndex; if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 4); int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); if ((mlRep >= 4) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } } { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* NOTE: * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which * overflows the pointer, which is undefined behavior. */ /* catch up */ if (offset) { if (dictMode == ZSTD_noDict) { while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest)) && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */ { start--; matchLength++; } } if (isDxS) { U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ } offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); } /* store sequence */ _storeSequence: { size_t const litLength = start - anchor; ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); anchor = ip = start + matchLength; } /* check immediate repcode */ if (isDxS) { while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex = current2 - offset_2; const BYTE* repMatch = repIndex < prefixLowestIndex ? dictBase - dictIndexDelta + repIndex : base + repIndex; if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); ip += matchLength; anchor = ip; continue; } break; } } if (dictMode == ZSTD_noDict) { while ( ((ip <= ilimit) & (offset_2>0)) && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { /* store sequence */ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ } } } /* Save reps for next block */ rep[0] = offset_1 ? offset_1 : savedOffset; rep[1] = offset_2 ? 
offset_2 : savedOffset; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_btlazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict); } size_t ZSTD_compressBlock_lazy2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); } size_t ZSTD_compressBlock_lazy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); } size_t ZSTD_compressBlock_greedy( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); } size_t ZSTD_compressBlock_btlazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_lazy2_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_lazy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_greedy_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch); } size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch); } size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch); } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const searchMethod_e searchMethod, const U32 depth) { const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const U32 dictLimit = ms->window.dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* const dictBase = 
ms->window.dictBase; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const dictStart = dictBase + ms->window.lowLimit; const U32 windowLog = ms->cParams.windowLog; typedef size_t (*searchMax_f)( ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS; U32 offset_1 = rep[0], offset_2 = rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic"); /* init */ ip += (ip == prefixStart); /* Match Loop */ #if defined(__GNUC__) && defined(__x86_64__) /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the * code alignment is perturbed. To fix the instability, align the loop on 32 bytes. */ __asm__(".p2align 5"); #endif while (ip < ilimit) { size_t matchLength=0; size_t offset=0; const BYTE* start=ip+1; U32 curr = (U32)(ip-base); /* check repCode */ { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog); const U32 repIndex = (U32)(curr+1 - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip+1) == MEM_read32(repMatch)) { /* repcode detected, we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4; if (depth==0) goto _storeSequence; } } /* first search (depth 0) */ { size_t offsetFound = 999999999; size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); if (ml2 > matchLength) matchLength = ml2, start = ip, offset=offsetFound; } if (matchLength < 4) { ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ continue; } /* let's try to find a better solution */ if (depth>=1) while (ip<ilimit) { ip ++; curr++; /* check repCode */ if (offset) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 3); int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); if ((repLength >= 4) && (gain2 > gain1)) matchLength = repLength, offset = 0, start = ip; } } /* search match, depth 1 */ { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; /* search a better one */ } } /* let's find an even better one */ if ((depth==2) && (ip<ilimit)) { ip ++; curr++; /* check repCode */ if (offset) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ?
dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 4); int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); if ((repLength >= 4) && (gain2 > gain1)) matchLength = repLength, offset = 0, start = ip; } } /* search match, depth 2 */ { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { matchLength = ml2, offset = offset2, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* catch up */ if (offset) { U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); } /* store sequence */ _storeSequence: { size_t const litLength = start - anchor; ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); anchor = ip = start + matchLength; } /* check immediate repcode */ while (ip <= ilimit) { const U32 repCurrent = (U32)(ip-base); const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); const U32 repIndex = repCurrent - offset_2; const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) 
*/ } break; } } /* Save reps for next block */ rep[0] = offset_1; rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_greedy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0); } size_t ZSTD_compressBlock_lazy_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1); } size_t ZSTD_compressBlock_lazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2); } size_t ZSTD_compressBlock_btlazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2); } /**** ended inlining compress/zstd_lazy.c ****/ /**** start inlining compress/zstd_ldm.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /**** skipping file: zstd_ldm.h ****/ /**** skipping file: ../common/debug.h ****/ /**** skipping file: ../common/xxhash.h ****/ /**** skipping file: zstd_fast.h ****/ /**** skipping file: zstd_double_fast.h ****/ /**** start inlining zstd_ldm_geartab.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ #ifndef ZSTD_LDM_GEARTAB_H #define ZSTD_LDM_GEARTAB_H static U64 ZSTD_ldm_gearTab[256] = { 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc, 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05, 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e, 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889, 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e, 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140, 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e, 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f, 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391, 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210, 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be, 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a, 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b, 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4, 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb, 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312, 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01, 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc, 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967, 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553, 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f, 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2, 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d, 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a, 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74, 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3, 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1, 0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b, 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568, 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a, 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1, 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9, 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463, 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba, 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9, 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61, 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec, 0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6, 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab, 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5, 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59, 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7, 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc, 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb, 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be, 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312, 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1, 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc, 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d, 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445, 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c, 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5, 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5, 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28, 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a, 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9, 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15, 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 
0xcddb59d2b7c1baef, 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2, 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375, 0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3, 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595, 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389, 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4, 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756, 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc, 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45, 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea, 0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f, 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc, 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c, 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a, 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17, 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3, 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4, 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91, 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40, 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741, 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f, 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4, 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad, 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047, 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2, 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e, 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b, 0x2b4da14f2613d8f4 }; #endif /* ZSTD_LDM_GEARTAB_H */ /**** ended inlining zstd_ldm_geartab.h ****/ #define LDM_BUCKET_SIZE_LOG 3 #define LDM_MIN_MATCH_LENGTH 64 #define LDM_HASH_RLOG 7 typedef struct { U64 rolling; U64 stopMask; } ldmRollingHashState_t; /** ZSTD_ldm_gear_init(): * * Initializes the rolling hash state such that it will honor the * settings in params. */ static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params) { unsigned maxBitsInMask = MIN(params->minMatchLength, 64); unsigned hashRateLog = params->hashRateLog; state->rolling = ~(U32)0; /* The choice of the splitting criterion is subject to two conditions: * 1. it has to trigger on average every 2^(hashRateLog) bytes; * 2. ideally, it has to depend on a window of minMatchLength bytes. * * In the gear hash algorithm, bit n depends on the last n bytes; * so in order to obtain a good quality splitting criterion it is * preferable to use bits with high weight. * * To match condition 1 we use a mask with hashRateLog bits set * and, because of the previous remark, we make sure these bits * have the highest possible weight while still respecting * condition 2. */ if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) { state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog); } else { /* In this degenerate case we simply honor the hash rate. */ state->stopMask = ((U64)1 << hashRateLog) - 1; } } /** ZSTD_ldm_gear_feed(): * * Registers in the splits array all the split points found in the first * size bytes following the data pointer. This function terminates when * either all the data has been processed or LDM_BATCH_SIZE splits are * present in the splits array. * * Precondition: The splits array must not be full. * Returns: The number of bytes processed. 
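 *
 * The core update costs one shift and one table lookup per input byte, and a
 * position is declared a split point whenever the masked hash bits are all
 * zero. A minimal standalone sketch of the same rolling step (illustrative
 * only, not part of the library API):
 * \code
 * static size_t gear_findFirstSplit(U64 hash, U64 stopMask,
 *                                   const BYTE* data, size_t size)
 * {
 *     size_t n;
 *     for (n = 0; n < size; n++) {
 *         hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff];
 *         if ((hash & stopMask) == 0) return n + 1;  // split right after byte n
 *     }
 *     return size;  // no split point in this range
 * }
 * \endcode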
*/ static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, BYTE const* data, size_t size, size_t* splits, unsigned* numSplits) { size_t n; U64 hash, mask; hash = state->rolling; mask = state->stopMask; n = 0; #define GEAR_ITER_ONCE() do { \ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \ n += 1; \ if (UNLIKELY((hash & mask) == 0)) { \ splits[*numSplits] = n; \ *numSplits += 1; \ if (*numSplits == LDM_BATCH_SIZE) \ goto done; \ } \ } while (0) while (n + 3 < size) { GEAR_ITER_ONCE(); GEAR_ITER_ONCE(); GEAR_ITER_ONCE(); GEAR_ITER_ONCE(); } while (n < size) { GEAR_ITER_ONCE(); } #undef GEAR_ITER_ONCE done: state->rolling = hash; return n; } void ZSTD_ldm_adjustParameters(ldmParams_t* params, ZSTD_compressionParameters const* cParams) { params->windowLog = cParams->windowLog; ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; if (params->hashLog == 0) { params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG); assert(params->hashLog <= ZSTD_HASHLOG_MAX); } if (params->hashRateLog == 0) { params->hashRateLog = params->windowLog < params->hashLog ? 0 : params->windowLog - params->hashLog; } params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); } size_t ZSTD_ldm_getTableSize(ldmParams_t params) { size_t const ldmHSize = ((size_t)1) << params.hashLog; size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog); size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); return params.enableLdm ? totalSize : 0; } size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) { return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; } /** ZSTD_ldm_getBucket() : * Returns a pointer to the start of the bucket associated with hash. */ static ldmEntry_t* ZSTD_ldm_getBucket( ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) { return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); } /** ZSTD_ldm_insertEntry() : * Insert the entry with corresponding hash into the hash table */ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, size_t const hash, const ldmEntry_t entry, ldmParams_t const ldmParams) { BYTE* const pOffset = ldmState->bucketOffsets + hash; unsigned const offset = *pOffset; *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry; *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1)); } /** ZSTD_ldm_countBackwardsMatch() : * Returns the number of bytes that match backwards before pIn and pMatch. * * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ static size_t ZSTD_ldm_countBackwardsMatch( const BYTE* pIn, const BYTE* pAnchor, const BYTE* pMatch, const BYTE* pMatchBase) { size_t matchLength = 0; while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) { pIn--; pMatch--; matchLength++; } return matchLength; } /** ZSTD_ldm_countBackwardsMatch_2segments() : * Returns the number of bytes that match backwards from pMatch, * even with the backwards match spanning 2 different segments. 
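 *
 * Equivalent logic as an illustrative sketch (this is not a separate helper
 * in the library; the real implementation follows just below):
 * \code
 * size_t len = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
 * if (pMatch - len == pMatchBase && pMatchBase != pExtDictStart)
 *     len += ZSTD_ldm_countBackwardsMatch(pIn - len, pAnchor,
 *                                         pExtDictEnd, pExtDictStart);
 * \endcode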
* * On reaching `pMatchBase`, continue counting backwards from `pExtDictEnd`. */ static size_t ZSTD_ldm_countBackwardsMatch_2segments( const BYTE* pIn, const BYTE* pAnchor, const BYTE* pMatch, const BYTE* pMatchBase, const BYTE* pExtDictStart, const BYTE* pExtDictEnd) { size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase); if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) { /* If backwards match is entirely in the extDict or prefix, immediately return */ return matchLength; } DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength); matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart); DEBUGLOG(7, "final backwards match length = %zu", matchLength); return matchLength; } /** ZSTD_ldm_fillFastTables() : * * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. * This is similar to ZSTD_loadDictionaryContent. * * The tables for the other strategies are filled within their * block compressors. */ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, void const* end) { const BYTE* const iend = (const BYTE*)end; switch(ms->cParams.strategy) { case ZSTD_fast: ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast); break; case ZSTD_dfast: ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast); break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: case ZSTD_btlazy2: case ZSTD_btopt: case ZSTD_btultra: case ZSTD_btultra2: break; default: assert(0); /* not possible : not a valid strategy id */ } return 0; } void ZSTD_ldm_fillHashTable( ldmState_t* ldmState, const BYTE* ip, const BYTE* iend, ldmParams_t const* params) { U32 const minMatchLength = params->minMatchLength; U32 const hBits = params->hashLog - params->bucketSizeLog; BYTE const* const base = ldmState->window.base; BYTE const* const istart = ip; ldmRollingHashState_t hashState; size_t* const splits = ldmState->splitIndices; unsigned numSplits; DEBUGLOG(5, "ZSTD_ldm_fillHashTable"); ZSTD_ldm_gear_init(&hashState, params); while (ip < iend) { size_t hashed; unsigned n; numSplits = 0; hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits); for (n = 0; n < numSplits; n++) { if (ip + splits[n] >= istart + minMatchLength) { BYTE const* const split = ip + splits[n] - minMatchLength; U64 const xxhash = XXH64(split, minMatchLength, 0); U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1)); ldmEntry_t entry; entry.offset = (U32)(split - base); entry.checksum = (U32)(xxhash >> 32); ZSTD_ldm_insertEntry(ldmState, hash, entry, *params); } } ip += hashed; } } /** ZSTD_ldm_limitTableUpdate() : * * Sets cctx->nextToUpdate to a position closer to anchor * if it has fallen far behind * (after a long match, only update tables a limited amount).
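 *
 * A worked example of the clamp below, with illustrative values:
 * \code
 * // curr == 10000 and ms->nextToUpdate == 2000 : the gap (8000) exceeds
 * // 1024, so nextToUpdate becomes 10000 - MIN(512, 8000 - 1024) == 9488,
 * // leaving at most 512 positions to (re)index after the long match.
 * \endcode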
*/ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) { U32 const curr = (U32)(anchor - ms->window.base); if (curr > ms->nextToUpdate + 1024) { ms->nextToUpdate = curr - MIN(512, curr - ms->nextToUpdate - 1024); } } static size_t ZSTD_ldm_generateSequences_internal( ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, ldmParams_t const* params, void const* src, size_t srcSize) { /* LDM parameters */ int const extDict = ZSTD_window_hasExtDict(ldmState->window); U32 const minMatchLength = params->minMatchLength; U32 const entsPerBucket = 1U << params->bucketSizeLog; U32 const hBits = params->hashLog - params->bucketSizeLog; /* Prefix and extDict parameters */ U32 const dictLimit = ldmState->window.dictLimit; U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit; BYTE const* const base = ldmState->window.base; BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL; BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL; BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL; BYTE const* const lowPrefixPtr = base + dictLimit; /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; BYTE const* const ilimit = iend - HASH_READ_SIZE; /* Input positions */ BYTE const* anchor = istart; BYTE const* ip = istart; /* Rolling hash state */ ldmRollingHashState_t hashState; /* Arrays for staged-processing */ size_t* const splits = ldmState->splitIndices; ldmMatchCandidate_t* const candidates = ldmState->matchCandidates; unsigned numSplits; if (srcSize < minMatchLength) return iend - anchor; /* Initialize the rolling hash state with the first minMatchLength bytes */ ZSTD_ldm_gear_init(&hashState, params); { size_t n = 0; while (n < minMatchLength) { numSplits = 0; n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n, splits, &numSplits); } ip += minMatchLength; } while (ip < ilimit) { size_t hashed; unsigned n; numSplits = 0; hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip, splits, &numSplits); for (n = 0; n < numSplits; n++) { BYTE const* const split = ip + splits[n] - minMatchLength; U64 const xxhash = XXH64(split, minMatchLength, 0); U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1)); candidates[n].split = split; candidates[n].hash = hash; candidates[n].checksum = (U32)(xxhash >> 32); candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params); PREFETCH_L1(candidates[n].bucket); } for (n = 0; n < numSplits; n++) { size_t forwardMatchLength = 0, backwardMatchLength = 0, bestMatchLength = 0, mLength; BYTE const* const split = candidates[n].split; U32 const checksum = candidates[n].checksum; U32 const hash = candidates[n].hash; ldmEntry_t* const bucket = candidates[n].bucket; ldmEntry_t const* cur; ldmEntry_t const* bestEntry = NULL; ldmEntry_t newEntry; newEntry.offset = (U32)(split - base); newEntry.checksum = checksum; /* If a split point would generate a sequence overlapping with * the previous one, we merely register it in the hash table and * move on */ if (split < anchor) { ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); continue; } for (cur = bucket; cur < bucket + entsPerBucket; cur++) { size_t curForwardMatchLength, curBackwardMatchLength, curTotalMatchLength; if (cur->checksum != checksum || cur->offset <= lowestIndex) { continue; } if (extDict) { BYTE const* const curMatchBase = cur->offset < dictLimit ? 
dictBase : base; BYTE const* const pMatch = curMatchBase + cur->offset; BYTE const* const matchEnd = cur->offset < dictLimit ? dictEnd : iend; BYTE const* const lowMatchPtr = cur->offset < dictLimit ? dictStart : lowPrefixPtr; curForwardMatchLength = ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr); if (curForwardMatchLength < minMatchLength) { continue; } curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments( split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd); } else { /* !extDict */ BYTE const* const pMatch = base + cur->offset; curForwardMatchLength = ZSTD_count(split, pMatch, iend); if (curForwardMatchLength < minMatchLength) { continue; } curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr); } curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; if (curTotalMatchLength > bestMatchLength) { bestMatchLength = curTotalMatchLength; forwardMatchLength = curForwardMatchLength; backwardMatchLength = curBackwardMatchLength; bestEntry = cur; } } /* No match found -- insert an entry into the hash table * and process the next candidate match */ if (bestEntry == NULL) { ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); continue; } /* Match found */ mLength = forwardMatchLength + backwardMatchLength; { U32 const offset = (U32)(split - base) - bestEntry->offset; rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; /* Out of sequence storage */ if (rawSeqStore->size == rawSeqStore->capacity) return ERROR(dstSize_tooSmall); seq->litLength = (U32)(split - backwardMatchLength - anchor); seq->matchLength = (U32)mLength; seq->offset = offset; rawSeqStore->size++; } /* Insert the current entry into the hash table --- it must be * done after the previous block to avoid clobbering bestEntry */ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); anchor = split + forwardMatchLength; } ip += hashed; } return iend - anchor; } /*! ZSTD_ldm_reduceTable() : * reduce table indexes by `reducerValue` */ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, U32 const reducerValue) { U32 u; for (u = 0; u < size; u++) { if (table[u].offset < reducerValue) table[u].offset = 0; else table[u].offset -= reducerValue; } } size_t ZSTD_ldm_generateSequences( ldmState_t* ldmState, rawSeqStore_t* sequences, ldmParams_t const* params, void const* src, size_t srcSize) { U32 const maxDist = 1U << params->windowLog; BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; size_t const kMaxChunkSize = 1 << 20; size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0); size_t chunk; size_t leftoverSize = 0; assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize); /* Check that ZSTD_window_update() has been called for this chunk prior * to passing it to this function. */ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize); /* The input could be very large (in zstdmt), so it must be broken up into * chunks to enforce the maximum distance and handle overflow correction. */ assert(sequences->pos <= sequences->size); assert(sequences->size <= sequences->capacity); for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) { BYTE const* const chunkStart = istart + chunk * kMaxChunkSize; size_t const remaining = (size_t)(iend - chunkStart); BYTE const *const chunkEnd = (remaining < kMaxChunkSize) ? 
iend : chunkStart + kMaxChunkSize; size_t const chunkSize = chunkEnd - chunkStart; size_t newLeftoverSize; size_t const prevSize = sequences->size; assert(chunkStart < iend); /* 1. Perform overflow correction if necessary. */ if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) { U32 const ldmHSize = 1U << params->hashLog; U32 const correction = ZSTD_window_correctOverflow( &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart); ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); /* invalidate dictionaries on overflow correction */ ldmState->loadedDictEnd = 0; } /* 2. We enforce the maximum offset allowed. * * kMaxChunkSize should be small enough that we don't lose too much of * the window through early invalidation. * TODO: * Test the chunk size. * * Try invalidation after the sequence generation and test the * offset against maxDist directly. * * NOTE: Because of dictionaries + sequence splitting we MUST make sure * that any offset used is valid at the END of the sequence, since it may * be split into two sequences. This condition holds when using * ZSTD_window_enforceMaxDist(), but if we move to checking offsets * against maxDist directly, we'll have to carefully handle that case. */ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL); /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */ newLeftoverSize = ZSTD_ldm_generateSequences_internal( ldmState, sequences, params, chunkStart, chunkSize); if (ZSTD_isError(newLeftoverSize)) return newLeftoverSize; /* 4. We add the leftover literals from previous iterations to the first * newly generated sequence, or add the `newLeftoverSize` if none are * generated. */ /* Prepend the leftover literals from the last call */ if (prevSize < sequences->size) { sequences->seq[prevSize].litLength += (U32)leftoverSize; leftoverSize = newLeftoverSize; } else { assert(newLeftoverSize == chunkSize); leftoverSize += chunkSize; } } return 0; } void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; if (srcSize <= seq->litLength) { /* Skip past srcSize literals */ seq->litLength -= (U32)srcSize; return; } srcSize -= seq->litLength; seq->litLength = 0; if (srcSize < seq->matchLength) { /* Skip past the first srcSize of the match */ seq->matchLength -= (U32)srcSize; if (seq->matchLength < minMatch) { /* The match is too short, omit it */ if (rawSeqStore->pos + 1 < rawSeqStore->size) { seq[1].litLength += seq[0].matchLength; } rawSeqStore->pos++; } return; } srcSize -= seq->matchLength; seq->matchLength = 0; rawSeqStore->pos++; } } /** * If the sequence length is longer than remaining, then the sequence is split * between this block and the next. * * Returns the current sequence to handle, or if the rest of the block should * be literals, it returns a sequence with offset == 0. */ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, U32 const remaining, U32 const minMatch) { rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; assert(sequence.offset > 0); /* Likely: No partial sequence */ if (remaining >= sequence.litLength + sequence.matchLength) { rawSeqStore->pos++; return sequence; } /* Cut the sequence short (offset == 0 ==> rest is literals).
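 * A worked example with illustrative values, remaining == 15 against a
 * sequence of { litLength = 10, matchLength = 20 } :
 * \code
 * // remaining exceeds the literals but not the whole sequence, so the
 * // match is cut short: sequence.matchLength = 15 - 10 == 5;
 * // if 5 < minMatch, sequence.offset is then set to 0 (rest is literals).
 * \endcode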
*/ if (remaining <= sequence.litLength) { sequence.offset = 0; } else if (remaining < sequence.litLength + sequence.matchLength) { sequence.matchLength = remaining - sequence.litLength; if (sequence.matchLength < minMatch) { sequence.offset = 0; } } /* Skip past `remaining` bytes for the future sequences. */ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); return sequence; } void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); while (currPos && rawSeqStore->pos < rawSeqStore->size) { rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; if (currPos >= currSeq.litLength + currSeq.matchLength) { currPos -= currSeq.litLength + currSeq.matchLength; rawSeqStore->pos++; } else { rawSeqStore->posInSequence = currPos; break; } } if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { rawSeqStore->posInSequence = 0; } } size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { const ZSTD_compressionParameters* const cParams = &ms->cParams; unsigned const minMatch = cParams->minMatch; ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms)); /* Input bounds */ BYTE const* const istart = (BYTE const*)src; BYTE const* const iend = istart + srcSize; /* Input positions */ BYTE const* ip = istart; DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize); /* If using opt parser, use LDMs only as candidates rather than always accepting them */ if (cParams->strategy >= ZSTD_btopt) { size_t lastLLSize; ms->ldmSeqStore = rawSeqStore; lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize); ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize); return lastLLSize; } assert(rawSeqStore->pos <= rawSeqStore->size); assert(rawSeqStore->size <= rawSeqStore->capacity); /* Loop through each sequence and apply the block compressor to the literals */ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { /* maybeSplitSequence updates rawSeqStore->pos */ rawSeq const sequence = maybeSplitSequence(rawSeqStore, (U32)(iend - ip), minMatch); int i; /* End signal */ if (sequence.offset == 0) break; assert(ip + sequence.litLength + sequence.matchLength <= iend); /* Fill tables for block compressor */ ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); /* Run the block compressor */ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); { size_t const newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); ip += sequence.litLength; /* Update the repcodes */ for (i = ZSTD_REP_NUM - 1; i > 0; i--) rep[i] = rep[i-1]; rep[0] = sequence.offset; /* Store the sequence */ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, sequence.offset + ZSTD_REP_MOVE, sequence.matchLength - MINMATCH); ip += sequence.matchLength; } } /* Fill the tables for the block compressor */ ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); /* Compress the last literals */ return blockCompressor(ms, seqStore, rep, ip, iend - ip); } /**** ended inlining compress/zstd_ldm.c ****/ /**** start inlining compress/zstd_opt.c ****/ /* * Copyright (c) 2016-2021, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /**** skipping file: zstd_compress_internal.h ****/ /**** skipping file: hist.h ****/ /**** skipping file: zstd_opt.h ****/ #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ #define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */ #define ZSTD_MAX_PRICE (1<<30) #define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ /*-************************************* * Price functions for optimal parser ***************************************/ #if 0 /* approximation at bit level */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) # define WEIGHT(stat,opt) ((void)opt, ZSTD_bitWeight(stat)) #elif 0 /* fractional bit accuracy */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) # define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) #else /* opt==approx, ultra==accurate */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) # define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) #endif MEM_STATIC U32 ZSTD_bitWeight(U32 stat) { return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); } MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) { U32 const stat = rawStat + 1; U32 const hb = ZSTD_highbit32(stat); U32 const BWeight = hb * BITCOST_MULTIPLIER; U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; U32 const weight = BWeight + FWeight; assert(hb + BITCOST_ACCURACY < 31); return weight; } #if (DEBUGLEVEL>=2) /* debugging function, * @return price in bytes as fractional value * for debug messages only */ MEM_STATIC double ZSTD_fCost(U32 price) { return (double)price / (BITCOST_MULTIPLIER*8); } #endif static int ZSTD_compressedLiterals(optState_t const* const optPtr) { return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed; } static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) { if (ZSTD_compressedLiterals(optPtr)) optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel); optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel); optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel); optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel); } /* ZSTD_downscaleStat() : * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus) * return the resulting sum of elements */ static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus) { U32 s, sum=0; DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1); assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31); for (s=0; s<lastEltIndex+1; s++) { table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus)); sum += table[s]; } return sum; } /* ZSTD_rescaleFreqs() : * if first block (detected by optPtr->litLengthSum == 0) : init statistics * take hints from dictionary if there is one * or init from zero, using src for literals stats, or flat 1 for match symbols * otherwise downscale existing stats, to be used as seed for next block.
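* Added sketch (hypothetical values): when seeding from a dictionary, a
* literal whose Huffman code is b bits long receives frequency
* 2^(scaleLog - b); e.g. with scaleLog=11 a 3-bit code maps to 256 and an
* 11-bit code maps to 1, so cheaper symbols start with higher counts.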
*/ static void ZSTD_rescaleFreqs(optState_t* const optPtr, const BYTE* const src, size_t const srcSize, int const optLevel) { int const compressedLiterals = ZSTD_compressedLiterals(optPtr); DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); optPtr->priceType = zop_dynamic; if (optPtr->litLengthSum == 0) { /* first block : init */ if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */ DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef"); optPtr->priceType = zop_predef; } assert(optPtr->symbolCosts != NULL); if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { /* huffman table presumed generated by dictionary */ optPtr->priceType = zop_dynamic; if (compressedLiterals) { unsigned lit; assert(optPtr->litFreq != NULL); optPtr->litSum = 0; for (lit=0; lit<=MaxLit; lit++) { U32 const scaleLog = 11; /* scale to 2K */ U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit); assert(bitCost <= scaleLog); optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->litSum += optPtr->litFreq[lit]; } } { unsigned ll; FSE_CState_t llstate; FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable); optPtr->litLengthSum = 0; for (ll=0; ll<=MaxLL; ll++) { U32 const scaleLog = 10; /* scale to 1K */ U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); assert(bitCost < scaleLog); optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->litLengthSum += optPtr->litLengthFreq[ll]; } } { unsigned ml; FSE_CState_t mlstate; FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable); optPtr->matchLengthSum = 0; for (ml=0; ml<=MaxML; ml++) { U32 const scaleLog = 10; U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); assert(bitCost < scaleLog); optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; } } { unsigned of; FSE_CState_t ofstate; FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable); optPtr->offCodeSum = 0; for (of=0; of<=MaxOff; of++) { U32 const scaleLog = 10; U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); assert(bitCost < scaleLog); optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } } else { /* not a dictionary */ assert(optPtr->litFreq != NULL); if (compressedLiterals) { unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); } { unsigned ll; for (ll=0; ll<=MaxLL; ll++) optPtr->litLengthFreq[ll] = 1; } optPtr->litLengthSum = MaxLL+1; { unsigned ml; for (ml=0; ml<=MaxML; ml++) optPtr->matchLengthFreq[ml] = 1; } optPtr->matchLengthSum = MaxML+1; { unsigned of; for (of=0; of<=MaxOff; of++) optPtr->offCodeFreq[of] = 1; } optPtr->offCodeSum = MaxOff+1; } } else { /* new block : re-use previous statistics, scaled down */ if (compressedLiterals) optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0); optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0); optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0); } ZSTD_setBasePrices(optPtr, optLevel); } /* ZSTD_rawLiteralsCost() : * price of literals (only) in specified segment (which length can be 0). 
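* Added pricing note (descriptive): under dynamic statistics the price is
* litLength * litSumBasePrice minus WEIGHT(litFreq[lit]) per literal,
* i.e. roughly log2(litSum / litFreq[lit]) bits per literal expressed in
* BITCOST_MULTIPLIER units, so frequent literals come out cheap.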
* does not include price of literalLength symbol */ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, const optState_t* const optPtr, int optLevel) { if (litLength == 0) return 0; if (!ZSTD_compressedLiterals(optPtr)) return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bits per literal. */ if (optPtr->priceType == zop_predef) return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bits per literal - no statistic used */ /* dynamic statistics */ { U32 price = litLength * optPtr->litSumBasePrice; U32 u; for (u=0; u < litLength; u++) { assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */ price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel); } return price; } } /* ZSTD_litLengthPrice() : * cost of literalLength symbol */ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel) { if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); /* dynamic statistics */ { U32 const llCode = ZSTD_LLcode(litLength); return (LL_bits[llCode] * BITCOST_MULTIPLIER) + optPtr->litLengthSumBasePrice - WEIGHT(optPtr->litLengthFreq[llCode], optLevel); } } /* ZSTD_getMatchPrice() : * Provides the cost of the match part (offset + matchLength) of a sequence * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice(U32 const offset, U32 const matchLength, const optState_t* const optPtr, int const optLevel) { U32 price; U32 const offCode = ZSTD_highbit32(offset+1); U32 const mlBase = matchLength - MINMATCH; assert(matchLength >= MINMATCH); if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */ return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER); /* dynamic statistics */ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */ /* match Length */ { U32 const mlCode = ZSTD_MLcode(mlBase); price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel)); } price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */ DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price); return price; } /* ZSTD_updateStats() : * assumption : literals + litLength <= iend */ static void ZSTD_updateStats(optState_t* const optPtr, U32 litLength, const BYTE* literals, U32 offsetCode, U32 matchLength) { /* literals */ if (ZSTD_compressedLiterals(optPtr)) { U32 u; for (u=0; u < litLength; u++) optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; optPtr->litSum += litLength*ZSTD_LITFREQ_ADD; } /* literal Length */ { U32 const llCode = ZSTD_LLcode(litLength); optPtr->litLengthFreq[llCode]++; optPtr->litLengthSum++; } /* match offset code (0-2=>repCode; 3+=>offset+2) */ { U32 const offCode = ZSTD_highbit32(offsetCode+1); assert(offCode <= MaxOff); optPtr->offCodeFreq[offCode]++; optPtr->offCodeSum++; } /* match Length */ { U32 const mlBase = matchLength - MINMATCH; U32 const mlCode = ZSTD_MLcode(mlBase); optPtr->matchLengthFreq[mlCode]++; optPtr->matchLengthSum++; } } /* ZSTD_readMINMATCH() : * function
safe only for comparisons * assumption : memPtr must be at least 4 bytes before end of buffer */ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) { switch (length) { default : case 4 : return MEM_read32(memPtr); case 3 : if (MEM_isLittleEndian()) return MEM_read32(memPtr)<<8; else return MEM_read32(memPtr)>>8; } } /* Update hashTable3 up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* const ip) { U32* const hashTable3 = ms->hashTable3; U32 const hashLog3 = ms->hashLog3; const BYTE* const base = ms->window.base; U32 idx = *nextToUpdate3; U32 const target = (U32)(ip - base); size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3); assert(hashLog3 > 0); while(idx < target) { hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; idx++; } *nextToUpdate3 = target; return hashTable3[hash3]; } /*-************************************* * Binary Tree search ***************************************/ /** ZSTD_insertBt1() : add one or multiple positions to tree. * ip : assumed <= iend-8 . * @return : nb of positions added */ static U32 ZSTD_insertBt1( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, U32 const mls, const int extDict) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hashLog = cParams->hashLog; size_t const h = ZSTD_hashPtr(ip, hashLog, mls); U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask = (1 << btLog) - 1; U32 matchIndex = hashTable[h]; size_t commonLengthSmaller=0, commonLengthLarger=0; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const U32 dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; const BYTE* match; const U32 curr = (U32)(ip-base); const U32 btLow = btMask >= curr ? 
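/* Added note (illustrative): the tree keeps 2 slots per position in a
 * roll buffer of btMask+1 entries, so with btMask = 0xFFFF and
 * curr = 0x12345, only indexes above btLow = 0x2346 remain reachable. */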
0 : curr - btMask; U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = smallerPtr + 1; U32 dummy32; /* to be nullified at the end */ U32 const windowLow = ms->window.lowLimit; U32 matchEndIdx = curr+8+1; size_t bestLength = 8; U32 nbCompares = 1U << cParams->searchLog; #ifdef ZSTD_C_PREDICT U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0); U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1); predictedSmall += (predictedSmall>0); predictedLarge += (predictedLarge>0); #endif /* ZSTD_C_PREDICT */ DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr); assert(ip <= iend-8); /* required for h calculation */ hashTable[h] = curr; /* Update Hash Table */ assert(windowLow > 0); while (nbCompares-- && (matchIndex >= windowLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < curr); #ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ if (matchIndex == predictedSmall) { /* no need to check length, result known */ *smallerPtr = matchIndex; if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ predictedSmall = predictPtr[1] + (predictPtr[1]>0); continue; } if (matchIndex == predictedLarge) { *largerPtr = matchIndex; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ largerPtr = nextPtr; matchIndex = nextPtr[0]; predictedLarge = predictPtr[0] + (predictPtr[0]>0); continue; } #endif if (!extDict || (matchIndex+matchLength >= dictLimit)) { assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */ match = base + matchIndex; matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); } else { match = dictBase + matchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ } if (matchLength > bestLength) { bestLength = matchLength; if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; } if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ } if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ /* match is smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ } else { /* match is larger than current */ *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; { U32 positions = 0; if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed 
optimization */ assert(matchEndIdx > curr + 8); return MAX(positions, matchEndIdx - (curr + 8)); } } FORCE_INLINE_TEMPLATE void ZSTD_updateTree_internal( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, const U32 mls, const ZSTD_dictMode_e dictMode) { const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32 idx = ms->nextToUpdate; DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", idx, target, dictMode); while(idx < target) { U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict); assert(idx < (U32)(idx + forward)); idx += forward; } assert((size_t)(ip - base) <= (size_t)(U32)(-1)); assert((size_t)(iend - base) <= (size_t)(U32)(-1)); ms->nextToUpdate = target; } void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); } FORCE_INLINE_TEMPLATE U32 ZSTD_insertBtAndGetAllMatches ( ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode, const U32 rep[ZSTD_REP_NUM], U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ const U32 lengthToBeat, U32 const mls /* template */) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); const BYTE* const base = ms->window.base; U32 const curr = (U32)(ip-base); U32 const hashLog = cParams->hashLog; U32 const minMatch = (mls==3) ? 3 : 4; U32* const hashTable = ms->hashTable; size_t const h = ZSTD_hashPtr(ip, hashLog, mls); U32 matchIndex = hashTable[h]; U32* const bt = ms->chainTable; U32 const btLog = cParams->chainLog - 1; U32 const btMask= (1U << btLog) - 1; size_t commonLengthSmaller=0, commonLengthLarger=0; const BYTE* const dictBase = ms->window.dictBase; U32 const dictLimit = ms->window.dictLimit; const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const prefixStart = base + dictLimit; U32 const btLow = (btMask >= curr) ? 0 : curr - btMask; U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); U32 const matchLow = windowLow ? windowLow : 1; U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = bt + 2*(curr&btMask) + 1; U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */ U32 dummy32; /* to be nullified at the end */ U32 mnum = 0; U32 nbCompares = 1U << cParams->searchLog; const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; const ZSTD_compressionParameters* const dmsCParams = dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL; U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0; U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0; U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? 
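/* Added note (descriptive, an assumption about intent): dmsIndexDelta =
 * windowLow - dmsHighLimit remaps a dictMatchState index d into the
 * current index space as d + dmsIndexDelta, so the dictionary appears to
 * sit immediately below windowLow. */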
(1U << dmsBtLog) - 1 : 0; U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit; size_t bestLength = lengthToBeat-1; DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr); /* check repCode */ assert(ll0 <= 1); /* necessarily 1 or 0 */ { U32 const lastR = ZSTD_REP_NUM + ll0; U32 repCode; for (repCode = ll0; repCode < lastR; repCode++) { U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; U32 const repIndex = curr - repOffset; U32 repLen = 0; assert(curr >= dictLimit); if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */ /* We must validate the repcode offset because when we're using a dictionary the * valid offset range shrinks when the dictionary goes out of bounds. */ if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) { repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch; } } else { /* repIndex < dictLimit || repIndex >= curr */ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ? dmsBase + repIndex - dmsIndexDelta : dictBase + repIndex; assert(curr >= windowLow); if ( dictMode == ZSTD_extDict && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */ & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */) && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; } if (dictMode == ZSTD_dictMatchState && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */ & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch; } } /* save longer solution */ if (repLen > bestLength) { DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", repCode, ll0, repOffset, repLen); bestLength = repLen; matches[mnum].off = repCode - ll0; matches[mnum].len = (U32)repLen; mnum++; if ( (repLen > sufficient_len) | (ip+repLen == iLimit) ) { /* best possible */ return mnum; } } } } /* HC3 match finder */ if ((mls == 3) /*static*/ && (bestLength < mls)) { U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); if ((matchIndex3 >= matchLow) & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) { size_t mlen; if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) { const BYTE* const match = base + matchIndex3; mlen = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex3; mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); } /* save best solution */ if (mlen >= mls /* == 3 > bestLength */) { DEBUGLOG(8, "found small match with hlog3, of length %u", (U32)mlen); bestLength = mlen; assert(curr > matchIndex3); assert(mnum==0); /* no prior solution */ matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE; matches[0].len = (U32)mlen; mnum = 1; if ( (mlen > 
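/* Added note (descriptive): once a match reaches sufficient_len
 * (cParams->targetLength capped at ZSTD_OPT_NUM-1) or touches iLimit,
 * no longer candidate can improve the result, so the search returns
 * immediately. */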
sufficient_len) | (ip+mlen == iLimit) ) { /* best possible length */ ms->nextToUpdate = curr+1; /* skip insertion */ return 1; } } } /* no dictMatchState lookup: dicts don't have a populated HC3 table */ } hashTable[h] = curr; /* Update Hash Table */ while (nbCompares-- && (matchIndex >= matchLow)) { U32* const nextPtr = bt + 2*(matchIndex & btMask); const BYTE* match; size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(curr > matchIndex); if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) { assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */ match = base + matchIndex; if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit); } else { match = dictBase + matchIndex; assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); if (matchIndex+matchLength >= dictLimit) match = base + matchIndex; /* prepare for match[matchLength] read */ } if (matchLength > bestLength) { DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)", (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE); assert(matchEndIdx > matchIndex); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE; matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */ break; /* drop, to preserve bt consistency (miss a little bit of compression) */ } } if (match[matchLength] < ip[matchLength]) { /* match smaller than current */ *smallerPtr = matchIndex; /* update smaller idx */ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */ } else { *largerPtr = matchIndex; commonLengthLarger = matchLength; if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ largerPtr = nextPtr; matchIndex = nextPtr[0]; } } *smallerPtr = *largerPtr = 0; if (dictMode == ZSTD_dictMatchState && nbCompares) { size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); U32 dictMatchIndex = dms->hashTable[dmsH]; const U32* const dmsBt = dms->chainTable; commonLengthSmaller = commonLengthLarger = 0; while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) { const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dmsBase + dictMatchIndex; matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart); if (dictMatchIndex+matchLength >= dmsHighLimit) match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */ if 
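/* Added note (descriptive): the dms descent below mirrors the prefix-tree
 * loop above but is read-only; the dictionary's chainTable is only
 * traversed for candidates, never updated from here. */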
(matchLength > bestLength) { matchIndex = dictMatchIndex + dmsIndexDelta; DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)", (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE; matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { break; /* drop, to guarantee consistency (miss a little bit of compression) */ } } if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */ if (match[matchLength] < ip[matchLength]) { commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ } else { /* match is larger than current */ commonLengthLarger = matchLength; dictMatchIndex = nextPtr[0]; } } } assert(matchEndIdx > curr+8); ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ return mnum; } FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches ( ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */ ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode, const U32 rep[ZSTD_REP_NUM], U32 const ll0, U32 const lengthToBeat) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const matchLengthSearch = cParams->minMatch; DEBUGLOG(8, "ZSTD_BtGetAllMatches"); if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode); switch(matchLengthSearch) { case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3); default : case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4); case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5); case 7 : case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6); } } /************************* * LDM helper functions * *************************/ /* Struct containing info needed to make decision about ldm inclusion */ typedef struct { rawSeqStore_t seqStore; /* External match candidates store for this block */ U32 startPosInBlock; /* Start position of the current match candidate */ U32 endPosInBlock; /* End position of the current match candidate */ U32 offset; /* Offset of the match candidate */ } ZSTD_optLdm_t; /* ZSTD_optLdm_skipRawSeqStoreBytes(): * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'. 
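* Worked example (hypothetical numbers, added): with stored sequences
* spanning 30 and 40 bytes in total and nbBytes=50, the first sequence is
* consumed (pos++) and posInSequence becomes 50-30 = 20 within the second.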
*/ static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); while (currPos && rawSeqStore->pos < rawSeqStore->size) { rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; if (currPos >= currSeq.litLength + currSeq.matchLength) { currPos -= currSeq.litLength + currSeq.matchLength; rawSeqStore->pos++; } else { rawSeqStore->posInSequence = currPos; break; } } if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { rawSeqStore->posInSequence = 0; } } /* ZSTD_opt_getNextMatchAndUpdateSeqStore(): * Calculates the beginning and end of the next match in the current block. * Updates 'pos' and 'posInSequence' of the ldmSeqStore. */ static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock, U32 blockBytesRemaining) { rawSeq currSeq; U32 currBlockEndPos; U32 literalsBytesRemaining; U32 matchBytesRemaining; /* Setting match end position to MAX to ensure we never use an LDM during this block */ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { optLdm->startPosInBlock = UINT_MAX; optLdm->endPosInBlock = UINT_MAX; return; } /* Calculate appropriate bytes left in matchLength and litLength after adjusting based on ldmSeqStore->posInSequence */ currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); currBlockEndPos = currPosInBlock + blockBytesRemaining; literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ? currSeq.litLength - (U32)optLdm->seqStore.posInSequence : 0; matchBytesRemaining = (literalsBytesRemaining == 0) ? currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) : currSeq.matchLength; /* If there are more literal bytes than bytes remaining in block, no ldm is possible */ if (literalsBytesRemaining >= blockBytesRemaining) { optLdm->startPosInBlock = UINT_MAX; optLdm->endPosInBlock = UINT_MAX; ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining); return; } /* Matches may be < MINMATCH by this process. In that case, we will reject them when we are deciding whether or not to add the ldm */ optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; optLdm->offset = currSeq.offset; if (optLdm->endPosInBlock > currBlockEndPos) { /* Match ends after the block ends, we can't use the whole match */ optLdm->endPosInBlock = currBlockEndPos; ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock); } else { /* Consume nb of bytes equal to size of sequence left */ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining); } } /* ZSTD_optLdm_maybeAddMatch(): * Adds a match if it's long enough, based on its 'matchStartPosInBlock' * and 'matchEndPosInBlock', into 'matches'.
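* Worked example (hypothetical numbers, added): an LDM spanning block
* positions [100, 228) queried at currPosInBlock=120 gives posDiff=20 and
* a candidate length of 228-100-20 = 108 bytes at the stored offset.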
Maintains the correct ordering of 'matches' */ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, ZSTD_optLdm_t* optLdm, U32 currPosInBlock) { U32 posDiff = currPosInBlock - optLdm->startPosInBlock; /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */ U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE; /* Ensure that current block position is not outside of the match */ if (currPosInBlock < optLdm->startPosInBlock || currPosInBlock >= optLdm->endPosInBlock || candidateMatchLength < MINMATCH) { return; } if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) { DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u", candidateOffCode, candidateMatchLength, currPosInBlock); matches[*nbMatches].len = candidateMatchLength; matches[*nbMatches].off = candidateOffCode; (*nbMatches)++; } } /* ZSTD_optLdm_processMatchCandidate(): * Wrapper function to update ldm seq store and call ldm functions as necessary. */ static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches, U32 currPosInBlock, U32 remainingBytes) { if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { return; } if (currPosInBlock >= optLdm->endPosInBlock) { if (currPosInBlock > optLdm->endPosInBlock) { /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily * at the end of a match from the ldm seq store, and will often be some bytes * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots" */ U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock; ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); } ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); } ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock); } /*-******************************* * Optimal parser *********************************/ static U32 ZSTD_totalLen(ZSTD_optimal_t sol) { return sol.litlen + sol.mlen; } #if 0 /* debug */ static void listStats(const U32* table, int lastEltID) { int const nbElts = lastEltID + 1; int enb; for (enb=0; enb < nbElts; enb++) { (void)table; /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */ RAWLOG(2, "%4i,", table[enb]); } RAWLOG(2, " \n"); } #endif FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const int optLevel, const ZSTD_dictMode_e dictMode) { optState_t* const optStatePtr = &ms->opt; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; const BYTE* const base = ms->window.base; const BYTE* const prefixStart = base + ms->window.dictLimit; const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4; U32 nextToUpdate3 = ms->nextToUpdate; ZSTD_optimal_t* const opt = optStatePtr->priceTable; ZSTD_match_t* const matches = optStatePtr->matchTable; ZSTD_optimal_t lastSequence; ZSTD_optLdm_t optLdm; optLdm.seqStore = ms->ldmSeqStore ? 
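/* Added note (descriptive): opt[pos].price tracks the cheapest known cost
 * to encode the first pos bytes from ip; the forward loop relaxes it with
 * either one extra literal or a match ending at pos, and the backward
 * traversal below _shortestPath replays the winning chain. */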
*ms->ldmSeqStore : kNullRawSeqStore; optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip)); /* init */ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u", (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate); assert(optLevel <= 2); ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel); ip += (ip==prefixStart); /* Match Loop */ while (ip < ilimit) { U32 cur, last_pos = 0; /* find first match */ { U32 const litlen = (U32)(ip - anchor); U32 const ll0 = !litlen; U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch); ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, (U32)(ip-istart), (U32)(iend - ip)); if (!nbMatches) { ip++; continue; } /* initialize opt[0] */ { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } opt[0].mlen = 0; /* means is_a_literal */ opt[0].litlen = litlen; opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); /* large match -> immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; U32 const maxOffset = matches[nbMatches-1].off; DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series", nbMatches, maxML, maxOffset, (U32)(ip-prefixStart)); if (maxML > sufficient_len) { lastSequence.litlen = litlen; lastSequence.mlen = maxML; lastSequence.off = maxOffset; DEBUGLOG(6, "large match (%u>%u), immediate encoding", maxML, sufficient_len); cur = 0; last_pos = ZSTD_totalLen(lastSequence); goto _shortestPath; } } /* set prices for first matches starting position == 0 */ { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); U32 pos; U32 matchNb; for (pos = 1; pos < minMatch; pos++) { opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ } for (matchNb = 0; matchNb < nbMatches; matchNb++) { U32 const offset = matches[matchNb].off; U32 const end = matches[matchNb].len; for ( ; pos <= end ; pos++ ) { U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel); U32 const sequencePrice = literalsPrice + matchPrice; DEBUGLOG(7, "rPos:%u => set initial price : %.2f", pos, ZSTD_fCost(sequencePrice)); opt[pos].mlen = pos; opt[pos].off = offset; opt[pos].litlen = litlen; opt[pos].price = sequencePrice; } } last_pos = pos-1; } } /* check further positions */ for (cur = 1; cur <= last_pos; cur++) { const BYTE* const inr = ip + cur; assert(cur < ZSTD_OPT_NUM); DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur); /* Fix current position with one literal if cheaper */ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1; int const price = opt[cur-1].price + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); assert(price < 1000000000); /* overflow check */ if (price <= opt[cur].price) { DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]); opt[cur].mlen = 0; opt[cur].off = 0; opt[cur].litlen = litlen; opt[cur].price = price; } else { DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)", inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]); } } /* Set the repcodes of the current position.
We must do it here * because we rely on the repcodes of the 2nd to last sequence being * correct to set the next chunk's repcodes during the backward * traversal. */ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t)); assert(cur >= opt[cur].mlen); if (opt[cur].mlen != 0) { U32 const prev = cur - opt[cur].mlen; repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0); ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); } else { ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t)); } /* last match must start at a minimum distance of 8 from oend */ if (inr > ilimit) continue; if (cur == last_pos) break; if ( (optLevel==0) /*static_test*/ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) { DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1); continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ } { U32 const ll0 = (opt[cur].mlen != 0); U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0; U32 const previousPrice = opt[cur].price; U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel); U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch); U32 matchNb; ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, (U32)(inr-istart), (U32)(iend-inr)); if (!nbMatches) { DEBUGLOG(7, "rPos:%u : no match found", cur); continue; } { U32 const maxML = matches[nbMatches-1].len; DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u", inr-istart, cur, nbMatches, maxML); if ( (maxML > sufficient_len) || (cur + maxML >= ZSTD_OPT_NUM) ) { lastSequence.mlen = maxML; lastSequence.off = matches[nbMatches-1].off; lastSequence.litlen = litlen; cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's the first sequence, and it's okay */ last_pos = cur + ZSTD_totalLen(lastSequence); if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */ goto _shortestPath; } } /* set prices using matches found at position == cur */ for (matchNb = 0; matchNb < nbMatches; matchNb++) { U32 const offset = matches[matchNb].off; U32 const lastML = matches[matchNb].len; U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch; U32 mlen; DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u", matchNb, matches[matchNb].off, lastML, litlen); for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ U32 const pos = cur + mlen; int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); if ((pos > last_pos) || (price < opt[pos].price)) { DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */ opt[pos].mlen = mlen; opt[pos].off = offset; opt[pos].litlen = litlen; opt[pos].price = price; } else { DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)", pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */ } } } } } /* for (cur = 1; cur <= last_pos; cur++) */ lastSequence = opt[last_pos]; cur = last_pos > ZSTD_totalLen(lastSequence) ?
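/* Added note (descriptive): if the final sequence reaches back to (or
 * before) ip, cur is clamped to 0 so the reverse walk starts from the
 * origin of the price table. */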
last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */ assert(cur < ZSTD_OPT_NUM); /* control overflow */ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ assert(opt[0].mlen == 0); /* Set the next chunk's repcodes based on the repcodes of the beginning * of the last match, and the last sequence. This avoids us having to * update them while traversing the sequences. */ if (lastSequence.mlen != 0) { repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0); ZSTD_memcpy(rep, &reps, sizeof(reps)); } else { ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t)); } { U32 const storeEnd = cur + 1; U32 storeStart = storeEnd; U32 seqPos = cur; DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)", last_pos, cur); (void)last_pos; assert(storeEnd < ZSTD_OPT_NUM); DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off); opt[storeEnd] = lastSequence; while (seqPos > 0) { U32 const backDist = ZSTD_totalLen(opt[seqPos]); storeStart--; DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off); opt[storeStart] = opt[seqPos]; seqPos = (seqPos > backDist) ? seqPos - backDist : 0; } /* save sequences */ DEBUGLOG(6, "sending selected sequences into seqStore"); { U32 storePos; for (storePos=storeStart; storePos <= storeEnd; storePos++) { U32 const llen = opt[storePos].litlen; U32 const mlen = opt[storePos].mlen; U32 const offCode = opt[storePos].off; U32 const advance = llen + mlen; DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", anchor - istart, (unsigned)llen, (unsigned)mlen); if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */ assert(storePos == storeEnd); /* must be last sequence */ ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */ continue; /* will finish */ } assert(anchor + llen <= iend); ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen); ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH); anchor += advance; ip = anchor; } } ZSTD_setBasePrices(optStatePtr, optLevel); } } /* while (ip < ilimit) */ /* Return the last literals size */ return (size_t)(iend - anchor); } size_t ZSTD_compressBlock_btopt( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btopt"); return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict); } /* used in 2-pass strategy */ static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus) { U32 s, sum=0; assert(ZSTD_FREQ_DIV+bonus >= 0); for (s=0; s<lastEltIndex+1; s++) { table[s] <<= ZSTD_FREQ_DIV+bonus; table[s]--; sum += table[s]; } return sum; } /* used in 2-pass strategy */ MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr) { optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0); optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0); optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0); optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0); } /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. * only works on first block, with no dictionary and no ldm. * this function cannot error, hence its contract must be respected.
*/ static void ZSTD_initStats_ultra(ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep)); DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize); assert(ms->opt.litLengthSum == 0); /* first block */ assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */ assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */ assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as two's complement) */ ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt */ /* invalidate first scan from history */ ZSTD_resetSeqStore(seqStore); ms->window.base -= srcSize; ms->window.dictLimit += (U32)srcSize; ms->window.lowLimit = ms->window.dictLimit; ms->nextToUpdate = ms->window.dictLimit; /* reinforce weight of collected statistics */ ZSTD_upscaleStats(&ms->opt); } size_t ZSTD_compressBlock_btultra( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); } size_t ZSTD_compressBlock_btultra2( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { U32 const curr = (U32)((const BYTE*)src - ms->window.base); DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); /* 2-pass strategy: * this strategy makes a first pass over the first block to collect statistics * and seed next round's statistics with it. * After the 1st pass, the function forgets everything, and starts a new block. * Consequently, this can only work if no data has been previously loaded in tables, * aka, no dictionary, no prefix, no ldm preprocessing. * The compression ratio gain is generally small (~0.5% on first block), * the cost is 2x cpu time on first block.
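* Added note (descriptive, sizes hypothetical): e.g. a 128 KB first block
* pays roughly one extra btultra pass so the second pass can price symbols
* with observed rather than default statistics; blocks not larger than
* ZSTD_PREDEF_THRESHOLD skip the extra pass entirely.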
*/ assert(srcSize <= ZSTD_BLOCKSIZE_MAX); if ( (ms->opt.litLengthSum==0) /* first block */ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ && (srcSize > ZSTD_PREDEF_THRESHOLD) ) { ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); } size_t ZSTD_compressBlock_btopt_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btultra_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btopt_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict); } size_t ZSTD_compressBlock_btultra_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict); } /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ /**** ended inlining compress/zstd_opt.c ****/ #ifdef ZSTD_MULTITHREAD /**** start inlining compress/zstdmt_compress.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* ====== Compiler specifics ====== */ #if defined(_MSC_VER) # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ #endif /* ====== Constants ====== */ #define ZSTDMT_OVERLAPLOG_DEFAULT 0 /* ====== Dependencies ====== */ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/pool.h ****/ /**** skipping file: ../common/threading.h ****/ /**** skipping file: zstd_compress_internal.h ****/ /**** skipping file: zstd_ldm.h ****/ /**** skipping file: zstdmt_compress.h ****/ /* Guards code to support resizing the SeqPool. * We will want to resize the SeqPool to save memory in the future. * Until then, comment the code out since it is unused. 
*/ #define ZSTD_RESIZE_SEQPOOL 0 /* ====== Debug ====== */ #if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \ && !defined(_MSC_VER) \ && !defined(__MINGW32__) # include <stdio.h> # include <unistd.h> # include <sys/times.h> # define DEBUG_PRINTHEX(l,p,n) { \ unsigned debug_u; \ for (debug_u=0; debug_u<(n); debug_u++) \ RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ RAWLOG(l, " \n"); \ } static unsigned long long GetCurrentClockTimeMicroseconds(void) { static clock_t _ticksPerSecond = 0; if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); { struct tms junk; clock_t newTicks = (clock_t) times(&junk); return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); } } #define MUTEX_WAIT_TIME_DLEVEL 6 #define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \ if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ ZSTD_pthread_mutex_lock(mutex); \ { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ unsigned long long const elapsedTime = (afterTime-beforeTime); \ if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ elapsedTime, #mutex); \ } } \ } else { \ ZSTD_pthread_mutex_lock(mutex); \ } \ } #else # define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) # define DEBUG_PRINTHEX(l,p,n) {} #endif /* ===== Buffer Pool ===== */ /* a single Buffer Pool can be invoked from multiple threads in parallel */ typedef struct buffer_s { void* start; size_t capacity; } buffer_t; static const buffer_t g_nullBuffer = { NULL, 0 }; typedef struct ZSTDMT_bufferPool_s { ZSTD_pthread_mutex_t poolMutex; size_t bufferSize; unsigned totalBuffers; unsigned nbBuffers; ZSTD_customMem cMem; buffer_t bTable[1]; /* variable size */ } ZSTDMT_bufferPool; static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem) { unsigned const maxNbBuffers = 2*nbWorkers + 3; ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc( sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); if (bufPool==NULL) return NULL; if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { ZSTD_customFree(bufPool, cMem); return NULL; } bufPool->bufferSize = 64 KB; bufPool->totalBuffers = maxNbBuffers; bufPool->nbBuffers = 0; bufPool->cMem = cMem; return bufPool; } static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) { unsigned u; DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); if (!bufPool) return; /* compatibility with free on NULL */ for (u=0; u<bufPool->totalBuffers; u++) { DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start); ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem); } ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); ZSTD_customFree(bufPool, bufPool->cMem); } /* only works at initialization, not during compression */ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) { size_t const poolSize = sizeof(*bufPool) + (bufPool->totalBuffers - 1) * sizeof(buffer_t); unsigned u; size_t totalBufferSize = 0; ZSTD_pthread_mutex_lock(&bufPool->poolMutex); for (u=0; u<bufPool->totalBuffers; u++) totalBufferSize += bufPool->bTable[u].capacity; ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return poolSize + totalBufferSize; } /* ZSTDMT_setBufferSize() : * all future buffers provided by this buffer pool will have _at least_ this size * note : it's better for all buffers to have the same size, * as they become freely
interchangeable, reducing malloc/free usages and memory fragmentation */ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize) { ZSTD_pthread_mutex_lock(&bufPool->poolMutex); DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize); bufPool->bufferSize = bSize; ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); } static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers) { unsigned const maxNbBuffers = 2*nbWorkers + 3; if (srcBufPool==NULL) return NULL; if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ return srcBufPool; /* need a larger buffer pool */ { ZSTD_customMem const cMem = srcBufPool->cMem; size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ ZSTDMT_bufferPool* newBufPool; ZSTDMT_freeBufferPool(srcBufPool); newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); if (newBufPool==NULL) return newBufPool; ZSTDMT_setBufferSize(newBufPool, bSize); return newBufPool; } } /** ZSTDMT_getBuffer() : * assumption : bufPool must be valid * @return : a buffer, with start pointer and size * note: allocation may fail, in this case, start==NULL and size==0 */ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) { size_t const bSize = bufPool->bufferSize; DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers) { /* try to use an existing buffer */ buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)]; size_t const availBufferSize = buf.capacity; bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer; if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { /* large enough, but not too much */ DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", bufPool->nbBuffers, (U32)buf.capacity); ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return buf; } /* size conditions not respected : scratch this buffer, create new one */ DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing"); ZSTD_customFree(buf.start, bufPool->cMem); } ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* create new buffer */ DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); { buffer_t buffer; void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); buffer.start = start; /* note : start can be NULL if malloc fails ! */ buffer.capacity = (start==NULL) ? 0 : bSize; if (start==NULL) { DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!"); } else { DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize); } return buffer; } } #if ZSTD_RESIZE_SEQPOOL /** ZSTDMT_resizeBuffer() : * assumption : bufPool must be valid * @return : a buffer that is at least the buffer pool buffer size. * If a reallocation happens, the data in the input buffer is copied. */ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) { size_t const bSize = bufPool->bufferSize; if (buffer.capacity < bSize) { void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); buffer_t newBuffer; newBuffer.start = start; newBuffer.capacity = start == NULL ? 
0 : bSize; if (start != NULL) { assert(newBuffer.capacity >= buffer.capacity); ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity); DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize); return newBuffer; } DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!"); } return buffer; } #endif /* store buffer for later re-use, up to pool capacity */ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) { DEBUGLOG(5, "ZSTDMT_releaseBuffer"); if (buf.start == NULL) return; /* compatible with release on NULL */ ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers < bufPool->totalBuffers) { bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return; } ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* Reached bufferPool capacity (should not happen) */ DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); ZSTD_customFree(buf.start, bufPool->cMem); } /* ===== Seq Pool Wrapper ====== */ typedef ZSTDMT_bufferPool ZSTDMT_seqPool; static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) { return ZSTDMT_sizeof_bufferPool(seqPool); } static rawSeqStore_t bufferToSeq(buffer_t buffer) { rawSeqStore_t seq = kNullRawSeqStore; seq.seq = (rawSeq*)buffer.start; seq.capacity = buffer.capacity / sizeof(rawSeq); return seq; } static buffer_t seqToBuffer(rawSeqStore_t seq) { buffer_t buffer; buffer.start = seq.seq; buffer.capacity = seq.capacity * sizeof(rawSeq); return buffer; } static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) { if (seqPool->bufferSize == 0) { return kNullRawSeqStore; } return bufferToSeq(ZSTDMT_getBuffer(seqPool)); } #if ZSTD_RESIZE_SEQPOOL static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) { return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); } #endif static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) { ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); } static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) { ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq)); } static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) { ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); if (seqPool == NULL) return NULL; ZSTDMT_setNbSeq(seqPool, 0); return seqPool; } static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) { ZSTDMT_freeBufferPool(seqPool); } static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) { return ZSTDMT_expandBufferPool(pool, nbWorkers); } /* ===== CCtx Pool ===== */ /* a single CCtx Pool can be invoked from multiple threads in parallel */ typedef struct { ZSTD_pthread_mutex_t poolMutex; int totalCCtx; int availCCtx; ZSTD_customMem cMem; ZSTD_CCtx* cctx[1]; /* variable size */ } ZSTDMT_CCtxPool; /* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) { int cid; for (cid=0; cid<pool->totalCCtx; cid++) ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */ ZSTD_pthread_mutex_destroy(&pool->poolMutex); ZSTD_customFree(pool, pool->cMem); } /* ZSTDMT_createCCtxPool() : * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) {
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc( sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem); assert(nbWorkers > 0); if (!cctxPool) return NULL; if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { ZSTD_customFree(cctxPool, cMem); return NULL; } cctxPool->cMem = cMem; cctxPool->totalCCtx = nbWorkers; cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem); if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); return cctxPool; } static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, int nbWorkers) { if (srcPool==NULL) return NULL; if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */ /* need a larger cctx pool */ { ZSTD_customMem const cMem = srcPool->cMem; ZSTDMT_freeCCtxPool(srcPool); return ZSTDMT_createCCtxPool(nbWorkers, cMem); } } /* only works during initialization phase, not during compression */ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) { ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); { unsigned const nbWorkers = cctxPool->totalCCtx; size_t const poolSize = sizeof(*cctxPool) + (nbWorkers-1) * sizeof(ZSTD_CCtx*); unsigned u; size_t totalCCtxSize = 0; for (u=0; u<nbWorkers; u++) { totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]); } ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); assert(nbWorkers > 0); return poolSize + totalCCtxSize; } } static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) { DEBUGLOG(5, "ZSTDMT_getCCtx"); ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); if (cctxPool->availCCtx) { cctxPool->availCCtx--; { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx]; ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); return cctx; } } ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); DEBUGLOG(5, "create one more CCtx"); return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */ } static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) { if (cctx==NULL) return; /* compatibility with release on NULL */ ZSTD_pthread_mutex_lock(&pool->poolMutex); if (pool->availCCtx < pool->totalCCtx) pool->cctx[pool->availCCtx++] = cctx; else { /* pool overflow : should not happen, since totalCCtx==nbWorkers */ DEBUGLOG(4, "CCtx pool overflow : free cctx"); ZSTD_freeCCtx(cctx); } ZSTD_pthread_mutex_unlock(&pool->poolMutex); } /* ==== Serial State ==== */ typedef struct { void const* start; size_t size; } range_t; typedef struct { /* All variables in the struct are protected by mutex. */ ZSTD_pthread_mutex_t mutex; ZSTD_pthread_cond_t cond; ZSTD_CCtx_params params; ldmState_t ldmState; XXH64_state_t xxhState; unsigned nextJobID; /* Protects ldmWindow. * Must be acquired after the main mutex when acquiring both.
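 * An illustrative lock-ordering sketch (mirroring
 * ZSTDMT_serialState_ensureFinished() below):
 *   ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);          -- main mutex first
 *   ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); -- then this one
 *   ZSTD_window_clear(&serialState->ldmWindow);
 *   ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
 *   ZSTD_pthread_mutex_unlock(&serialState->mutex);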
*/ ZSTD_pthread_mutex_t ldmWindowMutex; ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ } serialState_t; static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize, const void* dict, size_t const dictSize, ZSTD_dictContentType_e dictContentType) { /* Adjust parameters */ if (params.ldmParams.enableLdm) { DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); assert(params.ldmParams.hashRateLog < 32); } else { ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams)); } serialState->nextJobID = 0; if (params.fParams.checksumFlag) XXH64_reset(&serialState->xxhState, 0); if (params.ldmParams.enableLdm) { ZSTD_customMem cMem = params.customMem; unsigned const hashLog = params.ldmParams.hashLog; size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); unsigned const bucketLog = params.ldmParams.hashLog - params.ldmParams.bucketSizeLog; unsigned const prevBucketLog = serialState->params.ldmParams.hashLog - serialState->params.ldmParams.bucketSizeLog; size_t const numBuckets = (size_t)1 << bucketLog; /* Size the seq pool tables */ ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize)); /* Reset the window */ ZSTD_window_init(&serialState->ldmState.window); /* Resize tables and output space if necessary. */ if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) { ZSTD_customFree(serialState->ldmState.hashTable, cMem); serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem); } if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) { ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem); } if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets) return 1; /* Zero the tables */ ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize); ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets); /* Update window state and fill hash table with dict */ serialState->ldmState.loadedDictEnd = 0; if (dictSize > 0) { if (dictContentType == ZSTD_dct_rawContent) { BYTE const* const dictEnd = (const BYTE*)dict + dictSize; ZSTD_window_update(&serialState->ldmState.window, dict, dictSize); ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams); serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base); } else { /* don't even load anything */ } } /* Initialize serialState's copy of ldmWindow.
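 * (ldmWindow is the snapshot that other threads read under ldmWindowMutex,
 * e.g. in ZSTDMT_waitForLdmComplete(), while ldmState.window itself is only
 * touched from within the serial section.)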
*/ serialState->ldmWindow = serialState->ldmState.window; } serialState->params = params; serialState->params.jobSize = (U32)jobSize; return 0; } static int ZSTDMT_serialState_init(serialState_t* serialState) { int initError = 0; ZSTD_memset(serialState, 0, sizeof(*serialState)); initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL); initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL); initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL); initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL); return initError; } static void ZSTDMT_serialState_free(serialState_t* serialState) { ZSTD_customMem cMem = serialState->params.customMem; ZSTD_pthread_mutex_destroy(&serialState->mutex); ZSTD_pthread_cond_destroy(&serialState->cond); ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex); ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond); ZSTD_customFree(serialState->ldmState.hashTable, cMem); ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); } static void ZSTDMT_serialState_update(serialState_t* serialState, ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore, range_t src, unsigned jobID) { /* Wait for our turn */ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); while (serialState->nextJobID < jobID) { DEBUGLOG(5, "wait for serialState->cond"); ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex); } /* A future job may error and skip our job */ if (serialState->nextJobID == jobID) { /* It is now our turn, do any processing necessary */ if (serialState->params.ldmParams.enableLdm) { size_t error; assert(seqStore.seq != NULL && seqStore.pos == 0 && seqStore.size == 0 && seqStore.capacity > 0); assert(src.size <= serialState->params.jobSize); ZSTD_window_update(&serialState->ldmState.window, src.start, src.size); error = ZSTD_ldm_generateSequences( &serialState->ldmState, &seqStore, &serialState->params.ldmParams, src.start, src.size); /* We provide a large enough buffer to never fail. */ assert(!ZSTD_isError(error)); (void)error; /* Update ldmWindow to match the ldmState.window and signal the main * thread if it is waiting for a buffer. 
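 * (The waiting side is ZSTDMT_waitForLdmComplete(), which sleeps on
 * ldmWindowCond until the published window no longer overlaps the buffer
 * it wants to reuse.)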
*/ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); serialState->ldmWindow = serialState->ldmState.window; ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); } if (serialState->params.fParams.checksumFlag && src.size > 0) XXH64_update(&serialState->xxhState, src.start, src.size); } /* Now it is the next job's turn */ serialState->nextJobID++; ZSTD_pthread_cond_broadcast(&serialState->cond); ZSTD_pthread_mutex_unlock(&serialState->mutex); if (seqStore.size > 0) { size_t const err = ZSTD_referenceExternalSequences( jobCCtx, seqStore.seq, seqStore.size); assert(serialState->params.ldmParams.enableLdm); assert(!ZSTD_isError(err)); (void)err; } } static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, unsigned jobID, size_t cSize) { ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); if (serialState->nextJobID <= jobID) { assert(ZSTD_isError(cSize)); (void)cSize; DEBUGLOG(5, "Skipping past job %u because of error", jobID); serialState->nextJobID = jobID + 1; ZSTD_pthread_cond_broadcast(&serialState->cond); ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); ZSTD_window_clear(&serialState->ldmWindow); ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); } ZSTD_pthread_mutex_unlock(&serialState->mutex); } /* ------------------------------------------ */ /* ===== Worker thread ===== */ /* ------------------------------------------ */ static const range_t kNullRange = { NULL, 0 }; typedef struct { size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */ buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */ unsigned jobID; /* set by mtctx, then read by worker => no barrier */ unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ size_t dstFlushed; /* used only by mtctx */ unsigned frameChecksumNeeded; /* used only by mtctx */ } ZSTDMT_jobDescription; #define JOB_ERROR(e) { \ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ job->cSize = e; \ ZSTD_pthread_mutex_unlock(&job->job_mutex); \ goto _endJob; \ } /* ZSTDMT_compressionJob() is a POOL_function type */ static void ZSTDMT_compressionJob(void* jobDescription) { ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params !
copy it, modify the copy */ ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); buffer_t dstBuff = job->dstBuff; size_t lastCBlockSize = 0; /* resources */ if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ dstBuff = ZSTDMT_getBuffer(job->bufPool); if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ } if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL) JOB_ERROR(ERROR(memory_allocation)); /* Don't compute the checksum for chunks, since we compute it externally, * but write it in the header. */ if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; /* Don't run LDM for the chunks, since we handle it externally */ jobParams.ldmParams.enableLdm = 0; /* Correct nbWorkers to 0. */ jobParams.nbWorkers = 0; /* init */ if (job->cdict) { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); assert(job->firstJob); /* only allowed for first job */ if (ZSTD_isError(initError)) JOB_ERROR(initError); } else { /* srcStart points at reloaded section */ U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size; { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); } { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */ ZSTD_dtlm_fast, NULL, /*cdict*/ &jobParams, pledgedSrcSize); if (ZSTD_isError(initError)) JOB_ERROR(initError); } } /* Perform serial step as early as possible, but after CCtx initialization */ ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); if (ZSTD_isError(hSize)) JOB_ERROR(hSize); DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); ZSTD_invalidateRepCodes(cctx); } /* compress */ { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); const BYTE* ip = (const BYTE*) job->src.start; BYTE* const ostart = (BYTE*)dstBuff.start; BYTE* op = ostart; BYTE* oend = op + dstBuff.capacity; int chunkNb; if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */ DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); assert(job->cSize == 0); for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); ip += chunkSize; op += cSize; assert(op < oend); /* stats */ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); job->cSize += cSize; job->consumed = chunkSize * chunkNb; DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)", (U32)cSize, (U32)job->cSize); ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */ ZSTD_pthread_mutex_unlock(&job->job_mutex); } /* last block */ 
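/* Worked example for the lastBlockSize computation below (illustrative
 * numbers only): with chunkSize = 4*ZSTD_BLOCKSIZE_MAX = 512 KB,
 *   src.size = 1 MB   -> lastBlockSize1 = 0 and src.size >= chunkSize,
 *                        so the last block is a full 512 KB chunk;
 *   src.size = 700 KB -> lastBlockSize1 = 700 KB & (512 KB - 1) = 188 KB;
 *   src.size = 0      -> lastBlockSize = 0, yet when lastJob is set a block
 *                        is still emitted, to carry the "last block" flag. */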
assert(chunkSize > 0); assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */ if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) { size_t const lastBlockSize1 = job->src.size & (chunkSize-1); size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; size_t const cSize = (job->lastJob) ? ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) : ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); lastCBlockSize = cSize; } } ZSTD_CCtx_trace(cctx, 0); _endJob: ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); if (job->prefix.size > 0) DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start); DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start); /* release resources */ ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); ZSTDMT_releaseCCtx(job->cctxPool, cctx); /* report */ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0); job->cSize += lastCBlockSize; job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */ ZSTD_pthread_cond_signal(&job->job_cond); ZSTD_pthread_mutex_unlock(&job->job_mutex); } /* ------------------------------------------ */ /* ===== Multi-threaded compression ===== */ /* ------------------------------------------ */ typedef struct { range_t prefix; /* read-only non-owned prefix buffer */ buffer_t buffer; size_t filled; } inBuff_t; typedef struct { BYTE* buffer; /* The round input buffer. All jobs get references * to pieces of the buffer. ZSTDMT_tryGetInputRange() * handles handing out job input buffers, and makes * sure it doesn't overlap with any pieces still in use. */ size_t capacity; /* The capacity of buffer. */ size_t pos; /* The position of the current inBuff in the round * buffer. Updated past the end of the inBuff once * the inBuff is sent to the worker thread. * pos <= capacity. */ } roundBuff_t; static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; #define RSYNC_LENGTH 32 typedef struct { U64 hash; U64 hitMask; U64 primePower; } rsyncState_t; struct ZSTDMT_CCtx_s { POOL_ctx* factory; ZSTDMT_jobDescription* jobs; ZSTDMT_bufferPool* bufPool; ZSTDMT_CCtxPool* cctxPool; ZSTDMT_seqPool* seqPool; ZSTD_CCtx_params params; size_t targetSectionSize; size_t targetPrefixSize; int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job.
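 * (jobReady is set when POOL_tryAdd() declines the job in
 * ZSTDMT_createCompressionJob(); posting is then retried on a later call.)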
*/ inBuff_t inBuff; roundBuff_t roundBuff; serialState_t serial; rsyncState_t rsync; unsigned jobIDMask; unsigned doneJobID; unsigned nextJobID; unsigned frameEnded; unsigned allJobsCompleted; unsigned long long frameContentSize; unsigned long long consumed; unsigned long long produced; ZSTD_customMem cMem; ZSTD_CDict* cdictLocal; const ZSTD_CDict* cdict; unsigned providedFactory: 1; }; static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem) { U32 jobNb; if (jobTable == NULL) return; for (jobNb=0; jobNb<nbJobs; jobNb++) { ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex); ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond); } ZSTD_customFree(jobTable, cMem); } /* ZSTDMT_allocJobsTable() * allocate and init a job table. * update *nbJobsPtr to next power of 2 value, as size of table */ static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem) { U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; U32 const nbJobs = 1 << nbJobsLog2; U32 jobNb; ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*) ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem); int initError = 0; if (jobTable==NULL) return NULL; *nbJobsPtr = nbJobs; for (jobNb=0; jobNb<nbJobs; jobNb++) { initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL); initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL); } if (initError != 0) { ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem); return NULL; } return jobTable; } static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) { U32 nbJobs = nbWorkers + 2; if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); mtctx->jobIDMask = 0; mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); if (mtctx->jobs==NULL) return ERROR(memory_allocation); assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */ mtctx->jobIDMask = nbJobs - 1; } return 0; } /* ZSTDMT_CCtxParam_setNbWorkers(): * Internal use only */ static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) { return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers); } MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) { ZSTDMT_CCtx* mtctx; U32 nbJobs = nbWorkers + 2; int initError; DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers); if (nbWorkers < 1) return NULL; nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX); if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL)) /* invalid custom allocator */ return NULL; mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem); if (!mtctx) return NULL; ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); mtctx->cMem = cMem; mtctx->allJobsCompleted = 1; if (pool != NULL) { mtctx->factory = pool; mtctx->providedFactory = 1; } else { mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); mtctx->providedFactory = 0; } mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ mtctx->jobIDMask = nbJobs - 1; mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); initError = ZSTDMT_serialState_init(&mtctx->serial); mtctx->roundBuff = kNullRoundBuff; if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) { ZSTDMT_freeCCtx(mtctx); return NULL; } DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers); return mtctx; } ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) { #ifdef ZSTD_MULTITHREAD return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); #else (void)nbWorkers; (void)cMem; (void)pool; return NULL; #endif } /* ZSTDMT_releaseAllJobResources() : * note : ensure all workers are killed first !
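 * (ZSTDMT_freeCCtx() below honors this by calling POOL_free() on the pools
 * it owns, stopping the worker threads, before releasing job resources.)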
*/ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) { unsigned jobID; DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { /* Copy the mutex/cond out */ ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); /* Clear the job description, but keep the mutex/cond */ ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); mtctx->jobs[jobID].job_mutex = mutex; mtctx->jobs[jobID].job_cond = cond; } mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; mtctx->allJobsCompleted = 1; } static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx) { DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); while (mtctx->doneJobID < mtctx->nextJobID) { unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask; ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */ ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); } ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); mtctx->doneJobID++; } } size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) { if (mtctx==NULL) return 0; /* compatible with free on NULL */ if (!mtctx->providedFactory) POOL_free(mtctx->factory); /* stop and free worker threads */ ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); ZSTDMT_freeBufferPool(mtctx->bufPool); ZSTDMT_freeCCtxPool(mtctx->cctxPool); ZSTDMT_freeSeqPool(mtctx->seqPool); ZSTDMT_serialState_free(&mtctx->serial); ZSTD_freeCDict(mtctx->cdictLocal); if (mtctx->roundBuff.buffer) ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); ZSTD_customFree(mtctx, mtctx->cMem); return 0; } size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) { if (mtctx == NULL) return 0; /* supports sizeof NULL */ return sizeof(*mtctx) + POOL_sizeof(mtctx->factory) + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription) + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + ZSTDMT_sizeof_seqPool(mtctx->seqPool) + ZSTD_sizeof_CDict(mtctx->cdictLocal) + mtctx->roundBuff.capacity; } /* ZSTDMT_resize() : * @return : error code if fails, 0 on success */ static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) { if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , ""); mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers); if (mtctx->bufPool == NULL) return ERROR(memory_allocation); mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); if (mtctx->seqPool == NULL) return ERROR(memory_allocation); ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); return 0; } /*! ZSTDMT_updateCParams_whileCompressing() : * Updates a selected set of compression parameters, remaining compatible with currently active frame. * New parameters will be applied to next compression job. 
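 * Note that windowLog is deliberately preserved (see saved_wlog in the body),
 * presumably because changing it mid-frame would invalidate the overlap and
 * round-buffer geometry already sized for in-flight jobs.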
*/ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams) { U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */ int const compressionLevel = cctxParams->compressionLevel; DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)", compressionLevel); mtctx->params.compressionLevel = compressionLevel; { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); cParams.windowLog = saved_wlog; mtctx->params.cParams = cParams; } } /* ZSTDMT_getFrameProgression(): * tells how much data has been consumed (input) and produced (output) for current frame. * able to count progression inside worker threads. * Note : mutex will be acquired during statistics collection inside workers. */ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) { ZSTD_frameProgression fps; DEBUGLOG(5, "ZSTDMT_getFrameProgression"); fps.ingested = mtctx->consumed + mtctx->inBuff.filled; fps.consumed = mtctx->consumed; fps.produced = fps.flushed = mtctx->produced; fps.currentJobID = mtctx->nextJobID; fps.nbActiveWorkers = 0; { unsigned jobNb; unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", mtctx->doneJobID, lastJobNb, mtctx->jobReady) for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { unsigned const wJobID = jobNb & mtctx->jobIDMask; ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); { size_t const cResult = jobPtr->cSize; size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; assert(flushed <= produced); fps.ingested += jobPtr->src.size; fps.consumed += jobPtr->consumed; fps.produced += produced; fps.flushed += flushed; fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size); } ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); } } return fps; } size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) { size_t toFlush; unsigned const jobID = mtctx->doneJobID; assert(jobID <= mtctx->nextJobID); if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */ /* look into oldest non-fully-flushed job */ { unsigned const wJobID = jobID & mtctx->jobIDMask; ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID]; ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); { size_t const cResult = jobPtr->cSize; size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; assert(flushed <= produced); assert(jobPtr->consumed <= jobPtr->src.size); toFlush = produced - flushed; /* if toFlush==0, nothing is available to flush. * However, jobID is expected to still be active: * if jobID was already completed and fully flushed, * ZSTDMT_flushProduced() should have already moved onto next job. * Therefore, some input has not yet been consumed. */ if (toFlush==0) { assert(jobPtr->consumed < jobPtr->src.size); } } ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); } return toFlush; } /* ------------------------------------------ */ /* ===== Multi-threaded compression ===== */ /* ------------------------------------------ */ static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params) { unsigned jobLog; if (params->ldmParams.enableLdm) { /* In Long Range Mode, the windowLog is typically oversized. 
* In which case, it's preferable to determine the jobSize * based on cycleLog instead. */ jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3); } else { jobLog = MAX(20, params->cParams.windowLog + 2); } return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX); } static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) { switch(strat) { case ZSTD_btultra2: return 9; case ZSTD_btultra: case ZSTD_btopt: return 8; case ZSTD_btlazy2: case ZSTD_lazy2: return 7; case ZSTD_lazy: case ZSTD_greedy: case ZSTD_dfast: case ZSTD_fast: default:; } return 6; } static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) { assert(0 <= ovlog && ovlog <= 9); if (ovlog == 0) return ZSTDMT_overlapLog_default(strat); return ovlog; } static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) { int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); assert(0 <= overlapRLog && overlapRLog <= 8); if (params->ldmParams.enableLdm) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on chainLog instead. * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */ ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - overlapRLog; } assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX); DEBUGLOG(4, "overlapLog : %i", params->overlapLog); DEBUGLOG(4, "overlap size : %i", 1 << ovLog); return (ovLog==0) ? 0 : (size_t)1 << ovLog; } /* ====================================== */ /* ======= Streaming API ======= */ /* ====================================== */ size_t ZSTDMT_initCStream_internal( ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)", (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx); /* params supposed partially validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ /* init */ if (params.nbWorkers != mtctx->params.nbWorkers) FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , ""); if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN; if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX; DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers); if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */ ZSTDMT_waitForAllJobsCompleted(mtctx); ZSTDMT_releaseAllJobResources(mtctx); mtctx->allJobsCompleted = 1; } mtctx->params = params; mtctx->frameContentSize = pledgedSrcSize; if (dict) { ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */ params.cParams, mtctx->cMem); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); } else { ZSTD_freeCDict(mtctx->cdictLocal); mtctx->cdictLocal = NULL; mtctx->cdict = cdict; } mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params); DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10)); mtctx->targetSectionSize = params.jobSize; if
(mtctx->targetSectionSize == 0) { mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params); } assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX); if (params.rsyncable) { /* Aim for the targetSectionSize as the average job size. */ U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20); U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20; assert(jobSizeMB >= 1); DEBUGLOG(4, "rsyncLog = %u", rsyncBits); mtctx->rsync.hash = 0; mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH); } if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */ DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize); DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10)); ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); { /* If ldm is enabled we need windowSize space. */ size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0; /* Two buffers of slack, plus extra space for the overlap * This is the minimum slack that LDM works with. One extra because * flush might waste up to targetSectionSize-1 bytes. Another extra * for the overlap (if > 0), then one to fill which doesn't overlap * with the LDM window. */ size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0); size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers; /* Compute the total size, and always have enough slack */ size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1); size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers; size_t const capacity = MAX(windowSize, sectionsSize) + slackSize; if (mtctx->roundBuff.capacity < capacity) { if (mtctx->roundBuff.buffer) ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem); if (mtctx->roundBuff.buffer == NULL) { mtctx->roundBuff.capacity = 0; return ERROR(memory_allocation); } mtctx->roundBuff.capacity = capacity; } } DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10)); mtctx->roundBuff.pos = 0; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; mtctx->inBuff.prefix = kNullRange; mtctx->doneJobID = 0; mtctx->nextJobID = 0; mtctx->frameEnded = 0; mtctx->allJobsCompleted = 0; mtctx->consumed = 0; mtctx->produced = 0; if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize, dict, dictSize, dictContentType)) return ERROR(memory_allocation); return 0; } /* ZSTDMT_writeLastEmptyBlock() * Write a single empty block with an end-of-frame to finish a frame. * Job must be created from streaming variant. * This function is always successful if expected conditions are fulfilled.
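 * (The only failure path is the dstBuffer allocation: as the body below
 * shows, that error is recorded into job->cSize rather than returned.)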
*/ static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) { assert(job->lastJob == 1); assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */ assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */ assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */ job->dstBuff = ZSTDMT_getBuffer(job->bufPool); if (job->dstBuff.start == NULL) { job->cSize = ERROR(memory_allocation); return; } assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */ job->src = kNullRange; job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); assert(!ZSTD_isError(job->cSize)); assert(job->consumed == 0); } static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp) { unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask; int const endFrame = (endOp == ZSTD_e_end); if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full"); assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); return 0; } if (!mtctx->jobReady) { BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start; DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ", mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size); mtctx->jobs[jobID].src.start = src; mtctx->jobs[jobID].src.size = srcSize; assert(mtctx->inBuff.filled >= srcSize); mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; mtctx->jobs[jobID].consumed = 0; mtctx->jobs[jobID].cSize = 0; mtctx->jobs[jobID].params = mtctx->params; mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? 
mtctx->cdict : NULL; mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; mtctx->jobs[jobID].dstBuff = g_nullBuffer; mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; mtctx->jobs[jobID].bufPool = mtctx->bufPool; mtctx->jobs[jobID].seqPool = mtctx->seqPool; mtctx->jobs[jobID].serial = &mtctx->serial; mtctx->jobs[jobID].jobID = mtctx->nextJobID; mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0); mtctx->jobs[jobID].lastJob = endFrame; mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0); mtctx->jobs[jobID].dstFlushed = 0; /* Update the round buffer pos and clear the input buffer to be reset */ mtctx->roundBuff.pos += srcSize; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; /* Set the prefix */ if (!endFrame) { size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; mtctx->inBuff.prefix.size = newPrefixSize; } else { /* endFrame==1 => no need for another input buffer */ mtctx->inBuff.prefix = kNullRange; mtctx->frameEnded = endFrame; if (mtctx->nextJobID == 0) { /* single job exception : checksum is already calculated directly within worker thread */ mtctx->params.fParams.checksumFlag = 0; } } if ( (srcSize == 0) && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) { DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame"); assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */ ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); mtctx->nextJobID++; return 0; } } DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))", mtctx->nextJobID, (U32)mtctx->jobs[jobID].src.size, mtctx->jobs[jobID].lastJob, mtctx->nextJobID, jobID); if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) { mtctx->nextJobID++; mtctx->jobReady = 0; } else { DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID); mtctx->jobReady = 1; } return 0; } /*! ZSTDMT_flushProduced() : * flush whatever data has been produced but not yet flushed in current job. * move to next job if current one is fully flushed. * `output` : `pos` will be updated with amount of data flushed . * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . 
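 * `end` : for ZSTD_e_end, the return value reports whether the frame is
 * completed, rather than whether internal buffers are fully flushed
 * (see the final lines of the function).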
* @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end) { unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask; DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)", blockToFlush, mtctx->doneJobID, mtctx->nextJobID); assert(output->size >= output->pos); ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); if ( blockToFlush && (mtctx->doneJobID < mtctx->nextJobID) ) { assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */ if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) { DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none", mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size); break; } DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)", mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */ } } /* try to flush something */ { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */ size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */ size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); if (ZSTD_isError(cSize)) { DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s", mtctx->doneJobID, ZSTD_getErrorName(cSize)); ZSTDMT_waitForAllJobsCompleted(mtctx); ZSTDMT_releaseAllJobResources(mtctx); return cSize; } /* add frame checksum if necessary (can only happen once) */ assert(srcConsumed <= srcSize); if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */ && mtctx->jobs[wJobID].frameChecksumNeeded ) { U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum); MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); cSize += 4; mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */ mtctx->jobs[wJobID].frameChecksumNeeded = 0; } if (cSize > 0) { /* compression is ongoing or completed */ size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos); DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)", (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize); assert(mtctx->doneJobID < mtctx->nextJobID); assert(cSize >= mtctx->jobs[wJobID].dstFlushed); assert(mtctx->jobs[wJobID].dstBuff.start != NULL); if (toFlush > 0) { ZSTD_memcpy((char*)output->dst + output->pos, (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, toFlush); } output->pos += toFlush; mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */ if ( (srcConsumed == srcSize) /* job is completed */ && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */ DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one", mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); 
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); DEBUGLOG(5, "dstBuffer released"); mtctx->jobs[wJobID].dstBuff = g_nullBuffer; mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */ mtctx->consumed += srcSize; mtctx->produced += cSize; mtctx->doneJobID++; } } /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */ if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed); if (srcSize > srcConsumed) return 1; /* current job not completely compressed */ } if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */ if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */ if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */ mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */ if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */ return 0; /* internal buffers fully flushed */ } /** * Returns the range of data used by the earliest job that is not yet complete. * If the data of the first job is broken up into two segments, we cover both * sections. */ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) { unsigned const firstJobID = mtctx->doneJobID; unsigned const lastJobID = mtctx->nextJobID; unsigned jobID; for (jobID = firstJobID; jobID < lastJobID; ++jobID) { unsigned const wJobID = jobID & mtctx->jobIDMask; size_t consumed; ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); consumed = mtctx->jobs[wJobID].consumed; ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); if (consumed < mtctx->jobs[wJobID].src.size) { range_t range = mtctx->jobs[wJobID].prefix; if (range.size == 0) { /* Empty prefix */ range = mtctx->jobs[wJobID].src; } /* Job source in multiple segments not supported yet */ assert(range.start <= mtctx->jobs[wJobID].src.start); return range; } } return kNullRange; } /** * Returns non-zero iff buffer and range overlap. */ static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) { BYTE const* const bufferStart = (BYTE const*)buffer.start; BYTE const* const bufferEnd = bufferStart + buffer.capacity; BYTE const* const rangeStart = (BYTE const*)range.start; BYTE const* const rangeEnd = range.size != 0 ? 
rangeStart + range.size : rangeStart; if (rangeStart == NULL || bufferStart == NULL) return 0; /* Empty ranges cannot overlap */ if (bufferStart == bufferEnd || rangeStart == rangeEnd) return 0; return bufferStart < rangeEnd && rangeStart < bufferEnd; } static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) { range_t extDict; range_t prefix; DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); extDict.start = window.dictBase + window.lowLimit; extDict.size = window.dictLimit - window.lowLimit; prefix.start = window.base + window.dictLimit; prefix.size = window.nextSrc - (window.base + window.dictLimit); DEBUGLOG(5, "extDict [0x%zx, 0x%zx)", (size_t)extDict.start, (size_t)extDict.start + extDict.size); DEBUGLOG(5, "prefix [0x%zx, 0x%zx)", (size_t)prefix.start, (size_t)prefix.start + prefix.size); return ZSTDMT_isOverlapped(buffer, extDict) || ZSTDMT_isOverlapped(buffer, prefix); } static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) { if (mtctx->params.ldmParams.enableLdm) { ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); DEBUGLOG(5, "source [0x%zx, 0x%zx)", (size_t)buffer.start, (size_t)buffer.start + buffer.capacity); ZSTD_PTHREAD_MUTEX_LOCK(mutex); while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) { DEBUGLOG(5, "Waiting for LDM to finish..."); ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex); } DEBUGLOG(6, "Done waiting for LDM to finish"); ZSTD_pthread_mutex_unlock(mutex); } } /** * Attempts to set the inBuff to the next section to fill. * If any part of the new section is still in use we give up. * Returns non-zero if the buffer is filled. */ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) { range_t const inUse = ZSTDMT_getInputDataInUse(mtctx); size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; size_t const target = mtctx->targetSectionSize; buffer_t buffer; DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); assert(mtctx->inBuff.buffer.start == NULL); assert(mtctx->roundBuff.capacity >= target); if (spaceLeft < target) { /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. * Simply copy the prefix to the beginning in that case. */ BYTE* const start = (BYTE*)mtctx->roundBuff.buffer; size_t const prefixSize = mtctx->inBuff.prefix.size; buffer.start = start; buffer.capacity = prefixSize; if (ZSTDMT_isOverlapped(buffer, inUse)) { DEBUGLOG(5, "Waiting for buffer..."); return 0; } ZSTDMT_waitForLdmComplete(mtctx, buffer); ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize); mtctx->inBuff.prefix.start = start; mtctx->roundBuff.pos = prefixSize; } buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; buffer.capacity = target; if (ZSTDMT_isOverlapped(buffer, inUse)) { DEBUGLOG(5, "Waiting for buffer..."); return 0; } assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix)); ZSTDMT_waitForLdmComplete(mtctx, buffer); DEBUGLOG(5, "Using prefix range [%zx, %zx)", (size_t)mtctx->inBuff.prefix.start, (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size); DEBUGLOG(5, "Using source range [%zx, %zx)", (size_t)buffer.start, (size_t)buffer.start + buffer.capacity); mtctx->inBuff.buffer = buffer; mtctx->inBuff.filled = 0; assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); return 1; } typedef struct { size_t toLoad; /* The number of bytes to load from the input. */ int flush; /* Boolean declaring if we must flush because we found a synchronization point. 
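 * A sketch of the detection scheme used below (illustrative numbers): a
 * rolling hash over the last RSYNC_LENGTH = 32 bytes is compared against
 * hitMask. With a 2 MB targetSectionSize, rsyncBits = highbit32(2) + 20 = 21,
 * so (hash & hitMask) == hitMask fires on average once every 2^21 bytes,
 * making job boundaries content-defined and therefore rsync-friendly.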
*/ } syncPoint_t; /** * Searches through the input for a synchronization point. If one is found, we * will instruct the caller to flush, and return the number of bytes to load. * Otherwise, we will load as many bytes as possible and instruct the caller * to continue as normal. */ static syncPoint_t findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) { BYTE const* const istart = (BYTE const*)input.src + input.pos; U64 const primePower = mtctx->rsync.primePower; U64 const hitMask = mtctx->rsync.hitMask; syncPoint_t syncPoint; U64 hash; BYTE const* prev; size_t pos; syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled); syncPoint.flush = 0; if (!mtctx->params.rsyncable) /* Rsync is disabled. */ return syncPoint; if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) /* Not enough to compute the hash. * We will miss any synchronization points in this RSYNC_LENGTH byte * window. However, since it depends only on the internal buffers, if the * state is already synchronized, we will remain synchronized. * Additionally, the probability that we miss a synchronization point is * low: RSYNC_LENGTH / targetSectionSize. */ return syncPoint; /* Initialize the loop variables. */ if (mtctx->inBuff.filled >= RSYNC_LENGTH) { /* We have enough bytes buffered to initialize the hash. * Start scanning at the beginning of the input. */ pos = 0; prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); if ((hash & hitMask) == hitMask) { /* We're already at a sync point so don't load any more until * we're able to flush this sync point. * This likely happened because the job table was full so we * couldn't add our job. */ syncPoint.toLoad = 0; syncPoint.flush = 1; return syncPoint; } } else { /* We don't have enough bytes buffered to initialize the hash, but * we know we have at least RSYNC_LENGTH bytes total. * Start scanning after the first RSYNC_LENGTH bytes less the bytes * already buffered. */ pos = RSYNC_LENGTH - mtctx->inBuff.filled; prev = (BYTE const*)mtctx->inBuff.buffer.start - pos; hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled); hash = ZSTD_rollingHash_append(hash, istart, pos); } /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll * through the input. If we hit a synchronization point, then cut the * job off, and tell the compressor to flush the job. Otherwise, load * all the bytes and continue as normal. * If we go too long without a synchronization point (targetSectionSize) * then a block will be emitted anyways, but this is okay, since if we * are already synchronized we will remain synchronized. */ for (; pos < syncPoint.toLoad; ++pos) { BYTE const toRemove = pos < RSYNC_LENGTH ?
prev[pos] : istart[pos - RSYNC_LENGTH]; /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */ hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); if ((hash & hitMask) == hitMask) { syncPoint.toLoad = pos + 1; syncPoint.flush = 1; break; } } return syncPoint; } size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx) { size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; if (hintInSize==0) hintInSize = mtctx->targetSectionSize; return hintInSize; } /** ZSTDMT_compressStream_generic() : * internal use only - exposed to be invoked from zstd_compress.c * assumption : output and input are valid (pos <= size) * @return : minimum amount of data remaining to flush, 0 if none */ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp) { unsigned forwardInputProgress = 0; DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)", (U32)endOp, (U32)(input->size - input->pos)); assert(output->pos <= output->size); assert(input->pos <= input->size); if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { /* current frame being ended. Only flush/end are allowed */ return ERROR(stage_wrong); } /* fill input buffer */ if ( (!mtctx->jobReady) && (input->size > input->pos) ) { /* support NULL input */ if (mtctx->inBuff.buffer.start == NULL) { assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */ if (!ZSTDMT_tryGetInputRange(mtctx)) { /* It is only possible for this operation to fail if there are * still compression jobs ongoing. */ DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed"); assert(mtctx->doneJobID != mtctx->nextJobID); } else DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); } if (mtctx->inBuff.buffer.start != NULL) { syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input); if (syncPoint.flush && endOp == ZSTD_e_continue) { endOp = ZSTD_e_flush; } assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u", (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize); ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); input->pos += syncPoint.toLoad; mtctx->inBuff.filled += syncPoint.toLoad; forwardInputProgress = syncPoint.toLoad>0; } } if ((input->pos < input->size) && (endOp == ZSTD_e_end)) { /* Can't end yet because the input is not fully consumed. * We are in one of these cases: * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job. * - We filled the input buffer: flush this job but don't end the frame. * - We hit a synchronization point: flush this job but don't end the frame. 
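The assert below mirrors these cases exactly : the buffer is either empty, filled to targetSectionSize, or was cut early at a synchronization point, which only rsyncable mode allows.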
*/ assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable); endOp = ZSTD_e_flush; } if ( (mtctx->jobReady) || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */ || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */ || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */ size_t const jobSize = mtctx->inBuff.filled; assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , ""); } /* check for potential compressed data ready to be flushed */ { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */ if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */ DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush); return remainingToFlush; } } /**** ended inlining compress/zstdmt_compress.c ****/ #endif /**** start inlining decompress/huf_decompress.c ****/ /* ****************************************************************** * huff0 huffman decoder, * part of Finite State Entropy library * Copyright (c) 2013-2021, Yann Collet, Facebook, Inc. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Dependencies ****************************************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../common/compiler.h ****/ /**** skipping file: ../common/bitstream.h ****/ /**** skipping file: ../common/fse.h ****/ #define HUF_STATIC_LINKING_ONLY /**** skipping file: ../common/huf.h ****/ /**** skipping file: ../common/error_private.h ****/ /* ************************************************************** * Macros ****************************************************************/ /* These two optional macros force the use one way or another of the two * Huffman decompression implementations. You can't force in both directions * at the same time. */ #if defined(HUF_FORCE_DECOMPRESS_X1) && \ defined(HUF_FORCE_DECOMPRESS_X2) #error "Cannot force the use of the X1 and X2 decoders at the same time!" 
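/* note : forcing a single decoder, e.g. by defining only HUF_FORCE_DECOMPRESS_X1, compiles the other implementation out entirely (see the #ifndef guards below), which may be useful when binary size matters. */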
#endif /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError /* ************************************************************** * Byte alignment for workSpace management ****************************************************************/ #define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1) #define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) /* ************************************************************** * BMI2 Variant Wrappers ****************************************************************/ #if DYNAMIC_BMI2 #define HUF_DGEN(fn) \ \ static size_t fn##_default( \ void* dst, size_t dstSize, \ const void* cSrc, size_t cSrcSize, \ const HUF_DTable* DTable) \ { \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ \ static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ void* dst, size_t dstSize, \ const void* cSrc, size_t cSrcSize, \ const HUF_DTable* DTable) \ { \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ { \ if (bmi2) { \ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ } #else #define HUF_DGEN(fn) \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ { \ (void)bmi2; \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } #endif /*-***************************/ /* generic DTableDesc */ /*-***************************/ typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) { DTableDesc dtd; ZSTD_memcpy(&dtd, table, sizeof(dtd)); return dtd; } #ifndef HUF_FORCE_DECOMPRESS_X2 /*-***************************/ /* single-symbol decoding */ /*-***************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */ /** * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at * a time. */ static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { U64 D4; if (MEM_isLittleEndian()) { D4 = symbol + (nbBits << 8); } else { D4 = (symbol << 8) + nbBits; } D4 *= 0x0001000100010001ULL; return D4; } typedef struct { U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1]; U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; BYTE symbols[HUF_SYMBOLVALUE_MAX + 1]; BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; } HUF_ReadDTableX1_Workspace; size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) { return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); } size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2) { U32 tableLog = 0; U32 nbSymbols = 0; size_t iSize; void* const dtPtr = DTable + 1; HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr; HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace; DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp)); if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge); DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... 
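(presumably because HUF_readStats_wksp() below overwrites every huffWeight entry that is subsequently read, so zero-filling would only add work)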
*/
    iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
    if (HUF_isError(iSize)) return iSize;

    /* Table header */
    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
        dtd.tableType = 0;
        dtd.tableLog = (BYTE)tableLog;
        ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
    }

    /* Compute symbols and rankStart given rankVal:
     *
     * rankVal already contains the number of values of each weight.
     *
     * symbols contains the symbols ordered by weight. First are the rankVal[0]
     * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
     * symbols[0] is filled (but unused) to avoid a branch.
     *
     * rankStart contains the offset where each rank belongs in the DTable.
     * rankStart[0] is not filled because there are no entries in the table for
     * weight 0.
     */
    {   int n;
        int nextRankStart = 0;
        int const unroll = 4;
        int const nLimit = (int)nbSymbols - unroll + 1;
        for (n=0; n<(int)tableLog+1; n++) {
            U32 const curr = nextRankStart;
            nextRankStart += wksp->rankVal[n];
            wksp->rankStart[n] = curr;
        }
        for (n=0; n < nLimit; n += unroll) {
            int u;
            for (u=0; u < unroll; ++u) {
                size_t const w = wksp->huffWeight[n+u];
                wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
            }
        }
        for (; n < (int)nbSymbols; ++n) {
            size_t const w = wksp->huffWeight[n];
            wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
        }
    }

    /* fill DTable
     * We fill all entries of each weight in order.
     * That way length is a constant for each iteration of the outer loop.
     * We can switch based on the length to a different inner loop which is
     * optimized for that particular case.
     */
    {   U32 w;
        int symbol=wksp->rankVal[0];
        int rankStart=0;
        for (w=1; w<tableLog+1; ++w) {
            int const symbolCount = wksp->rankVal[w];
            int const length = (1 << w) >> 1;
            int uStart = rankStart;
            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
            int s;
            int u;
            switch (length) {
            case 1:
                for (s=0; s<symbolCount; ++s) {
                    HUF_DEltX1 D;
                    D.byte = wksp->symbols[symbol + s];
                    D.nbBits = nbBits;
                    dt[uStart] = D;
                    uStart += 1;
                }
                break;
            case 2:
                for (s=0; s<symbolCount; ++s) {
                    HUF_DEltX1 D;
                    D.byte = wksp->symbols[symbol + s];
                    D.nbBits = nbBits;
                    dt[uStart+0] = D;
                    dt[uStart+1] = D;
                    uStart += 2;
                }
                break;
            case 4:
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    MEM_write64(dt + uStart, D4);
                    uStart += 4;
                }
                break;
            case 8:
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    MEM_write64(dt + uStart, D4);
                    MEM_write64(dt + uStart + 4, D4);
                    uStart += 8;
                }
                break;
            default:
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    for (u=0; u < length; u += 16) {
                        MEM_write64(dt + uStart + u + 0, D4);
                        MEM_write64(dt + uStart + u + 4, D4);
                        MEM_write64(dt + uStart + u + 8, D4);
                        MEM_write64(dt + uStart + u + 12, D4);
                    }
                    assert(u == length);
                    uStart += length;
                }
                break;
            }
            symbol += symbolCount;
            rankStart += symbolCount * length;
        }
    }
    return iSize;
}

FORCE_INLINE_TEMPLATE BYTE
HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(Dstream, dtLog);   /* note : dtLog >= 1 */
    BYTE const c = dt[val].byte;
    BIT_skipBits(Dstream, dt[val].nbBits);
    return c;
}

#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)

#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)

HINT_INLINE size_t
HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;
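    /* note : the 4-symbols-at-a-time pattern below is safe because a
     * successful reload leaves at least sizeof(bitDPtr->bitContainer)*8 - 7
     * bits in the container (57 on 64-bit targets), while each X1 symbol
     * consumes at most HUF_TABLELOG_MAX (12) bits : 4*12 = 48 <= 57.
     * On 32-bit targets the _2 variants compile out and only 2 symbols
     * (24 <= 25 bits) are decoded per reload. */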
/* up to 4 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { HUF_DECODE_SYMBOLX1_2(p, bitDPtr); HUF_DECODE_SYMBOLX1_1(p, bitDPtr); HUF_DECODE_SYMBOLX1_2(p, bitDPtr); HUF_DECODE_SYMBOLX1_0(p, bitDPtr); } /* [0-3] symbols remaining */ if (MEM_32bits()) while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd)) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); /* no more data to retrieve from bitstream, no need to reload */ while (p < pEnd) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); return pEnd-pStart; } FORCE_INLINE_TEMPLATE size_t HUF_decompress1X1_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { BYTE* op = (BYTE*)dst; BYTE* const oend = op + dstSize; const void* dtPtr = DTable + 1; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; BIT_DStream_t bitD; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog); if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); return dstSize; } FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* const olimit = oend - 3; const void* const dtPtr = DTable + 1; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; size_t const length1 = MEM_readLE16(istart); size_t const length2 = MEM_readLE16(istart+2); size_t const length3 = MEM_readLE16(istart+4); size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; U32 endSignal = 1; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ for ( ; (endSignal) & (op4 < olimit) ; ) { HUF_DECODE_SYMBOLX1_2(op1, &bitD1); HUF_DECODE_SYMBOLX1_2(op2, &bitD2); HUF_DECODE_SYMBOLX1_2(op3, &bitD3); HUF_DECODE_SYMBOLX1_2(op4, &bitD4); HUF_DECODE_SYMBOLX1_1(op1, &bitD1); HUF_DECODE_SYMBOLX1_1(op2, &bitD2); HUF_DECODE_SYMBOLX1_1(op3, &bitD3); HUF_DECODE_SYMBOLX1_1(op4, &bitD4); HUF_DECODE_SYMBOLX1_2(op1, &bitD1); HUF_DECODE_SYMBOLX1_2(op2, &bitD2); HUF_DECODE_SYMBOLX1_2(op3, &bitD3); HUF_DECODE_SYMBOLX1_2(op4, &bitD4); HUF_DECODE_SYMBOLX1_0(op1, &bitD1); HUF_DECODE_SYMBOLX1_0(op2, &bitD2); HUF_DECODE_SYMBOLX1_0(op3, &bitD3); HUF_DECODE_SYMBOLX1_0(op4, &bitD4); endSignal &= 
BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; } /* check corruption */ /* note : should not be necessary : op# advance in lock step, and we control op4. * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); /* check */ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endCheck) return ERROR(corruption_detected); } /* decoded size */ return dstSize; } } typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable); HUF_DGEN(HUF_decompress1X1_usingDTable_internal) HUF_DGEN(HUF_decompress4X1_usingDTable_internal) size_t HUF_decompress1X1_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 0) return ERROR(GENERIC); return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); } size_t HUF_decompress4X1_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 0) return ERROR(GENERIC); return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); } #endif /* HUF_FORCE_DECOMPRESS_X2 */ #ifndef HUF_FORCE_DECOMPRESS_X1 /* *************************/ /* double-symbols decoding */ /* *************************/ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; 
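/* note : an X2 entry packs up to two decoded bytes : `sequence` holds them in
 * little-endian order (first symbol in the low byte), `length` tells how many
 * of the two bytes are valid (1 or 2), and `nbBits` is the total number of
 * bits consumed by the stored symbol(s). For instance, a pair 'a' then 'b'
 * yields sequence == 'a' + ('b' << 8), length == 2. HUF_decodeSymbolX2()
 * below always copies 2 bytes from the entry but advances the output pointer
 * by `length`, so the hot loop emits pairs without branching on entry type. */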
/* double-symbols decoding */

typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;

typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];

/* HUF_fillDTableX2Level2() :
 * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
                           const U32* rankValOrigin, const int minWeight,
                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
                           U32 nbBitsBaseline, U16 baseSeq)
{
    HUF_DEltX2 DElt;
    U32 rankVal[HUF_TABLELOG_MAX + 1];

    /* get pre-calculated rankVal */
    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill skipped values */
    if (minWeight>1) {
        U32 i, skipSize = rankVal[minWeight];
        MEM_writeLE16(&(DElt.sequence), baseSeq);
        DElt.nbBits = (BYTE)(consumed);
        DElt.length = 1;
        for (i = 0; i < skipSize; i++)
            DTable[i] = DElt;
    }

    /* fill DTable */
    {   U32 s;
        for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
            const U32 symbol = sortedSymbols[s].symbol;
            const U32 weight = sortedSymbols[s].weight;
            const U32 nbBits = nbBitsBaseline - weight;
            const U32 length = 1 << (sizeLog-nbBits);
            const U32 start = rankVal[weight];
            U32 i = start;
            const U32 end = start + length;

            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
            DElt.nbBits = (BYTE)(nbBits + consumed);
            DElt.length = 2;
            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */

            rankVal[weight] += length;
    }   }
}

static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
                           const U32 nbBitsBaseline)
{
    U32 rankVal[HUF_TABLELOG_MAX + 1];
    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
    const U32 minBits  = nbBitsBaseline - maxWeight;
    U32 s;

    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill DTable */
    for (s=0; s<sortedListSize; s++) {
        const U16 symbol = sortedList[s].symbol;
        const U32 weight = sortedList[s].weight;
        const U32 nbBits = nbBitsBaseline - weight;
        const U32 start = rankVal[weight];
        const U32 length = 1 << (targetLog-nbBits);

        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
            U32 sortedRank;
            int minWeight = nbBits + scaleLog;
            if (minWeight < 1) minWeight = 1;
            sortedRank = rankStart[minWeight];
            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
                           rankValOrigin[nbBits], minWeight,
                           sortedList+sortedRank, sortedListSize-sortedRank,
                           nbBitsBaseline, symbol);
        } else {
            HUF_DEltX2 DElt;
            MEM_writeLE16(&(DElt.sequence), symbol);
            DElt.nbBits = (BYTE)(nbBits);
            DElt.length = 1;
            {   U32 const end = start + length;
                U32 u;
                for (u = start; u < end; u++) DTable[u] = DElt;
        }   }
        rankVal[weight] += length;
    }
}

size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
                       const void* src, size_t srcSize,
                             void* workSpace, size_t wkspSize)
{
    U32 tableLog, maxW, sizeOfSort, nbSymbols;
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    U32 const maxTableLog = dtd.maxTableLog;
    size_t iSize;
    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
    U32 *rankStart;

    rankValCol_t* rankVal;
    U32* rankStats;
    U32* rankStart0;
    sortedSymbol_t* sortedSymbol;
    BYTE* weightList;
    size_t spaceUsed32 = 0;

    rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
    rankStats = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_MAX + 1;
    rankStart0 = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_MAX + 2;
    sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
    spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
    weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;

    if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);

    rankStart = rankStart0 + 1;
    ZSTD_memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));

    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    /* ZSTD_memset(weightList, 0, sizeof(weightList)); */   /* is not necessary, even though some analyzer complain ... */

    iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
    if (HUF_isError(iSize)) return iSize;

    /* check result */
    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */

    /* find maxWeight */
    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}   /* necessarily finds a solution before 0 */

    /* Get start index of each weight */
    {   U32 w, nextRankStart = 0;
        for (w=1; w<maxW+1; w++) {
            U32 curr = nextRankStart;
            nextRankStart += rankStats[w];
            rankStart[w] = curr;
        }
        rankStart[0] = nextRankStart;   /* put all errors at beginning */
        sizeOfSort = nextRankStart;
    }

    /* sort symbols by weight */
    {   U32 s;
        for (s=0; s<nbSymbols; s++) {
            U32 const w = weightList[s];
            U32 const r = rankStart[w]++;
            sortedSymbol[r].symbol = (BYTE)s;
            sortedSymbol[r].weight = (BYTE)w;
        }
        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
    }

    /* Build rankVal */
    {   U32* const rankVal0 = rankVal[0];
        {   U32 const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
            U32 nextRankVal = 0;
            U32 w;
            for (w=1; w<maxW+1; w++) {
                U32 curr = nextRankVal;
                nextRankVal += rankStats[w] << (w+rescale);
                rankVal0[w] = curr;
        }   }
        {   U32 const minBits = tableLog+1 - maxW;
            U32 consumed;
            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
                U32* const rankValPtr = rankVal[consumed];
                U32 w;
                for (w = 1; w < maxW+1; w++) {
                    rankValPtr[w] = rankVal0[w] >> consumed;
    }   }   }   }

    HUF_fillDTableX2(dt, maxTableLog,
                   sortedSymbol, sizeOfSort,
                   rankStart0, rankVal, maxW,
                   tableLog+1);

    dtd.tableLog = (BYTE)maxTableLog;
    dtd.tableType = 1;
    ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
    return iSize;
}

FORCE_INLINE_TEMPLATE U32
HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    ZSTD_memcpy(op, dt+val, 2);
    BIT_skipBits(DStream, dt[val].nbBits);
    return dt[val].length;
}

FORCE_INLINE_TEMPLATE U32
HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    ZSTD_memcpy(op, dt+val, 1);
    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
    else {
        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
            BIT_skipBits(DStream, dt[val].nbBits);
            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
    }   }
    return 1;
}

#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)

HINT_INLINE size_t
HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
                const HUF_DEltX2* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 8 symbols at a time */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
    }

    /* closer to end : up to 2 symbols at a time */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

    while (p <= pEnd-2)
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */

    if (p < pEnd)
        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);

    return p-pStart;
}

FORCE_INLINE_TEMPLATE size_t
HUF_decompress1X2_usingDTable_internal_body(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    BIT_DStream_t bitD;

    /* Init */
    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );

    /* decode */
    {   BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
        DTableDesc const dtd = HUF_getDTableDesc(DTable);
        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
    }

    /* check */
    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);

    /*
decoded size */ return dstSize; } FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* const olimit = oend - (sizeof(size_t)-1); const void* const dtPtr = DTable+1; const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; size_t const length1 = MEM_readLE16(istart); size_t const length2 = MEM_readLE16(istart+2); size_t const length3 = MEM_readLE16(istart+4); size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; size_t const segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal = 1; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* 16-32 symbols per loop (4-8 symbols per stream) */ for ( ; (endSignal) & (op4 < olimit); ) { #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; #else HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = (U32)LIKELY( (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) & 
(BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); #endif } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endCheck) return ERROR(corruption_detected); } /* decoded size */ return dstSize; } } HUF_DGEN(HUF_decompress1X2_usingDTable_internal) HUF_DGEN(HUF_decompress4X2_usingDTable_internal) size_t HUF_decompress1X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); } size_t HUF_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); } #endif /* HUF_FORCE_DECOMPRESS_X1 */ /* ***********************************/ /* Universal decompression selectors */ /* ***********************************/ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #else return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #endif } size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #else return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); #endif } #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { /* single, double, quad */ {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; #endif /** HUF_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-computed metrics. * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . * Assumption : 0 < dstSize <= 128 KB */ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) { assert(dstSize > 0); assert(dstSize <= 128*1024); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dstSize; (void)cSrcSize; return 0; #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dstSize; (void)cSrcSize; return 1; #else /* decoder timing evaluation */ { U32 const Q = (cSrcSize >= dstSize) ? 
15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */ U32 const D256 = (U32)(dstSize >> 8); U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ return DTime1 < DTime0; } #endif } size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize == 0) return ERROR(corruption_detected); { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #else return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize): HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #endif } } size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #else return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize): HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); #endif } } size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #else return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #endif } #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } #endif size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #else return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); #endif } size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize == 0) return ERROR(corruption_detected); { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); #else return algoNb ? 
HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); #endif } } #ifndef ZSTD_NO_UNUSED_FUNCTIONS #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_readDTableX1_wksp(DTable, src, srcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); } #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_readDTableX2_wksp(DTable, src, srcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } #endif #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } #endif typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; #endif /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) 
(void)algoNb; assert(algoNb == 0); return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); #else return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); #endif } } size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); #else return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; #endif } } size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } #endif /**** ended inlining decompress/huf_decompress.c ****/ /**** start inlining decompress/zstd_ddict.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* zstd_ddict.c : * concentrates all logic that needs to know the internals of ZSTD_DDict object */ /*-******************************************************* * Dependencies *********************************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../common/cpu.h ****/ /**** skipping file: ../common/mem.h ****/ #define FSE_STATIC_LINKING_ONLY /**** skipping file: ../common/fse.h ****/ #define HUF_STATIC_LINKING_ONLY /**** skipping file: ../common/huf.h ****/ /**** start inlining zstd_decompress_internal.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /* zstd_decompress_internal: * objects and definitions shared within lib/decompress modules */ #ifndef ZSTD_DECOMPRESS_INTERNAL_H #define ZSTD_DECOMPRESS_INTERNAL_H /*-******************************************************* * Dependencies *********************************************************/ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ /**** skipping file: ../common/zstd_trace.h ****/ /*-******************************************************* * Constants *********************************************************/ static UNUSED_ATTR const U32 LL_base[MaxLL+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; static UNUSED_ATTR const U32 OF_base[MaxOff+1] = { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }; static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; static UNUSED_ATTR const U32 ML_base[MaxML+1] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; /*-******************************************************* * Decompression types *********************************************************/ typedef struct { U32 fastMode; U32 tableLog; } ZSTD_seqSymbol_header; typedef struct { U16 nextState; BYTE nbAdditionalBits; BYTE nbBits; U32 baseValue; } ZSTD_seqSymbol; #define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log))) #define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64)) #define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32)) typedef struct { ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ U32 rep[ZSTD_REP_NUM]; U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32]; } ZSTD_entropyDTables_t; typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage; typedef enum { zdss_init=0, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; typedef enum { ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */ ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */ } ZSTD_dictUses_e; /* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */ typedef struct { const ZSTD_DDict** ddictPtrTable; size_t ddictPtrTableSize; size_t ddictPtrCount; } ZSTD_DDictHashSet; struct ZSTD_DCtx_s { const ZSTD_seqSymbol* LLTptr; const 
ZSTD_seqSymbol* MLTptr; const ZSTD_seqSymbol* OFTptr; const HUF_DTable* HUFptr; ZSTD_entropyDTables_t entropy; U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */ const void* previousDstEnd; /* detect continuity */ const void* prefixStart; /* start of current segment */ const void* virtualStart; /* virtual start of previous segment if it was just before current one */ const void* dictEnd; /* end of previous segment */ size_t expected; ZSTD_frameHeader fParams; U64 processedCSize; U64 decodedSize; blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ ZSTD_dStage stage; U32 litEntropy; U32 fseEntropy; XXH64_state_t xxhState; size_t headerSize; ZSTD_format_e format; ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */ U32 validateChecksum; /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */ const BYTE* litPtr; ZSTD_customMem customMem; size_t litSize; size_t rleSize; size_t staticSize; int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */ /* dictionary */ ZSTD_DDict* ddictLocal; const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */ U32 dictID; int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */ ZSTD_dictUses_e dictUses; ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */ /* streaming */ ZSTD_dStreamStage streamStage; char* inBuff; size_t inBuffSize; size_t inPos; size_t maxWindowSize; char* outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t lhSize; void* legacyContext; U32 previousLegacyVersion; U32 legacyVersion; U32 hostageByte; int noForwardProgress; ZSTD_bufferMode_e outBufferMode; ZSTD_outBuffer expectedOutBuffer; /* workspace */ BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; size_t oversizedDuration; #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION void const* dictContentBeginForFuzzing; void const* dictContentEndForFuzzing; #endif /* Tracing */ #if ZSTD_TRACE ZSTD_TraceCtx traceCtx; #endif }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ /*-******************************************************* * Shared internal functions *********************************************************/ /*! ZSTD_loadDEntropy() : * dict : must point at beginning of a valid zstd dictionary. * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */ size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize); /*! ZSTD_checkContinuity() : * check if next `dst` follows previous position, where decompression ended. * If yes, do nothing (continue on current segment). * If not, classify previous segment as "external dictionary", and start a new segment. * This function cannot fail. */ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize); #endif /* ZSTD_DECOMPRESS_INTERNAL_H */ /**** ended inlining zstd_decompress_internal.h ****/ /**** start inlining zstd_ddict.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_DDICT_H #define ZSTD_DDICT_H /*-******************************************************* * Dependencies *********************************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../zstd.h ****/ /*-******************************************************* * Interface *********************************************************/ /* note: several prototypes are already published in `zstd.h` : * ZSTD_createDDict() * ZSTD_createDDict_byReference() * ZSTD_createDDict_advanced() * ZSTD_freeDDict() * ZSTD_initStaticDDict() * ZSTD_sizeof_DDict() * ZSTD_estimateDDictSize() * ZSTD_getDictID_fromDict() */ const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict); size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict); void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); #endif /* ZSTD_DDICT_H */ /**** ended inlining zstd_ddict.h ****/ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) /**** start inlining ../legacy/zstd_legacy.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_LEGACY_H #define ZSTD_LEGACY_H #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/error_private.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ #if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0) # undef ZSTD_LEGACY_SUPPORT # define ZSTD_LEGACY_SUPPORT 8 #endif #if (ZSTD_LEGACY_SUPPORT <= 1) /**** start inlining zstd_v01.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_V01_H_28739879432 #define ZSTD_V01_H_28739879432 #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include <stddef.h> /* size_t */ /* ************************************* * Simple one-step function ***************************************/ /** ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format compressedSize : is the exact source size maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. It must be equal or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
*/
size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format
    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
                                or an error code if it fails (which can be tested using ZSTDv01_isError())
    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
    note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
                                     size_t* cSize, unsigned long long* dBound);

/**
ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
*/
unsigned ZSTDv01_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;
ZSTDv01_Dctx* ZSTDv01_createDCtx(void);
size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);

size_t ZSTDv01_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);

size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);
size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the functions above alternately.
  ZSTDv01_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv01_decompressContinue().
  ZSTDv01_decompressContinue() will use previously decoded data as history for the current block, when that data is located just before the current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv01_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv01_magicNumber   0xFD2FB51E   /* Big Endian version */
#define ZSTDv01_magicNumberLE 0x1EB52FFD   /* Little Endian version */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V01_H_28739879432 */
/**** ended inlining zstd_v01.h ****/
#endif

#if (ZSTD_LEGACY_SUPPORT <= 2)
/**** start inlining zstd_v02.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V02_H_4174539423
#define ZSTD_V02_H_4174539423

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
*/
size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv02_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.2.x format
    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
                                or an error code if it fails (which can be tested using ZSTDv02_isError())
    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
    note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
                                     size_t* cSize, unsigned long long* dBound);

/**
ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
*/
unsigned ZSTDv02_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;
ZSTDv02_Dctx* ZSTDv02_createDCtx(void);
size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);

size_t ZSTDv02_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);

size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);
size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the functions above alternately.
  ZSTDv02_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv02_decompressContinue().
  ZSTDv02_decompressContinue() will use previously decoded data as history for the current block, when that data is located just before the current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv02_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv02_magicNumber 0xFD2FB522   /* v0.2 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V02_H_4174539423 */
/**** ended inlining zstd_v02.h ****/
#endif

#if (ZSTD_LEGACY_SUPPORT <= 3)
/**** start inlining zstd_v03.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V03_H_298734209782
#define ZSTD_V03_H_298734209782

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
*/
size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv03_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.3.x format
    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
                                or an error code if it fails (which can be tested using ZSTDv03_isError())
    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
    note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
                                     size_t* cSize, unsigned long long* dBound);

/**
ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
*/
unsigned ZSTDv03_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;
ZSTDv03_Dctx* ZSTDv03_createDCtx(void);
size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);

size_t ZSTDv03_decompressDCtx(void* ctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);

/* *************************************
*  Streaming functions
***************************************/
size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);

size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);
size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the functions above alternately.
  ZSTDv03_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv03_decompressContinue().
  ZSTDv03_decompressContinue() will use previously decoded data as history for the current block, when that data is located just before the current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv03_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv03_magicNumber 0xFD2FB523   /* v0.3 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V03_H_298734209782 */
/**** ended inlining zstd_v03.h ****/
#endif

#if (ZSTD_LEGACY_SUPPORT <= 4)
/**** start inlining zstd_v04.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_V04_H_91868324769238
#define ZSTD_V04_H_91868324769238

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Simple one-step function
***************************************/
/**
ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
    compressedSize : is the exact source size
    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
                      It must be equal or larger than originalSize, otherwise decompression will fail.
    return : the number of bytes decompressed into destination buffer (originalSize)
             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
*/
size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize);

/**
ZSTDv04_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.4.x format
    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
                                or an error code if it fails (which can be tested using ZSTDv04_isError())
    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
    note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
                                     size_t* cSize, unsigned long long* dBound);

/**
ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
*/
unsigned ZSTDv04_isError(size_t code);


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;
ZSTDv04_Dctx* ZSTDv04_createDCtx(void);
size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);

size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,
                              void* dst, size_t maxOriginalSize,
                        const void* src, size_t compressedSize);


/* *************************************
*  Direct Streaming
***************************************/
size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);

size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);
size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
  Use the functions above alternately.
  ZSTDv04_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv04_decompressContinue().
  ZSTDv04_decompressContinue() will use previously decoded data as history for the current block, when that data is located just before the current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTDv04_decompressContinue() has decoded some header.
*/


/* *************************************
*  Buffered Streaming
***************************************/
typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;
ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);
size_t         ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);

size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);
size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize);

size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);

/** ************************************************
*  Streaming decompression
*
*  A ZBUFFv04_DCtx object is required to track streaming operation.
*  Use ZBUFFv04_createDCtx() and ZBUFFv04_freeDCtx() to create/release resources.
*  Use ZBUFFv04_decompressInit() to start a new decompression operation.
*  ZBUFFv04_DCtx objects can be reused multiple times.
*
*  Optionally, a reference to a static dictionary can be set, using ZBUFFv04_decompressWithDictionary().
*  It must be the same content as the one set during the compression phase.
*  Dictionary content must remain accessible during the decompression process.
*
*  Use ZBUFFv04_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *maxDstSizePtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv04_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv04_recommendedDInSize / ZBUFFv04_recommendedDOutSize
*  output : ZBUFFv04_recommendedDOutSize == 128 KB block size is the internal unit, it ensures it's always possible to write a full block when it's decoded.
*  input  : ZBUFFv04_recommendedDInSize == 128 KB + 3; just follow indications from ZBUFFv04_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* **************************************************/
unsigned ZBUFFv04_isError(size_t errorCode);
const char* ZBUFFv04_getErrorName(size_t errorCode);

/** The functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are not compulsory, they just tend to offer better latency */
size_t ZBUFFv04_recommendedDInSize(void);
size_t ZBUFFv04_recommendedDOutSize(void);


/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTDv04_magicNumber 0xFD2FB524   /* v0.4 */


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_V04_H_91868324769238 */
/**** ended inlining zstd_v04.h ****/
#endif

#if (ZSTD_LEGACY_SUPPORT <= 5)
/**** start inlining zstd_v05.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv05_H
#define ZSTDv05_H

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Dependencies
***************************************/
#include <stddef.h>   /* size_t */
/**** skipping file: ../common/mem.h ****/


/* *************************************
*  Simple functions
***************************************/
/*! ZSTDv05_decompress() :
    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
    `dstCapacity` must be large enough, equal or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */
size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
                     const void* src, size_t compressedSize);

/**
ZSTDv05_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.5.x format
    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
                                or an error code if it fails (which can be tested using ZSTDv05_isError())
    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
    note : assumes `cSize` and `dBound` are _not_ NULL.
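    example (an illustrative sketch, not part of the library) : walking a buffer
    which may hold several concatenated v0.5.x frames :
      size_t cSize; unsigned long long dBound;
      ZSTDv05_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
      if (!ZSTDv05_isError(cSize)) {
          src = (const char*)src + cSize;   (skip to the next frame)
          srcSize -= cSize;
      }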
*/ void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound); /* ************************************* * Helper functions ***************************************/ /* Error Management */ unsigned ZSTDv05_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ const char* ZSTDv05_getErrorName(size_t code); /*!< provides readable string for an error code */ /* ************************************* * Explicit memory management ***************************************/ /** Decompression context */ typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx; ZSTDv05_DCtx* ZSTDv05_createDCtx(void); size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx); /*!< @return : errorCode */ /** ZSTDv05_decompressDCtx() : * Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */ size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*-*********************** * Simple Dictionary API *************************/ /*! ZSTDv05_decompress_usingDict() : * Decompression using a pre-defined Dictionary content (see dictBuilder). * Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted. * Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */ size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize); /*-************************ * Advanced Streaming API ***************************/ typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy; typedef struct { U64 srcSize; U32 windowLog; /* the only useful information to retrieve */ U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy; } ZSTDv05_parameters; size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize); size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize); void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx); size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx); size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*-*********************** * ZBUFF API *************************/ typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx; ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void); size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx); size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx); size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize); size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); /*-*************************************************************************** * Streaming decompression * * A ZBUFFv05_DCtx object is required to track streaming operations. * Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources. * Use ZBUFFv05_decompressInit() to start a new decompression operation, * or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFFv05_DCtx objects can be reused multiple times. * * Use ZBUFFv05_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. 
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv05_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()
*  output : ZBUFFv05_recommendedDOutSize == 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv05_recommendedDInSize == 128 KB + 3; just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/


/* *************************************
*  Tool functions
***************************************/
unsigned ZBUFFv05_isError(size_t errorCode);
const char* ZBUFFv05_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, and tend to offer better latency */
size_t ZBUFFv05_recommendedDInSize(void);
size_t ZBUFFv05_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv05_MAGICNUMBER 0xFD2FB525   /* v0.5 */


#if defined (__cplusplus)
}
#endif

#endif  /* ZSTDv05_H */
/**** ended inlining zstd_v05.h ****/
#endif

#if (ZSTD_LEGACY_SUPPORT <= 6)
/**** start inlining zstd_v06.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv06_H
#define ZSTDv06_H

#if defined (__cplusplus)
extern "C" {
#endif

/*====== Dependency ======*/
#include <stddef.h>   /* size_t */


/*====== Export for Windows ======*/
/*!
*  ZSTDv06_DLL_EXPORT :
*  Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)
#  define ZSTDLIBv06_API __declspec(dllexport)
#else
#  define ZSTDLIBv06_API
#endif


/* *************************************
*  Simple functions
***************************************/
/*! ZSTDv06_decompress() :
    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
    `dstCapacity` must be large enough, equal or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */
ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
                                    const void* src, size_t compressedSize);

/**
ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format
    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
                                or an error code if it fails (which can be tested using ZSTDv06_isError())
    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
    note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
                                     size_t* cSize, unsigned long long* dBound);


/* *************************************
*  Helper functions
***************************************/
ZSTDLIBv06_API size_t ZSTDv06_compressBound(size_t srcSize);   /*!< maximum compressed size (worst case scenario) */

/* Error Management */
ZSTDLIBv06_API unsigned    ZSTDv06_isError(size_t code);        /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code);   /*!< provides readable string for an error code */


/* *************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;
ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);
ZSTDLIBv06_API size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx);     /*!< @return : errorCode */

/** ZSTDv06_decompressDCtx() :
*   Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */
ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-***********************
*  Dictionary API
*************************/
/*! ZSTDv06_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */
ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
                                                   void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize,
                                             const void* dict,size_t dictSize);


/*-************************
*  Advanced Streaming API
***************************/
struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };
typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;

ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIBv06_API void   ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);

ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/* *************************************
*  ZBUFF API
***************************************/
typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;
ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);
ZSTDLIBv06_API size_t         ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);

ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);

ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,
                                                  void* dst, size_t* dstCapacityPtr,
                                            const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression howto
*
*  A ZBUFFv06_DCtx object is required to track streaming operations.
*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
*  Use ZBUFFv06_decompressInit() to start a new decompression operation,
*  or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv06_DCtx objects can be re-initialized multiple times.
*
*  Use ZBUFFv06_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv06_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
*  output : ZBUFFv06_recommendedDOutSize == 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv06_recommendedDInSize == 128 KB + 3;
*           just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
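*
*  Example decode loop (an illustrative sketch, not part of the library; `fin`, `fout`,
*  `inBuff` and `outBuff` are application-side assumptions, sized with the hints above) :
*      ZBUFFv06_DCtx* const dctx = ZBUFFv06_createDCtx();
*      size_t const inSize  = ZBUFFv06_recommendedDInSize();
*      size_t const outSize = ZBUFFv06_recommendedDOutSize();
*      size_t readSize;
*      ZBUFFv06_decompressInit(dctx);
*      while ((readSize = fread(inBuff, 1, inSize, fin)) != 0) {
*          const char* ip = inBuff;
*          while (readSize) {                           (one read may feed several calls)
*              size_t srcLeft = readSize;
*              size_t dstCap  = outSize;
*              size_t const hint = ZBUFFv06_decompressContinue(dctx, outBuff, &dstCap, ip, &srcLeft);
*              if (ZBUFFv06_isError(hint)) break;       (report via ZBUFFv06_getErrorName(hint))
*              fwrite(outBuff, 1, dstCap, fout);        (dstCap now holds the nb of bytes flushed)
*              ip += srcLeft; readSize -= srcLeft;      (srcLeft now holds the nb of bytes consumed)
*          }
*      }
*      ZBUFFv06_freeDCtx(dctx);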
* *******************************************************************************/

/* *************************************
*  Tool functions
***************************************/
ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);
ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, they tend to offer better latency */
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv06_MAGICNUMBER 0xFD2FB526   /* v0.6 */


#if defined (__cplusplus)
}
#endif

#endif  /* ZSTDv06_H */
/**** ended inlining zstd_v06.h ****/
#endif

#if (ZSTD_LEGACY_SUPPORT <= 7)
/**** start inlining zstd_v07.h ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTDv07_H_235446
#define ZSTDv07_H_235446

#if defined (__cplusplus)
extern "C" {
#endif

/*====== Dependency ======*/
#include <stddef.h>   /* size_t */


/*====== Export for Windows ======*/
/*!
*  ZSTDv07_DLL_EXPORT :
*  Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)
#  define ZSTDLIBv07_API __declspec(dllexport)
#else
#  define ZSTDLIBv07_API
#endif


/* *************************************
*  Simple API
***************************************/
/*! ZSTDv07_getDecompressedSize() :
*   @return : decompressed size if known, 0 otherwise.
       note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know precise failure cause.
       note 2 : decompressed size could be wrong or intentionally modified !
                always ensure results fit within application's authorized limits */
unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);

/*! ZSTDv07_decompress() :
    `compressedSize` : must be _exact_ size of compressed input, otherwise decompression will fail.
    `dstCapacity` must be equal or larger than originalSize.
    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
              or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */
ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
                                    const void* src, size_t compressedSize);

/**
ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format
    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
                                or an error code if it fails (which can be tested using ZSTDv07_isError())
    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
    note : assumes `cSize` and `dBound` are _not_ NULL.
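    example (an illustrative sketch, not part of the library; MY_APP_LIMIT is an
    application-defined cap, not a library symbol) : sizing an output buffer from dBound :
      size_t cSize; unsigned long long dBound;
      ZSTDv07_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
      if (!ZSTDv07_isError(cSize) && dBound != ZSTD_CONTENTSIZE_ERROR && dBound <= MY_APP_LIMIT) {
          void* const dst = malloc((size_t)dBound);   (dBound is an upper bound, not the exact size)
          ...
      }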
*/
void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
                                     size_t* cSize, unsigned long long* dBound);

/*====== Helper functions ======*/
ZSTDLIBv07_API unsigned    ZSTDv07_isError(size_t code);        /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code);   /*!< provides readable string for an error code */


/*-*************************************
*  Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;
ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);
ZSTDLIBv07_API size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx);     /*!< @return : errorCode */

/** ZSTDv07_decompressDCtx() :
*   Same as ZSTDv07_decompress(), requires an allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */
ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);


/*-************************
*  Simple dictionary API
***************************/
/*! ZSTDv07_decompress_usingDict() :
*   Decompression using a pre-defined Dictionary content (see dictBuilder).
*   Dictionary must be identical to the one used during compression.
*   Note : This function loads the dictionary, resulting in a significant startup time */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
                                                   void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize,
                                             const void* dict,size_t dictSize);


/*-**************************
*  Advanced Dictionary API
****************************/
/*! ZSTDv07_createDDict() :
*   Create a digested dictionary, ready to start decompression operation without startup delay.
*   `dict` can be released after creation */
typedef struct ZSTDv07_DDict_s ZSTDv07_DDict;
ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);
ZSTDLIBv07_API size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);

/*! ZSTDv07_decompress_usingDDict() :
*   Decompression using a pre-digested Dictionary
*   Faster startup than ZSTDv07_decompress_usingDict(), recommended when same dictionary is used multiple times. */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
                                                    void* dst, size_t dstCapacity,
                                              const void* src, size_t srcSize,
                                              const ZSTDv07_DDict* ddict);

typedef struct {
    unsigned long long frameContentSize;
    unsigned windowSize;
    unsigned dictID;
    unsigned checksumFlag;
} ZSTDv07_frameParams;

ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */


/* *************************************
*  Streaming functions
***************************************/
typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;
ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);
ZSTDLIBv07_API size_t         ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);

ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);
ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);

ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,
                                                  void* dst, size_t* dstCapacityPtr,
                                            const void* src, size_t* srcSizePtr);

/*-***************************************************************************
*  Streaming decompression howto
*
*  A ZBUFFv07_DCtx object is required to track streaming operations.
*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
*  Use ZBUFFv07_decompressInit() to start a new decompression operation,
*  or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
*  Note that ZBUFFv07_DCtx objects can be re-initialized multiple times.
*
*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.
*  *srcSizePtr and *dstCapacityPtr can be any size.
*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
*            or 0 when a frame is completely decoded,
*            or an error code, which can be tested using ZBUFFv07_isError().
*
*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
*  output : ZBUFFv07_recommendedDOutSize == 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
*           just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/

/* *************************************
*  Tool functions
***************************************/
ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);
ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);

/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
*   These sizes are just hints, they tend to offer better latency */
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);


/*-*************************************
*  Constants
***************************************/
#define ZSTDv07_MAGICNUMBER 0xFD2FB527   /* v0.7 */


#if defined (__cplusplus)
}
#endif

#endif  /* ZSTDv07_H_235446 */
/**** ended inlining zstd_v07.h ****/
#endif

/** ZSTD_isLegacy() :
    @return : > 0 if supported by legacy decoder. 0 otherwise.
              return value is the version.
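    example (an illustrative sketch, not part of the library) : dispatching on frame format :
      unsigned const legacyVersion = ZSTD_isLegacy(src, srcSize);
      size_t const r = legacyVersion ?
              ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, NULL, 0) :
              ZSTD_decompress(dst, dstCapacity, src, srcSize);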
*/ MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize) { U32 magicNumberLE; if (srcSize<4) return 0; magicNumberLE = MEM_readLE32(src); switch(magicNumberLE) { #if (ZSTD_LEGACY_SUPPORT <= 1) case ZSTDv01_magicNumberLE:return 1; #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case ZSTDv02_magicNumber : return 2; #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case ZSTDv03_magicNumber : return 3; #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case ZSTDv04_magicNumber : return 4; #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case ZSTDv05_MAGICNUMBER : return 5; #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case ZSTDv06_MAGICNUMBER : return 6; #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case ZSTDv07_MAGICNUMBER : return 7; #endif default : return 0; } } MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize) { U32 const version = ZSTD_isLegacy(src, srcSize); if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */ #if (ZSTD_LEGACY_SUPPORT <= 5) if (version==5) { ZSTDv05_parameters fParams; size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.srcSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) if (version==6) { ZSTDv06_frameParams fParams; size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.frameContentSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) if (version==7) { ZSTDv07_frameParams fParams; size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.frameContentSize; } #endif return 0; /* should not be possible */ } MEM_STATIC size_t ZSTD_decompressLegacy( void* dst, size_t dstCapacity, const void* src, size_t compressedSize, const void* dict,size_t dictSize) { U32 const version = ZSTD_isLegacy(src, compressedSize); (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ switch(version) { #if (ZSTD_LEGACY_SUPPORT <= 1) case 1 : return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case 2 : return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case 3 : return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { size_t result; ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv05_freeDCtx(zd); return result; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { size_t result; ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv06_freeDCtx(zd); return result; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { size_t result; ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv07_freeDCtx(zd); return result; } #endif default : return ERROR(prefix_unknown); } } MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize) { ZSTD_frameSizeInfo frameSizeInfo; U32 const version = ZSTD_isLegacy(src, srcSize); switch(version) 
{ #if (ZSTD_LEGACY_SUPPORT <= 1) case 1 : ZSTDv01_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case 2 : ZSTDv02_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case 3 : ZSTDv03_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : ZSTDv04_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : ZSTDv05_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : ZSTDv06_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : ZSTDv07_findFrameSizeInfoLegacy(src, srcSize, &frameSizeInfo.compressedSize, &frameSizeInfo.decompressedBound); break; #endif default : frameSizeInfo.compressedSize = ERROR(prefix_unknown); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; break; } if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) { frameSizeInfo.compressedSize = ERROR(srcSize_wrong); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; } return frameSizeInfo; } MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize) { ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize); return frameSizeInfo.compressedSize; } MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version) { switch(version) { default : case 1 : case 2 : case 3 : (void)legacyContext; return ERROR(version_unsupported); #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext); #endif } } MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, const void* dict, size_t dictSize) { DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion); if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); switch(newVersion) { default : case 1 : case 2 : case 3 : (void)dict; (void)dictSize; return 0; #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : { ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv04_decompressInit(dctx); ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? 
ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif } } MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version); switch(version) { default : case 1 : case 2 : case 3 : (void)legacyContext; (void)output; (void)input; return ERROR(version_unsupported); #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : { ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif } } #if defined (__cplusplus) } #endif #endif /* ZSTD_LEGACY_H */ /**** ended inlining ../legacy/zstd_legacy.h ****/ #endif /*-******************************************************* * Types *********************************************************/ struct ZSTD_DDict_s { void* dictBuffer; const void* dictContent; size_t dictSize; ZSTD_entropyDTables_t entropy; U32 dictID; U32 entropyPresent; ZSTD_customMem cMem; }; /* typedef'd to ZSTD_DDict within "zstd.h" */ const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict) { assert(ddict != NULL); return ddict->dictContent; } size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict) { assert(ddict != NULL); return ddict->dictSize; } void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { DEBUGLOG(4, "ZSTD_copyDDictParameters"); assert(dctx != NULL); assert(ddict != NULL); dctx->dictID = 
ddict->dictID; dctx->prefixStart = ddict->dictContent; dctx->virtualStart = ddict->dictContent; dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; dctx->previousDstEnd = dctx->dictEnd; #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION dctx->dictContentBeginForFuzzing = dctx->prefixStart; dctx->dictContentEndForFuzzing = dctx->previousDstEnd; #endif if (ddict->entropyPresent) { dctx->litEntropy = 1; dctx->fseEntropy = 1; dctx->LLTptr = ddict->entropy.LLTable; dctx->MLTptr = ddict->entropy.MLTable; dctx->OFTptr = ddict->entropy.OFTable; dctx->HUFptr = ddict->entropy.hufTable; dctx->entropy.rep[0] = ddict->entropy.rep[0]; dctx->entropy.rep[1] = ddict->entropy.rep[1]; dctx->entropy.rep[2] = ddict->entropy.rep[2]; } else { dctx->litEntropy = 0; dctx->fseEntropy = 0; } } static size_t ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType) { ddict->dictID = 0; ddict->entropyPresent = 0; if (dictContentType == ZSTD_dct_rawContent) return 0; if (ddict->dictSize < 8) { if (dictContentType == ZSTD_dct_fullDict) return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ return 0; /* pure content mode */ } { U32 const magic = MEM_readLE32(ddict->dictContent); if (magic != ZSTD_MAGIC_DICTIONARY) { if (dictContentType == ZSTD_dct_fullDict) return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ return 0; /* pure content mode */ } } ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE); /* load entropy tables */ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy( &ddict->entropy, ddict->dictContent, ddict->dictSize)), dictionary_corrupted, ""); ddict->entropyPresent = 1; return 0; } static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) { ddict->dictBuffer = NULL; ddict->dictContent = dict; if (!dict) dictSize = 0; } else { void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem); ddict->dictBuffer = internalBuffer; ddict->dictContent = internalBuffer; if (!internalBuffer) return ERROR(memory_allocation); ZSTD_memcpy(internalBuffer, dict, dictSize); } ddict->dictSize = dictSize; ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , ""); return 0; } ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem) { if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem); if (ddict == NULL) return NULL; ddict->cMem = customMem; { size_t const initResult = ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType); if (ZSTD_isError(initResult)) { ZSTD_freeDDict(ddict); return NULL; } } return ddict; } } /*! ZSTD_createDDict() : * Create a digested dictionary, to start decompression without startup delay. * `dict` content is copied inside DDict. * Consequently, `dict` can be released after `ZSTD_DDict` creation */ ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator); } /*! 
ZSTD_createDDict_byReference() :
 *  Create a digested dictionary, to start decompression without startup delay.
 *  Dictionary content is simply referenced, it will be accessed during decompression.
 *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
{
    ZSTD_customMem const allocator = { NULL, NULL, NULL };
    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
}


const ZSTD_DDict* ZSTD_initStaticDDict(
                                void* sBuffer, size_t sBufferSize,
                                const void* dict, size_t dictSize,
                                ZSTD_dictLoadMethod_e dictLoadMethod,
                                ZSTD_dictContentType_e dictContentType)
{
    size_t const neededSpace = sizeof(ZSTD_DDict)
                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
    assert(sBuffer != NULL);
    assert(dict != NULL);
    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
    if (sBufferSize < neededSpace) return NULL;
    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        ZSTD_memcpy(ddict+1, dict, dictSize);   /* local copy */
        dict = ddict+1;
    }
    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType) ))
        return NULL;
    return ddict;
}


size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = ddict->cMem;
        ZSTD_customFree(ddict->dictBuffer, cMem);
        ZSTD_customFree(ddict, cMem);
        return 0;
    }
}

/*! ZSTD_estimateDDictSize() :
 *  Estimates the amount of memory that will be needed to create a dictionary for decompression.
 *  Note : dictionaries created by reference (using ZSTD_dlm_byRef) are smaller */
size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
{
    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}

size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;   /* support sizeof on NULL */
    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
}

/*! ZSTD_getDictID_fromDDict() :
 *  Provides the dictID of the dictionary loaded into `ddict`.
 *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;
    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
}
/**** ended inlining decompress/zstd_ddict.c ****/
/**** start inlining decompress/zstd_decompress.c ****/
/*
 * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ***************************************************************
*  Tuning parameters
*****************************************************************/
/*!
 * HEAPMODE :
 * Select how default decompression function ZSTD_decompress() allocates its context,
 * on stack (0), or into heap (1, default; requires malloc()).
 * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
 */
#ifndef ZSTD_HEAPMODE
#  define ZSTD_HEAPMODE 1
#endif

/*!
* LEGACY_SUPPORT : * if set to 1+, ZSTD_decompress() can decode older formats (v0.1+) */ #ifndef ZSTD_LEGACY_SUPPORT # define ZSTD_LEGACY_SUPPORT 0 #endif /*! * MAXWINDOWSIZE_DEFAULT : * maximum window size accepted by DStream __by default__. * Frames requiring more memory will be rejected. * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize(). */ #ifndef ZSTD_MAXWINDOWSIZE_DEFAULT # define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1) #endif /*! * NO_FORWARD_PROGRESS_MAX : * maximum allowed nb of calls to ZSTD_decompressStream() * without any forward progress * (defined as: no byte read from input, and no byte flushed to output) * before triggering an error. */ #ifndef ZSTD_NO_FORWARD_PROGRESS_MAX # define ZSTD_NO_FORWARD_PROGRESS_MAX 16 #endif /*-******************************************************* * Dependencies *********************************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../common/cpu.h ****/ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/zstd_trace.h ****/ #define FSE_STATIC_LINKING_ONLY /**** skipping file: ../common/fse.h ****/ #define HUF_STATIC_LINKING_ONLY /**** skipping file: ../common/huf.h ****/ /**** skipping file: ../common/xxhash.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ /**** skipping file: zstd_decompress_internal.h ****/ /**** skipping file: zstd_ddict.h ****/ /**** start inlining zstd_decompress_block.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_DEC_BLOCK_H #define ZSTD_DEC_BLOCK_H /*-******************************************************* * Dependencies *********************************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../zstd.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ /**** skipping file: zstd_decompress_internal.h ****/ /* === Prototypes === */ /* note: prototypes already published within `zstd.h` : * ZSTD_decompressBlock() */ /* note: prototypes already published within `zstd_internal.h` : * ZSTD_getcBlockSize() * ZSTD_decodeSeqHeaders() */ /* ZSTD_decompressBlock_internal() : * decompress block, starting at `src`, * into destination buffer `dst`. * @return : decompressed block size, * or an error code (which can be tested using ZSTD_isError()) */ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const int frame); /* ZSTD_buildFSETable() : * generate FSE decoding table for one symbol (ll, ml or off) * this function must be called with valid parameters only * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.) * in which case it cannot fail. * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is * defined in zstd_decompress_internal.h. * Internal use only. 
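 *  Example call site (an illustrative sketch, mirroring how this file's block decoder
 *  builds the literal-length table; table names are the decoder's internal symbols) :
 *      U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];   (a U32 array guarantees 4-byte alignment)
 *      ZSTD_buildFSETable(dctx->entropy.LLTable, normalizedCounter, MaxLL,
 *                         LL_base, LL_bits, tableLog,
 *                         wksp, sizeof(wksp), dctx->bmi2);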
 */
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
             const short* normalizedCounter, unsigned maxSymbolValue,
             const U32* baseValue, const U32* nbAdditionalBits,
                   unsigned tableLog, void* wksp, size_t wkspSize,
                   int bmi2);

#endif /* ZSTD_DEC_BLOCK_H */
/**** ended inlining zstd_decompress_block.h ****/

#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
/**** skipping file: ../legacy/zstd_legacy.h ****/
#endif



/*************************************
 * Multiple DDicts Hashset internals *
 *************************************/

#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3
/* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
 * Currently, that means a 0.75 load factor.
 * So, if (count * COUNT_MULT) / (size * SIZE_MULT) != 0, then we've exceeded
 * the load factor of the ddict hash set.
 */

#define DDICT_HASHSET_TABLE_BASE_SIZE 64
#define DDICT_HASHSET_RESIZE_FACTOR 2

/* Hash function to determine starting position of dict insertion within the table
 * Returns an index within [0, hashSet->ddictPtrTableSize)
 */
static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
    const U64 hash = XXH64(&dictID, sizeof(U32), 0);
    /* DDict ptr table size is a power of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */
    return hash & (hashSet->ddictPtrTableSize - 1);
}

/* Adds DDict to a hashset without resizing it.
 * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
 * Returns 0 if successful, or a zstd error code if something went wrong.
 */
static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
    const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
    RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
    while (hashSet->ddictPtrTable[idx] != NULL) {
        /* Replace existing ddict if inserting ddict with same dictID */
        if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
            DEBUGLOG(4, "DictID already exists, replacing rather than adding");
            hashSet->ddictPtrTable[idx] = ddict;
            return 0;
        }
        idx = (idx + 1) & idxRangeMask;   /* increment first, then mask, so the probe wraps back into [0, tableSize) */
    }
    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
    hashSet->ddictPtrTable[idx] = ddict;
    hashSet->ddictPtrCount++;
    return 0;
}

/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
 * rehashes all values, allocates new table, frees old table.
 * Returns 0 on success, otherwise a zstd error code.
 */
static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
    size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
    const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
    const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
    size_t oldTableSize = hashSet->ddictPtrTableSize;
    size_t i;

    DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
    RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
    hashSet->ddictPtrTable = newTable;
    hashSet->ddictPtrTableSize = newTableSize;
    hashSet->ddictPtrCount = 0;
    for (i = 0; i < oldTableSize; ++i) {
        if (oldTable[i] != NULL) {
            FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
        }
    }
    ZSTD_customFree((void*)oldTable, customMem);
    DEBUGLOG(4, "Finished re-hash");
    return 0;
}

/* Fetches a DDict with the given dictID
 * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
 */
static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
    for (;;) {
        size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
        if (currDictID == dictID || currDictID == 0) {
            /* currDictID == 0 implies a NULL ddict entry */
            break;
        } else {
            idx = (idx + 1) & idxRangeMask;   /* goes back to start of table when we reach the end */
        }
    }
    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
    return hashSet->ddictPtrTable[idx];
}

/* Allocates space for and returns a ddict hash set
 * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
 * Returns NULL if allocation failed.
 */
static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
    ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
    DEBUGLOG(4, "Allocating new hash set");
    if (!ret) return NULL;   /* check the set itself before writing into it */
    ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
    if (!ret->ddictPtrTable) {
        ZSTD_customFree(ret, customMem);   /* don't leak the set when the table allocation fails */
        return NULL;
    }
    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
    ret->ddictPtrCount = 0;
    return ret;
}

/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
 * Note: The ZSTD_DDict* within the table are NOT freed.
 */
static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
    DEBUGLOG(4, "Freeing ddict hash set");
    if (hashSet && hashSet->ddictPtrTable) {
        ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
    }
    if (hashSet) {
        ZSTD_customFree(hashSet, customMem);
    }
}

/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
 * Returns 0 on success, or a ZSTD error.
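 * Worked example of the load-factor check below, with hypothetical numbers :
 * with ddictPtrTableSize = 64 and ddictPtrCount = 48,
 *   (48 * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT) / (64 * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT)
 *   = 192 / 192 = 1 != 0, so the 0.75 load factor is reached and the table expands to 128 slots.
 * With ddictPtrCount = 47, 188 / 192 == 0 in integer arithmetic, so no resize happens yet.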
 */
static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
    DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
    /* parenthesized so the integer division really compares count/size against SIZE_MULT/COUNT_MULT (0.75) */
    if ((hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT)
      / (hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT) != 0) {
        FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
    }
    FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
    return 0;
}

/*-*************************************************************
*   Context management
***************************************************************/
size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
{
    if (dctx==NULL) return 0;   /* support sizeof NULL */
    return sizeof(*dctx)
           + ZSTD_sizeof_DDict(dctx->ddictLocal)
           + dctx->inBuffSize + dctx->outBuffSize;
}

size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }


static size_t ZSTD_startingInputLength(ZSTD_format_e format)
{
    size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
    return startingInputLength;
}

static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
{
    assert(dctx->streamStage == zdss_init);
    dctx->format = ZSTD_f_zstd1;
    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
    dctx->outBufferMode = ZSTD_bm_buffered;
    dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
    dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
}

static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
{
    dctx->staticSize  = 0;
    dctx->ddict       = NULL;
    dctx->ddictLocal  = NULL;
    dctx->dictEnd     = NULL;
    dctx->ddictIsCold = 0;
    dctx->dictUses = ZSTD_dont_use;
    dctx->inBuff      = NULL;
    dctx->inBuffSize  = 0;
    dctx->outBuffSize = 0;
    dctx->streamStage = zdss_init;
    dctx->legacyContext = NULL;
    dctx->previousLegacyVersion = 0;
    dctx->noForwardProgress = 0;
    dctx->oversizedDuration = 0;
    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    dctx->ddictSet = NULL;
    ZSTD_DCtx_resetParameters(dctx);
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    dctx->dictContentEndForFuzzing = NULL;
#endif
}

ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
{
    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;

    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */

    ZSTD_initDCtx_internal(dctx);
    dctx->staticSize = workspaceSize;
    dctx->inBuff = (char*)(dctx+1);
    return dctx;
}

ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
{
    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;

    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
        if (!dctx) return NULL;
        dctx->customMem = customMem;
        ZSTD_initDCtx_internal(dctx);
        return dctx;
    }
}

ZSTD_DCtx* ZSTD_createDCtx(void)
{
    DEBUGLOG(3, "ZSTD_createDCtx");
    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
}

static void ZSTD_clearDict(ZSTD_DCtx* dctx)
{
    ZSTD_freeDDict(dctx->ddictLocal);
    dctx->ddictLocal = NULL;
    dctx->ddict = NULL;
    dctx->dictUses = ZSTD_dont_use;
}
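/* Typical DCtx lifecycle, as an illustrative sketch only
 * (`cSrc`, `cSize`, `dst`, `dstCap` stand for hypothetical caller-provided buffers) :
 * \code
 * ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 * if (dctx != NULL) {
 *     size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCap, cSrc, cSize);
 *     if (ZSTD_isError(dSize)) {
 *         ...   handle error, see ZSTD_getErrorName()
 *     }
 *     ZSTD_freeDCtx(dctx);
 * }
 * \endcode
 */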
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
    if (dctx==NULL) return 0;   /* support free on NULL */
    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
    {   ZSTD_customMem const cMem = dctx->customMem;
        ZSTD_clearDict(dctx);
        ZSTD_customFree(dctx->inBuff, cMem);
        dctx->inBuff = NULL;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
        if (dctx->legacyContext)
            ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
#endif
        if (dctx->ddictSet) {
            ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
            dctx->ddictSet = NULL;
        }
        ZSTD_customFree(dctx, cMem);
        return 0;
    }
}

/* no longer useful */
void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
{
    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
    ZSTD_memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
}

/* Given a dctx with digested frame params, re-selects the correct ZSTD_DDict based on
 * the requested dict ID from the frame. If a reference to the correct ZSTD_DDict exists,
 * it sets the ddict to be used to decompress the frame.
 *
 * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
 *
 * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
 */
static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
    assert(dctx->refMultipleDDicts && dctx->ddictSet);
    DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
    if (dctx->ddict) {
        const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
        if (frameDDict) {
            DEBUGLOG(4, "DDict found!");
            ZSTD_clearDict(dctx);
            dctx->dictID = dctx->fParams.dictID;
            dctx->ddict = frameDDict;
            dctx->dictUses = ZSTD_use_indefinitely;
        }
    }
}


/*-*************************************************************
 *   Frame header decoding
 ***************************************************************/

/*! ZSTD_isFrame() :
 *  Tells if the content of `buffer` starts with a valid Frame Identifier.
 *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
 *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
 *  Note 3 : Skippable Frame Identifiers are considered valid. */
unsigned ZSTD_isFrame(const void* buffer, size_t size)
{
    if (size < ZSTD_FRAMEIDSIZE) return 0;
    {   U32 const magic = MEM_readLE32(buffer);
        if (magic == ZSTD_MAGICNUMBER) return 1;
        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
    }
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
    if (ZSTD_isLegacy(buffer, size)) return 1;
#endif
    return 0;
}

/** ZSTD_frameHeaderSize_internal() :
 *  srcSize must be large enough to reach header size fields.
 *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
 * @return : size of the Frame Header
 *           or an error code, which can be tested with ZSTD_isError() */
static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
    size_t const minInputSize = ZSTD_startingInputLength(format);
    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");

    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
        U32 const dictID= fhd & 3;
        U32 const singleSegment = (fhd >> 5) & 1;
        U32 const fcsId = fhd >> 6;
        return minInputSize + !singleSegment
             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
             + (singleSegment && !fcsId);
    }
}

/** ZSTD_frameHeaderSize() :
 *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
 * @return : size of the Frame Header,
 *           or an error code (if srcSize is too small) */
size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
{
    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
}


/** ZSTD_getFrameHeader_advanced() :
 *  decode Frame Header, or require larger `srcSize`.
 *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
 * @return : 0, `zfhPtr` is correctly filled,
 *          >0, `srcSize` is too small, value is the wanted `srcSize` amount,
 *           or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
    const BYTE* ip = (const BYTE*)src;
    size_t const minInputSize = ZSTD_startingInputLength(format);

    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since those are 2 different signals */
    if (srcSize < minInputSize) return minInputSize;
    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");

    if ( (format != ZSTD_f_zstd1_magicless)
      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
            /* skippable frame */
            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
            ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
            zfhPtr->frameType = ZSTD_skippableFrame;
            return 0;
        }
        RETURN_ERROR(prefix_unknown, "");
    }

    /* ensure there is enough `srcSize` to fully read/decode frame header */
    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
        if (srcSize < fhsize) return fhsize;
        zfhPtr->headerSize = (U32)fhsize;
    }

    {   BYTE const fhdByte = ip[minInputSize-1];
        size_t pos = minInputSize;
        U32 const dictIDSizeCode = fhdByte&3;
        U32 const checksumFlag = (fhdByte>>2)&1;
        U32 const singleSegment = (fhdByte>>5)&1;
        U32 const fcsID = fhdByte>>6;
        U64 windowSize = 0;
        U32 dictID = 0;
        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
                        "reserved bits, must be zero");

        if (!singleSegment) {
            BYTE const wlByte = ip[pos++];
            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
            windowSize = (1ULL << windowLog);
            windowSize += (windowSize >> 3) * (wlByte&7);
        }
        switch(dictIDSizeCode)
        {
            default: assert(0);  /* impossible */
            case 0 : break;
            case 1 : dictID = ip[pos]; pos++; break;
            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
        }
        switch(fcsID)
        {
            default: assert(0);  /* impossible */
            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
        }
        if (singleSegment) windowSize = frameContentSize;

        zfhPtr->frameType = ZSTD_frame;
        zfhPtr->frameContentSize = frameContentSize;
        zfhPtr->windowSize = windowSize;
        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
        zfhPtr->dictID = dictID;
        zfhPtr->checksumFlag = checksumFlag;
    }
    return 0;
}

/** ZSTD_getFrameHeader() :
 *  decode Frame Header, or require larger `srcSize`.
 *  note : this function does not consume input, it only reads it.
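 *  Illustrative use, as a sketch (`buf` and `avail` are hypothetical caller state) :
 * \code
 * ZSTD_frameHeader zfh;
 * size_t const r = ZSTD_getFrameHeader(&zfh, buf, avail);
 * if (ZSTD_isError(r)) {
 *     ...        not a valid frame header
 * } else if (r > 0) {
 *     ...        need at least r bytes of input in total : read more, retry
 * } else {
 *     ...        zfh is filled : zfh.frameType, zfh.windowSize, zfh.frameContentSize, ...
 * }
 * \endcode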
 * @return : 0, `zfhPtr` is correctly filled,
 *          >0, `srcSize` is too small, value is the wanted `srcSize` amount,
 *           or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
{
    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}

/** ZSTD_getFrameContentSize() :
 *  compatible with legacy mode
 * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
 *           - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
 *           - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
    if (ZSTD_isLegacy(src, srcSize)) {
        unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
        return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
    }
#endif
    {   ZSTD_frameHeader zfh;
        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
            return ZSTD_CONTENTSIZE_ERROR;
        if (zfh.frameType == ZSTD_skippableFrame) {
            return 0;
        } else {
            return zfh.frameContentSize;
        }
    }
}

static size_t readSkippableFrameSize(void const* src, size_t srcSize)
{
    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
    U32 sizeU32;

    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");

    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
                    frameParameter_unsupported, "");
    {   size_t const skippableSize = skippableHeaderSize + sizeU32;
        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
        return skippableSize;
    }
}

/** ZSTD_findDecompressedSize() :
 *  compatible with legacy mode
 *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
 *      skippable frames
 * @return : decompressed size of the frames contained */
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
    unsigned long long totalDstSize = 0;

    while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
        U32 const magicNumber = MEM_readLE32(src);

        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
            size_t const skippableSize = readSkippableFrameSize(src, srcSize);
            if (ZSTD_isError(skippableSize)) {
                return ZSTD_CONTENTSIZE_ERROR;
            }
            assert(skippableSize <= srcSize);

            src = (const BYTE *)src + skippableSize;
            srcSize -= skippableSize;
            continue;
        }

        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;

            /* check for overflow */
            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
            totalDstSize += ret;
        }
        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
            if (ZSTD_isError(frameSrcSize)) {
                return ZSTD_CONTENTSIZE_ERROR;
            }

            src = (const BYTE *)src + frameSrcSize;
            srcSize -= frameSrcSize;
        }
    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */

    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;

    return totalDstSize;
}

/** ZSTD_getDecompressedSize() :
 *  compatible with legacy mode
 * @return : decompressed size if known, 0 otherwise
 *   note : 0 can mean any of the following :
 *        - frame content is empty
 *        - decompressed size field is not present in frame header
 *        - frame header unknown / not supported
 *        - frame header not complete (`srcSize` too small) */
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
{
    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
}
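/* Illustrative sketch : how callers are expected to discriminate the special
 * return codes of ZSTD_getFrameContentSize() (`cSrc` / `cSize` are hypothetical) :
 * \code
 * unsigned long long const fcs = ZSTD_getFrameContentSize(cSrc, cSize);
 * if (fcs == ZSTD_CONTENTSIZE_ERROR) {
 *     ...   not a valid zstd frame, or header incomplete
 * } else if (fcs == ZSTD_CONTENTSIZE_UNKNOWN) {
 *     ...   size was not recorded in the frame header : decompress by streaming
 * } else {
 *     ...   fcs bytes will be regenerated at the destination
 * }
 * \endcode
 */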
/** ZSTD_decodeFrameHeader() :
 * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
 *  If multiple DDict references are enabled, this will also choose the correct DDict to use.
 * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
{
    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
    if (ZSTD_isError(result)) return result;    /* invalid header */
    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");

    /* Reference DDict requested by frame if dctx references multiple ddicts */
    if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
        ZSTD_DCtx_selectFrameDDict(dctx);
    }

#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    /* Skip the dictID check in fuzzing mode, because it makes the search
     * harder.
     */
    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
                    dictionary_wrong, "");
#endif
    dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
    if (dctx->validateChecksum) XXH64_reset(&dctx->xxhState, 0);
    dctx->processedCSize += headerSize;
    return 0;
}

static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
{
    ZSTD_frameSizeInfo frameSizeInfo;
    frameSizeInfo.compressedSize = ret;
    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
    return frameSizeInfo;
}

static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
{
    ZSTD_frameSizeInfo frameSizeInfo;
    ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));

#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
    if (ZSTD_isLegacy(src, srcSize))
        return ZSTD_findFrameSizeInfoLegacy(src, srcSize);
#endif

    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
               frameSizeInfo.compressedSize <= srcSize);
        return frameSizeInfo;
    } else {
        const BYTE* ip = (const BYTE*)src;
        const BYTE* const ipstart = ip;
        size_t remainingSize = srcSize;
        size_t nbBlocks = 0;
        ZSTD_frameHeader zfh;

        /* Extract Frame Header */
        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
            if (ZSTD_isError(ret))
                return ZSTD_errorFrameSizeInfo(ret);
            if (ret > 0)
                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
        }

        ip += zfh.headerSize;
        remainingSize -= zfh.headerSize;

        /* Iterate over each block */
        while (1) {
            blockProperties_t blockProperties;
            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
            if (ZSTD_isError(cBlockSize))
                return ZSTD_errorFrameSizeInfo(cBlockSize);

            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));

            ip += ZSTD_blockHeaderSize + cBlockSize;
            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
            nbBlocks++;

            if (blockProperties.lastBlock) break;
        }

        /* Final frame content checksum */
        if (zfh.checksumFlag) {
            if (remainingSize < 4)
                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
            ip += 4;
        }

        frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
                                        ? zfh.frameContentSize
                                        : nbBlocks * zfh.blockSizeMax;
        return frameSizeInfo;
    }
}

/** ZSTD_findFrameCompressedSize() :
 *  compatible with legacy mode
 *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
 *  `srcSize` must be at least as large as the frame contained
 * @return : the compressed size of the frame starting at `src` */
size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
{
    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
    return frameSizeInfo.compressedSize;
}

/** ZSTD_decompressBound() :
 *  compatible with legacy mode
 *  `src` must point to the start of a ZSTD frame or a skippable frame
 *  `srcSize` must be at least as large as the frame contained
 * @return : the maximum decompressed size of the compressed source */
unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
{
    unsigned long long bound = 0;
    /* Iterate over each frame */
    while (srcSize > 0) {
        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
        size_t const compressedSize = frameSizeInfo.compressedSize;
        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
            return ZSTD_CONTENTSIZE_ERROR;
        assert(srcSize >= compressedSize);
        src = (const BYTE*)src + compressedSize;
        srcSize -= compressedSize;
        bound += decompressedBound;
    }
    return bound;
}


/*-*************************************************************
 *   Frame decoding
 ***************************************************************/

/** ZSTD_insertBlock() :
 *  insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
{
    DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
    ZSTD_checkContinuity(dctx, blockStart, blockSize);
    dctx->previousDstEnd = (const char*)blockStart + blockSize;
    return blockSize;
}


static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_copyRawBlock");
    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
    if (dst == NULL) {
        if (srcSize == 0) return 0;
        RETURN_ERROR(dstBuffer_null, "");
    }
    ZSTD_memcpy(dst, src, srcSize);
    return srcSize;
}

static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
                               BYTE b,
                               size_t regenSize)
{
    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
    if (dst == NULL) {
        if (regenSize == 0) return 0;
        RETURN_ERROR(dstBuffer_null, "");
    }
    ZSTD_memset(dst, b, regenSize);
    return regenSize;
}

static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
{
#if ZSTD_TRACE
    if (dctx->traceCtx) {
        ZSTD_Trace trace;
        ZSTD_memset(&trace, 0, sizeof(trace));
        trace.version = ZSTD_VERSION_NUMBER;
        trace.streaming = streaming;
        if (dctx->ddict) {
            trace.dictionaryID = ZSTD_getDictID_fromDDict(dctx->ddict);
            trace.dictionarySize = ZSTD_DDict_dictSize(dctx->ddict);
            trace.dictionaryIsCold = dctx->ddictIsCold;
        }
        trace.uncompressedSize = (size_t)uncompressedSize;
        trace.compressedSize = (size_t)compressedSize;
        trace.dctx = dctx;
        ZSTD_trace_decompress_end(dctx->traceCtx, &trace);
    }
#else
    (void)dctx;
    (void)uncompressedSize;
    (void)compressedSize;
    (void)streaming;
#endif
}


/*! ZSTD_decompressFrame() :
 * @dctx must be properly initialized
 *  will update *srcPtr and *srcSizePtr,
 *  to make *srcPtr progress by one frame.
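 *  For orientation, a single frame is consumed here as :
 *    the frame header, then a sequence of blocks (3-byte block header +
 *    block content) until a block flagged lastBlock, then an optional
 *    4-byte XXH64 checksum when fParams.checksumFlag is set.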
*/ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void** srcPtr, size_t *srcSizePtr) { const BYTE* const istart = (const BYTE*)(*srcPtr); const BYTE* ip = istart; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart; BYTE* op = ostart; size_t remainingSrcSize = *srcSizePtr; DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr); /* check */ RETURN_ERROR_IF( remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize, srcSize_wrong, ""); /* Frame Header */ { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal( ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize, srcSize_wrong, ""); FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , ""); ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; } /* Loop on each block */ while (1) { size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); if (ZSTD_isError(cBlockSize)) return cBlockSize; ip += ZSTD_blockHeaderSize; remainingSrcSize -= ZSTD_blockHeaderSize; RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); switch(blockProperties.blockType) { case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1); break; case bt_raw : decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); break; case bt_rle : decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize); break; case bt_reserved : default: RETURN_ERROR(corruption_detected, "invalid block type"); } if (ZSTD_isError(decodedSize)) return decodedSize; if (dctx->validateChecksum) XXH64_update(&dctx->xxhState, op, decodedSize); if (decodedSize != 0) op += decodedSize; assert(ip != NULL); ip += cBlockSize; remainingSrcSize -= cBlockSize; if (blockProperties.lastBlock) break; } if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize, corruption_detected, ""); } if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, ""); if (!dctx->forceIgnoreChecksum) { U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); U32 checkRead; checkRead = MEM_readLE32(ip); RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, ""); } ip += 4; remainingSrcSize -= 4; } ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); /* Allow caller to get size read */ *srcPtr = ip; *srcSizePtr = remainingSrcSize; return (size_t)(op-ostart); } static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, const ZSTD_DDict* ddict) { void* const dststart = dst; int moreThan1Frame = 0; DEBUGLOG(5, "ZSTD_decompressMultiFrame"); assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */ if (ddict) { dict = ZSTD_DDict_dictContent(ddict); dictSize = ZSTD_DDict_dictSize(ddict); } while (srcSize >= ZSTD_startingInputLength(dctx->format)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(src, srcSize)) { size_t decodedSize; size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); if (ZSTD_isError(frameSize)) return 
frameSize; RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "legacy support is not compatible with static dctx"); decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); if (ZSTD_isError(decodedSize)) return decodedSize; assert(decodedSize <= dstCapacity); dst = (BYTE*)dst + decodedSize; dstCapacity -= decodedSize; src = (const BYTE*)src + frameSize; srcSize -= frameSize; continue; } #endif { U32 const magicNumber = MEM_readLE32(src); DEBUGLOG(4, "reading magic number %08X (expecting %08X)", (unsigned)magicNumber, ZSTD_MAGICNUMBER); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { size_t const skippableSize = readSkippableFrameSize(src, srcSize); FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed"); assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; continue; } } if (ddict) { /* we were called from ZSTD_decompress_usingDDict */ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), ""); } else { /* this will initialize correctly with no dict if dict == NULL, so * use this in all cases but ddict */ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), ""); } ZSTD_checkContinuity(dctx, dst, dstCapacity); { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); RETURN_ERROR_IF( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown) && (moreThan1Frame==1), srcSize_wrong, "At least one frame successfully completed, " "but following bytes are garbage: " "it's more likely to be a srcSize error, " "specifying more input bytes than size of frame(s). " "Note: one could be unlucky, it might be a corruption error instead, " "happening right at the place where we expect zstd magic bytes. 
" "But this is _much_ less likely than a srcSize field error."); if (ZSTD_isError(res)) return res; assert(res <= dstCapacity); if (res != 0) dst = (BYTE*)dst + res; dstCapacity -= res; } moreThan1Frame = 1; } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed"); return (size_t)((BYTE*)dst - (BYTE*)dststart); } size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize) { return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); } static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) { switch (dctx->dictUses) { default: assert(0 /* Impossible */); /* fall-through */ case ZSTD_dont_use: ZSTD_clearDict(dctx); return NULL; case ZSTD_use_indefinitely: return dctx->ddict; case ZSTD_use_once: dctx->dictUses = ZSTD_dont_use; return dctx->ddict; } } size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx)); } size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) size_t regenSize; ZSTD_DCtx* const dctx = ZSTD_createDCtx(); RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTD_freeDCtx(dctx); return regenSize; #else /* stack mode */ ZSTD_DCtx dctx; ZSTD_initDCtx_internal(&dctx); return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); #endif } /*-************************************** * Advanced Streaming Decompression API * Bufferless and synchronous ****************************************/ size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } /** * Similar to ZSTD_nextSrcSizeToDecompress(), but when when a block input can be streamed, * we allow taking a partial block as the input. Currently only raw uncompressed blocks can * be streamed. * * For blocks that can be streamed, this allows us to reduce the latency until we produce * output, and avoid copying the input. * * @param inputSize - The total amount of input that the caller currently has. 
 */
static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
    if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
        return dctx->expected;
    if (dctx->bType != bt_raw)
        return dctx->expected;
    return MIN(MAX(inputSize, 1), dctx->expected);
}

ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
    switch(dctx->stage)
    {
    default:   /* should not happen */
        assert(0);
    case ZSTDds_getFrameHeaderSize:
    case ZSTDds_decodeFrameHeader:
        return ZSTDnit_frameHeader;
    case ZSTDds_decodeBlockHeader:
        return ZSTDnit_blockHeader;
    case ZSTDds_decompressBlock:
        return ZSTDnit_block;
    case ZSTDds_decompressLastBlock:
        return ZSTDnit_lastBlock;
    case ZSTDds_checkChecksum:
        return ZSTDnit_checksum;
    case ZSTDds_decodeSkippableHeader:
    case ZSTDds_skipFrame:
        return ZSTDnit_skippableFrame;
    }
}

static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }

/** ZSTD_decompressContinue() :
 *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
 * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
 *           or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
    /* Sanity check */
    RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
    ZSTD_checkContinuity(dctx, dst, dstCapacity);

    dctx->processedCSize += srcSize;

    switch (dctx->stage)
    {
    case ZSTDds_getFrameHeaderSize :
        assert(src != NULL);
        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
                ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */
                dctx->stage = ZSTDds_decodeSkippableHeader;
                return 0;
        }   }
        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
        ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
        dctx->expected = dctx->headerSize - srcSize;
        dctx->stage = ZSTDds_decodeFrameHeader;
        return 0;

    case ZSTDds_decodeFrameHeader:
        assert(src != NULL);
        ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
        dctx->expected = ZSTD_blockHeaderSize;
        dctx->stage = ZSTDds_decodeBlockHeader;
        return 0;

    case ZSTDds_decodeBlockHeader:
        {   blockProperties_t bp;
            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
            if (ZSTD_isError(cBlockSize)) return cBlockSize;
            RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
            dctx->expected = cBlockSize;
            dctx->bType = bp.blockType;
            dctx->rleSize = bp.origSize;
            if (cBlockSize) {
                dctx->stage = bp.lastBlock ?
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; return 0; } /* empty block */ if (bp.lastBlock) { if (dctx->fParams.checksumFlag) { dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { dctx->expected = 0; /* end of frame */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */ dctx->stage = ZSTDds_decodeBlockHeader; } return 0; } case ZSTDds_decompressLastBlock: case ZSTDds_decompressBlock: DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock"); { size_t rSize; switch(dctx->bType) { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : assert(srcSize <= dctx->expected); rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed"); assert(rSize == srcSize); dctx->expected -= rSize; break; case bt_rle : rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize); dctx->expected = 0; /* Streaming not supported */ break; case bt_reserved : /* should never happen */ default: RETURN_ERROR(corruption_detected, "invalid block type"); } FORWARD_IF_ERROR(rSize, ""); RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum"); DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize); dctx->decodedSize += rSize; if (dctx->validateChecksum) XXH64_update(&dctx->xxhState, dst, rSize); dctx->previousDstEnd = (char*)dst + rSize; /* Stay on the same stage until we are finished streaming the block. */ if (dctx->expected > 0) { return rSize; } if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize); RETURN_ERROR_IF( dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && dctx->decodedSize != dctx->fParams.frameContentSize, corruption_detected, ""); if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); dctx->expected = 0; /* ends here */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTD_blockHeaderSize; } return rSize; } case ZSTDds_checkChecksum: assert(srcSize == 4); /* guaranteed by dctx->expected */ { if (dctx->validateChecksum) { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); U32 const check32 = MEM_readLE32(src); DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32); RETURN_ERROR_IF(check32 != h32, checksum_wrong, ""); } ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; } case ZSTDds_decodeSkippableHeader: assert(src != NULL); assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */ dctx->stage = ZSTDds_skipFrame; return 0; case ZSTDds_skipFrame: dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 
0;

    default:
        assert(0);   /* impossible */
        RETURN_ERROR(GENERIC, "impossible to reach");   /* some compilers require default to do something */
    }
}


static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
    dctx->dictEnd = dctx->previousDstEnd;
    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
    dctx->prefixStart = dict;
    dctx->previousDstEnd = (const char*)dict + dictSize;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
    return 0;
}

/*! ZSTD_loadDEntropy() :
 *  dict : must point at beginning of a valid zstd dictionary.
 * @return : size of entropy tables read */
size_t
ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
                  const void* const dict, size_t const dictSize)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;

    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
    dictPtr += 8;   /* skip header = magic + dictID */

    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
#ifdef HUF_FORCE_DECOMPRESS_X1
        /* in minimal huffman, we always use X1 variants */
        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
                                                dictPtr, dictEnd - dictPtr,
                                                workspace, workspaceSize);
#else
        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
                     dictPtr, (size_t)(dictEnd - dictPtr),
                     workspace, workspaceSize);
#endif
        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
        dictPtr += hSize;
    }

    {   short offcodeNCount[MaxOff+1];
        unsigned offcodeMaxValue = MaxOff, offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
        ZSTD_buildFSETable( entropy->OFTable,
                            offcodeNCount, offcodeMaxValue,
                            OF_base, OF_bits,
                            offcodeLog,
                            entropy->workspace, sizeof(entropy->workspace),
                            /* bmi2 */0);
        dictPtr += offcodeHeaderSize;
    }

    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
        ZSTD_buildFSETable( entropy->MLTable,
                            matchlengthNCount, matchlengthMaxValue,
                            ML_base, ML_bits,
                            matchlengthLog,
                            entropy->workspace, sizeof(entropy->workspace),
                            /* bmi2 */ 0);
        dictPtr += matchlengthHeaderSize;
    }

    {   short litlengthNCount[MaxLL+1];
        unsigned
litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->LLTable, litlengthNCount, litlengthMaxValue, LL_base, LL_bits, litlengthLog, entropy->workspace, sizeof(entropy->workspace), /* bmi2 */ 0); dictPtr += litlengthHeaderSize; } RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); { int i; size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); for (i=0; i<3; i++) { U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; RETURN_ERROR_IF(rep==0 || rep > dictContentSize, dictionary_corrupted, ""); entropy->rep[i] = rep; } } return (size_t)(dictPtr - (const BYTE*)dict); } static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); { U32 const magic = MEM_readLE32(dict); if (magic != ZSTD_MAGIC_DICTIONARY) { return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ } } dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); /* load entropy tables */ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, ""); dict = (const char*)dict + eSize; dictSize -= eSize; } dctx->litEntropy = dctx->fseEntropy = 1; /* reference dictionary content */ return ZSTD_refDictContent(dctx, dict, dictSize); } size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) { assert(dctx != NULL); #if ZSTD_TRACE dctx->traceCtx = ZSTD_trace_decompress_begin(dctx); #endif dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ dctx->stage = ZSTDds_getFrameHeaderSize; dctx->processedCSize = 0; dctx->decodedSize = 0; dctx->previousDstEnd = NULL; dctx->prefixStart = NULL; dctx->virtualStart = NULL; dctx->dictEnd = NULL; dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; dctx->bType = bt_reserved; ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ dctx->LLTptr = dctx->entropy.LLTable; dctx->MLTptr = dctx->entropy.MLTable; dctx->OFTptr = dctx->entropy.OFTable; dctx->HUFptr = dctx->entropy.hufTable; return 0; } size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (dict && dictSize) RETURN_ERROR_IF( ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)), dictionary_corrupted, ""); return 0; } /* ====== ZSTD_DDict ====== */ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict"); assert(dctx != NULL); if (ddict) { const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict); size_t const dictSize = ZSTD_DDict_dictSize(ddict); const void* const dictEnd = dictStart + dictSize; dctx->ddictIsCold = (dctx->dictEnd != dictEnd); DEBUGLOG(4, "DDict is %s", dctx->ddictIsCold ? 
"~cold~" : "hot!"); } FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (ddict) { /* NULL ddict is equivalent to no dictionary */ ZSTD_copyDDictParameters(dctx, ddict); } return 0; } /*! ZSTD_getDictID_fromDict() : * Provides the dictID stored within dictionary. * if @return == 0, the dictionary is not conformant with Zstandard specification. * It can still be loaded, but as a content-only dictionary. */ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) { if (dictSize < 8) return 0; if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0; return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); } /*! ZSTD_getDictID_fromFrame() : * Provides the dictID required to decompress frame stored within `src`. * If @return == 0, the dictID could not be decoded. * This could for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. * Needed dictionary is a hidden information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. * - This is not a Zstandard frame. * When identifying the exact failure cause, it's possible to use * ZSTD_getFrameHeader(), which will provide a more precise error code. */ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; } /*! ZSTD_decompress_usingDDict() : * Decompression using a pre-digested Dictionary * Use dictionary without significant overhead. */ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_DDict* ddict) { /* pass content and size in case legacy frames are encountered */ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict); } /*===================================== * Streaming decompression *====================================*/ ZSTD_DStream* ZSTD_createDStream(void) { DEBUGLOG(3, "ZSTD_createDStream"); return ZSTD_createDStream_advanced(ZSTD_defaultCMem); } ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticDCtx(workspace, workspaceSize); } ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) { return ZSTD_createDCtx_advanced(customMem); } size_t ZSTD_freeDStream(ZSTD_DStream* zds) { return ZSTD_freeDCtx(zds); } /* *** Initialization *** */ size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; } size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); if (dict && dictSize != 0) { dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!"); dctx->ddict = dctx->ddictLocal; dctx->dictUses = ZSTD_use_indefinitely; } return 0; } size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, 
ZSTD_dct_auto); } size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); } size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), ""); dctx->dictUses = ZSTD_use_once; return 0; } size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize) { return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent); } /* ZSTD_initDStream_usingDict() : * return : expected size, aka ZSTD_startingInputLength(). * this function cannot fail */ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) { DEBUGLOG(4, "ZSTD_initDStream_usingDict"); FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , ""); return ZSTD_startingInputLength(zds->format); } /* note : this variant can't fail */ size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); return ZSTD_initDStream_usingDDict(zds, NULL); } /* ZSTD_initDStream_usingDDict() : * ddict will just be referenced, and must outlive decompression session * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); return ZSTD_startingInputLength(dctx->format); } /* ZSTD_resetDStream() : * return : expected size, aka ZSTD_startingInputLength(). * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); return ZSTD_startingInputLength(dctx->format); } size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); if (ddict) { dctx->ddict = ddict; dctx->dictUses = ZSTD_use_indefinitely; if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) { if (dctx->ddictSet == NULL) { dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); if (!dctx->ddictSet) { RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!"); } } assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */ FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), ""); } } return 0; } /* ZSTD_DCtx_setMaxWindowSize() : * note : no direct equivalence in ZSTD_DCtx_setParameter, * since this version sets windowSize, and the other sets windowLog */ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) { ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax); size_t const min = (size_t)1 << bounds.lowerBound; size_t const max = (size_t)1 << bounds.upperBound; RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, ""); RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, ""); dctx->maxWindowSize = maxWindowSize; return 0; } size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) { return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format); } ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) { ZSTD_bounds bounds = { 0, 0, 0 }; switch(dParam) { case 
ZSTD_d_windowLogMax: bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN; bounds.upperBound = ZSTD_WINDOWLOG_MAX; return bounds; case ZSTD_d_format: bounds.lowerBound = (int)ZSTD_f_zstd1; bounds.upperBound = (int)ZSTD_f_zstd1_magicless; ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); return bounds; case ZSTD_d_stableOutBuffer: bounds.lowerBound = (int)ZSTD_bm_buffered; bounds.upperBound = (int)ZSTD_bm_stable; return bounds; case ZSTD_d_forceIgnoreChecksum: bounds.lowerBound = (int)ZSTD_d_validateChecksum; bounds.upperBound = (int)ZSTD_d_ignoreChecksum; return bounds; case ZSTD_d_refMultipleDDicts: bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; return bounds; default:; } bounds.error = ERROR(parameter_unsupported); return bounds; } /* ZSTD_dParam_withinBounds: * @return 1 if value is within dParam bounds, * 0 otherwise */ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) { ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam); if (ZSTD_isError(bounds.error)) return 0; if (value < bounds.lowerBound) return 0; if (value > bounds.upperBound) return 0; return 1; } #define CHECK_DBOUNDS(p,v) { \ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \ } size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value) { switch (param) { case ZSTD_d_windowLogMax: *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize); return 0; case ZSTD_d_format: *value = (int)dctx->format; return 0; case ZSTD_d_stableOutBuffer: *value = (int)dctx->outBufferMode; return 0; case ZSTD_d_forceIgnoreChecksum: *value = (int)dctx->forceIgnoreChecksum; return 0; case ZSTD_d_refMultipleDDicts: *value = (int)dctx->refMultipleDDicts; return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); } size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); switch(dParam) { case ZSTD_d_windowLogMax: if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT; CHECK_DBOUNDS(ZSTD_d_windowLogMax, value); dctx->maxWindowSize = ((size_t)1) << value; return 0; case ZSTD_d_format: CHECK_DBOUNDS(ZSTD_d_format, value); dctx->format = (ZSTD_format_e)value; return 0; case ZSTD_d_stableOutBuffer: CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value); dctx->outBufferMode = (ZSTD_bufferMode_e)value; return 0; case ZSTD_d_forceIgnoreChecksum: CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value); dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; return 0; case ZSTD_d_refMultipleDDicts: CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value); if (dctx->staticSize != 0) { RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!"); } dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); } size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) { if ( (reset == ZSTD_reset_session_only) || (reset == ZSTD_reset_session_and_parameters) ) { dctx->streamStage = zdss_init; dctx->noForwardProgress = 0; } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); ZSTD_DCtx_resetParameters(dctx); } return 0; } size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) { return ZSTD_sizeof_DCtx(dctx); } size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) { size_t const blockSize = (size_t) 
MIN(windowSize, ZSTD_BLOCKSIZE_MAX); unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, frameParameter_windowTooLarge, ""); return minRBSize; } size_t ZSTD_estimateDStreamSize(size_t windowSize) { size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); size_t const inBuffSize = blockSize; /* no block can be larger */ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN); return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; } size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) { U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ ZSTD_frameHeader zfh; size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(err)) return err; RETURN_ERROR_IF(err>0, srcSize_wrong, ""); RETURN_ERROR_IF(zfh.windowSize > windowSizeMax, frameParameter_windowTooLarge, ""); return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); } /* ***** Decompression ***** */ static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) { return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR; } static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) { if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize)) zds->oversizedDuration++; else zds->oversizedDuration = 0; } static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds) { return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION; } /* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output) { ZSTD_outBuffer const expect = zds->expectedOutBuffer; /* No requirement when ZSTD_obm_stable is not enabled. */ if (zds->outBufferMode != ZSTD_bm_stable) return 0; /* Any buffer is allowed in zdss_init, this must be the same for every other call until * the context is reset. */ if (zds->streamStage == zdss_init) return 0; /* The buffer must match our expectation exactly. */ if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) return 0; RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!"); } /* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() * and updates the stage and the output buffer state. This call is extracted so it can be * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. * NOTE: You must break after calling this function since the streamStage is modified. */ static size_t ZSTD_decompressContinueStream( ZSTD_DStream* zds, char** op, char* oend, void const* src, size_t srcSize) { int const isSkipFrame = ZSTD_isSkipFrame(zds); if (zds->outBufferMode == ZSTD_bm_buffered) { size_t const dstSize = isSkipFrame ? 
0 : zds->outBuffSize - zds->outStart; size_t const decodedSize = ZSTD_decompressContinue(zds, zds->outBuff + zds->outStart, dstSize, src, srcSize); FORWARD_IF_ERROR(decodedSize, ""); if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; } else { zds->outEnd = zds->outStart + decodedSize; zds->streamStage = zdss_flush; } } else { /* Write directly into the output buffer */ size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op); size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); FORWARD_IF_ERROR(decodedSize, ""); *op += decodedSize; /* Flushing is not needed. */ zds->streamStage = zdss_read; assert(*op <= oend); assert(zds->outBufferMode == ZSTD_bm_stable); } return 0; } size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { const char* const src = (const char*)input->src; const char* const istart = input->pos != 0 ? src + input->pos : src; const char* const iend = input->size != 0 ? src + input->size : src; const char* ip = istart; char* const dst = (char*)output->dst; char* const ostart = output->pos != 0 ? dst + output->pos : dst; char* const oend = output->size != 0 ? dst + output->size : dst; char* op = ostart; U32 someMoreWork = 1; DEBUGLOG(5, "ZSTD_decompressStream"); RETURN_ERROR_IF( input->pos > input->size, srcSize_wrong, "forbidden. in: pos: %u vs size: %u", (U32)input->pos, (U32)input->size); RETURN_ERROR_IF( output->pos > output->size, dstSize_tooSmall, "forbidden. out: pos: %u vs size: %u", (U32)output->pos, (U32)output->size); DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), ""); while (someMoreWork) { switch(zds->streamStage) { case zdss_init : DEBUGLOG(5, "stage zdss_init => transparent reset "); zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; zds->legacyVersion = 0; zds->hostageByte = 0; zds->expectedOutBuffer = *output; /* fall-through */ case zdss_loadHeader : DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) if (zds->legacyVersion) { RETURN_ERROR_IF(zds->staticSize, memory_allocation, "legacy support is incompatible with static dctx"); { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); if (hint==0) zds->streamStage = zdss_init; return hint; } } #endif { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); if (zds->refMultipleDDicts && zds->ddictSet) { ZSTD_DCtx_selectFrameDDict(zds); } DEBUGLOG(5, "header size : %u", (U32)hSize); if (ZSTD_isError(hSize)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); if (legacyVersion) { ZSTD_DDict const* const ddict = ZSTD_getDDict(zds); const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL; size_t const dictSize = ddict ? 
ZSTD_DDict_dictSize(ddict) : 0; DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion); RETURN_ERROR_IF(zds->staticSize, memory_allocation, "legacy support is incompatible with static dctx"); FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion, dict, dictSize), ""); zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input); if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */ return hint; } } #endif return hSize; /* error */ } if (hSize != 0) { /* need more input */ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ size_t const remainingInput = (size_t)(iend-ip); assert(iend >= ip); if (toLoad > remainingInput) { /* not enough input to load full header */ if (remainingInput > 0) { ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); zds->lhSize += remainingInput; } input->pos = input->size; return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; break; } } /* check for single-pass mode opportunity */ if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && zds->fParams.frameType != ZSTD_skippableFrame && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart)); if (cSize <= (size_t)(iend-istart)) { /* shortcut : using single-pass mode */ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds)); if (ZSTD_isError(decompressedSize)) return decompressedSize; DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()"); ip = istart + cSize; op += decompressedSize; zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; break; } } /* Check output buffer is large enough for ZSTD_d_stableOutBuffer.
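 * A caller that sets ZSTD_d_stableOutBuffer promises to present the same output buffer,
 * large enough for the entire frame, on every call. Illustrative caller-side sketch
 * (buffer names hypothetical, error handling elided) :
 * \code
 * ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
 * {   ZSTD_outBuffer out = { dstBuf, dstCap, 0 };   // dstCap >= frame content size
 *     ZSTD_inBuffer  in  = { srcBuf, srcSize, 0 };
 *     size_t const ret = ZSTD_decompressStream(dctx, &out, &in);  // 0 once the frame is done
 * }
 * \endcode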
*/ if (zds->outBufferMode == ZSTD_bm_stable && zds->fParams.frameType != ZSTD_skippableFrame && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) { RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small"); } /* Consume header (see ZSTDds_decodeFrameHeader) */ DEBUGLOG(4, "Consume header"); FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); zds->stage = ZSTDds_skipFrame; } else { FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), ""); zds->expected = ZSTD_blockHeaderSize; zds->stage = ZSTDds_decodeBlockHeader; } /* control buffer memory usage */ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)", (U32)(zds->fParams.windowSize >>10), (U32)(zds->maxWindowSize >> 10) ); zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, frameParameter_windowTooLarge, ""); /* Adapt buffer sizes to frame header instructions */ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize) : 0; ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize); int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); if (tooSmall || tooLarge) { size_t const bufferSize = neededInBuffSize + neededOutBuffSize; DEBUGLOG(4, "inBuff : from %u to %u", (U32)zds->inBuffSize, (U32)neededInBuffSize); DEBUGLOG(4, "outBuff : from %u to %u", (U32)zds->outBuffSize, (U32)neededOutBuffSize); if (zds->staticSize) { /* static DCtx */ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ RETURN_ERROR_IF( bufferSize > zds->staticSize - sizeof(ZSTD_DCtx), memory_allocation, ""); } else { ZSTD_customFree(zds->inBuff, zds->customMem); zds->inBuffSize = 0; zds->outBuffSize = 0; zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem); RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, ""); } zds->inBuffSize = neededInBuffSize; zds->outBuff = zds->inBuff + zds->inBuffSize; zds->outBuffSize = neededOutBuffSize; } } } zds->streamStage = zdss_read; /* fall-through */ case zdss_read: DEBUGLOG(5, "stage zdss_read"); { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip)); DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); if (neededInSize==0) { /* end of frame */ zds->streamStage = zdss_init; someMoreWork = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); ip += neededInSize; /* Function modifies the stage so we must break */ break; } } if (ip==iend) { someMoreWork = 0; break; } /* no more input */ zds->streamStage = zdss_load; /* fall-through */ case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); size_t const toLoad = neededInSize - zds->inPos; int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; /* At this point we 
shouldn't be decompressing a block that we can stream. */ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip)); if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos, corruption_detected, "should never happen"); loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); } ip += loadedSize; zds->inPos += loadedSize; if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ zds->inPos = 0; /* input is consumed */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), ""); /* Function modifies the stage so we must break */ break; } case zdss_flush: { size_t const toFlushSize = zds->outEnd - zds->outStart; size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize); op += flushedSize; zds->outStart += flushedSize; if (flushedSize == toFlushSize) { /* flush completed */ zds->streamStage = zdss_read; if ( (zds->outBuffSize < zds->fParams.frameContentSize) && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", (int)(zds->outBuffSize - zds->outStart), (U32)zds->fParams.blockSizeMax); zds->outStart = zds->outEnd = 0; } break; } } /* cannot complete flush */ someMoreWork = 0; break; default: assert(0); /* impossible */ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ } } /* result */ input->pos = (size_t)(ip - (const char*)(input->src)); output->pos = (size_t)(op - (char*)(output->dst)); /* Update the expected output buffer for ZSTD_obm_stable. */ zds->expectedOutBuffer = *output; if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { RETURN_ERROR_IF(op==oend, dstSize_tooSmall, ""); RETURN_ERROR_IF(ip==iend, srcSize_wrong, ""); assert(0); } } else { zds->noForwardProgress = 0; } { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); if (!nextSrcSizeHint) { /* frame fully decoded */ if (zds->outEnd == zds->outStart) { /* output fully flushed */ if (zds->hostageByte) { if (input->pos >= input->size) { /* can't release hostage (not present) */ zds->streamStage = zdss_read; return 1; } input->pos++; /* release hostage */ } /* zds->hostageByte */ return 0; } /* zds->outEnd == zds->outStart */ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ zds->hostageByte=1; } return 1; } /* nextSrcSizeHint==0 */ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */ assert(zds->inPos <= nextSrcSizeHint); nextSrcSizeHint -= zds->inPos; /* part already loaded*/ return nextSrcSizeHint; } } size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; ZSTD_inBuffer input = { src, srcSize, *srcPos }; /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); *dstPos = output.pos; *srcPos = input.pos; return cErr; } /**** ended inlining decompress/zstd_decompress.c 
****/ /**** start inlining decompress/zstd_decompress_block.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* zstd_decompress_block : * this module takes care of decompressing _compressed_ block */ /*-******************************************************* * Dependencies *********************************************************/ /**** skipping file: ../common/zstd_deps.h ****/ /**** skipping file: ../common/compiler.h ****/ /**** skipping file: ../common/cpu.h ****/ /**** skipping file: ../common/mem.h ****/ #define FSE_STATIC_LINKING_ONLY /**** skipping file: ../common/fse.h ****/ #define HUF_STATIC_LINKING_ONLY /**** skipping file: ../common/huf.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ /**** skipping file: zstd_decompress_internal.h ****/ /**** skipping file: zstd_ddict.h ****/ /**** skipping file: zstd_decompress_block.h ****/ /*_******************************************************* * Macros **********************************************************/ /* These two optional macros force the use one way or another of the two * ZSTD_decompressSequences implementations. You can't force in both directions * at the same time. */ #if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) #error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!" #endif /*_******************************************************* * Memory operations **********************************************************/ static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); } /*-************************************************************* * Block decoding ***************************************************************/ /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, ""); { U32 const cBlockHeader = MEM_readLE24(src); U32 const cSize = cBlockHeader >> 3; bpPtr->lastBlock = cBlockHeader & 1; bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); bpPtr->origSize = cSize; /* only useful for RLE */ if (bpPtr->blockType == bt_rle) return 1; RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, ""); return cSize; } } /* Hidden declaration for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const void* src, size_t srcSize); /*! 
ZSTD_decodeLiteralsBlock() : * @return : nb of bytes read from src (< srcSize ) * note : symbol not declared but exposed for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ { DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); { const BYTE* const istart = (const BYTE*) src; symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); switch(litEncType) { case set_repeat: DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); /* fall-through */ case set_compressed: RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); { size_t lhSize, litSize, litCSize; U32 singleStream=0; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ /* 2 - 2 - 10 - 10 */ singleStream = !lhlCode; lhSize = 3; litSize = (lhc >> 4) & 0x3FF; litCSize = (lhc >> 14) & 0x3FF; break; case 2: /* 2 - 2 - 14 - 14 */ lhSize = 4; litSize = (lhc >> 4) & 0x3FFF; litCSize = lhc >> 18; break; case 3: /* 2 - 2 - 18 - 18 */ lhSize = 5; litSize = (lhc >> 4) & 0x3FFFF; litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); break; } RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); /* prefetch huffman table if cold */ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable)); } if (litEncType==set_repeat) { if (singleStream) { hufSuccess = HUF_decompress1X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2); } else { hufSuccess = HUF_decompress4X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2); } } else { if (singleStream) { #if defined(HUF_FORCE_DECOMPRESS_X2) hufSuccess = HUF_decompress1X_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace)); #else hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace), dctx->bmi2); #endif } else { hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, sizeof(dctx->workspace), dctx->bmi2); } } RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; dctx->litEntropy = 1; if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case set_basic: { size_t litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ lhSize = 1; litSize = istart[0] >> 3; break; case 1: lhSize = 2; litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; litSize = MEM_readLE24(istart) >> 4; break; } if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); 
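/* Too close to the end of the block for a zero-copy reference : copy the literals into
 * litBuffer instead, whose tail is padded just below with WILDCOPY_OVERLENGTH zeroed
 * bytes, so that later wildcopy reads past the literals stay inside owned memory. */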
ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; return lhSize+litSize; } case set_rle: { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ lhSize = 1; litSize = istart[0] >> 3; break; case 1: lhSize = 2; litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; litSize = MEM_readLE24(istart) >> 4; RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); break; } RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; } default: RETURN_ERROR(corruption_detected, "impossible"); } } }
/* Default FSE distribution tables.
 * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
 * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
 * They were generated programmatically with following method :
 * - start from default distributions, present in /lib/common/zstd_internal.h
 * - generate tables normally, using ZSTD_buildFSETable()
 * - printout the content of tables
 * - prettify output, report below, test with fuzzer to ensure it's correct */
/* Default FSE distribution table for Literal Lengths */
static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = { /* ... pre-calculated entries omitted ... */ };
/* Default FSE distribution table for Offset Codes */
static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = { /* ... pre-calculated entries omitted ... */ };
/* Default FSE distribution table for Match Lengths */
static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = { /* ... pre-calculated entries omitted ... */ };
static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
{
    void* ptr = dt;
    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
    ZSTD_seqSymbol* const cell = dt + 1;
    DTableH->tableLog = 0; DTableH->fastMode = 0; cell->nbBits = 0; cell->nextState = 0; assert(nbAddBits < 255); cell->nbAdditionalBits = (BYTE)nbAddBits; cell->baseValue = baseValue; }
/* ZSTD_buildFSETable() :
 * generate FSE decoding table for one symbol (ll, ml or off)
 * cannot fail if input is valid =>
 * all inputs are presumed validated at this stage */
FORCE_INLINE_TEMPLATE void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, const U32* baseValue, const U32* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize) { ZSTD_seqSymbol* const tableDecode = dt+1; U32 const maxSV1 = maxSymbolValue + 1; U32 const tableSize = 1 << tableLog; U16* symbolNext = (U16*)wksp; BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1); U32 highThreshold = tableSize - 1; /* Sanity Checks */ assert(maxSymbolValue <= MaxSeq); assert(tableLog <= MaxFSELog); assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE); (void)wkspSize; /* Init, lay down lowprob symbols */ { ZSTD_seqSymbol_header DTableH; DTableH.tableLog = tableLog; DTableH.fastMode = 1; { S16 const largeLimit= (S16)(1 << (tableLog-1)); U32 s; for (s=0; s<maxSV1; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].baseValue = s; symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0; assert(normalizedCounter[s]>=0); symbolNext[s] = (U16)normalizedCounter[s]; } } } ZSTD_memcpy(dt, &DTableH, sizeof(DTableH)); } /* Spread symbols */ assert(tableSize <= 512); /* Specialized symbol spreading for the case when there are * no low probability (-1 count) symbols. When compressing * small blocks we avoid low probability symbols to hit this * case, since header decoding speed matters more. */ if (highThreshold == tableSize - 1) { size_t const tableMask = tableSize-1; size_t const step = FSE_TABLESTEP(tableSize); /* First lay down the symbols in order.
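 * (e.g. with normalized counts 3, 2 and 1 for symbols 0, 1 and 2, the spread buffer
 * starts out as 0 0 0 1 1 2 before being scattered across the table by the step.)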
* We use a uint64_t to lay down 8 bytes at a time. This reduces branch
 * misses since small blocks generally have small table logs, so nearly
 * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
 * our buffer to handle the over-write.
 */
{   U64 const add = 0x0101010101010101ull;
    size_t pos = 0;
    U64 sv = 0;
    U32 s;
    for (s=0; s<maxSV1; ++s, sv += add) {
        int i;
        int const n = normalizedCounter[s];
        MEM_write64(spread + pos, sv);
        for (i = 8; i < n; i += 8) {
            MEM_write64(spread + pos + i, sv);
        }
        pos += n;
    }
}
/* Now we spread those positions across the table.
 * The benefit of doing it in two stages is that we avoid the variable-size inner loop,
 * which caused lots of branch misses.
 * Now we can run through all the positions without any branch misses.
 * We unroll the loop twice, since that is what empirically worked best.
 */
{   size_t position = 0;
    size_t s;
    size_t const unroll = 2;
    assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
    for (s = 0; s < (size_t)tableSize; s += unroll) {
        size_t u;
        for (u = 0; u < unroll; ++u) {
            size_t const uPosition = (position + (u * step)) & tableMask;
            tableDecode[uPosition].baseValue = spread[s + u];
        }
        position = (position + (unroll * step)) & tableMask;
    }
    assert(position == 0);
}
} else { U32 const tableMask = tableSize-1; U32 const step = FSE_TABLESTEP(tableSize); U32 s, position = 0; for (s=0; s<maxSV1; s++) { int i; int const n = normalizedCounter[s]; for (i=0; i<n; i++) { tableDecode[position].baseValue = s; position = (position + step) & tableMask; while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ }
/* Build Decoding table */
{   U32 u;
    for (u=0; u<tableSize; u++) {
        U32 const symbol = tableDecode[u].baseValue;
        U32 const nextState = symbolNext[symbol]++;
        tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
        tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
        assert(nbAdditionalBits[symbol] < 255);
        tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
        tableDecode[u].baseValue = baseValue[symbol];
    }
} }
/* Avoids the FORCE_INLINE of the _body() function. */
static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, const U32* baseValue, const U32* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize)
{
    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
#if DYNAMIC_BMI2
TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, const U32* baseValue, const U32* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize)
{
    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
#endif
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, const U32* baseValue, const U32* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
        return;
    }
#endif
    (void)bmi2;
    ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
/* ZSTD_buildSeqTable() :
 * @return : nb bytes read from src,
 *           or an error code if it fails */
static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr, symbolEncodingType_e type, unsigned max, U32 maxLog, const void* src, size_t srcSize, const U32* baseValue, const U32* nbAdditionalBits, const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable, int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize, int bmi2)
{
    switch(type)
    {
    case set_rle :
        RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, ""); { U32 const symbol = *(const BYTE*)src; U32 const baseline = baseValue[symbol]; U32 const nbBits = nbAdditionalBits[symbol]; ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); } *DTablePtr = DTableSpace; return 1; case set_basic : *DTablePtr = defaultTable; return 0; case set_repeat: RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, ""); /* prefetch FSE table if used */ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) { const void* const pStart = *DTablePtr; size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog)); PREFETCH_AREA(pStart, pSize); } return 0; case set_compressed : { unsigned tableLog; S16 norm[MaxSeq+1]; size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, ""); RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, ""); ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2); *DTablePtr = DTableSpace; return headerSize; } default : assert(0); RETURN_ERROR(GENERIC, "impossible"); } } size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE*)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; int nbSeq; DEBUGLOG(5, "ZSTD_decodeSeqHeaders"); /* check */ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, ""); /* SeqHead */ nbSeq = *ip++; if (!nbSeq) { *nbSeqPtr=0; RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, ""); return 1; } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, ""); nbSeq = MEM_readLE16(ip) + LONGNBSEQ; ip+=2; } else { RETURN_ERROR_IF(ip >= iend, srcSize_wrong, ""); nbSeq = ((nbSeq-0x80)<<8) + *ip++; } } *nbSeqPtr = nbSeq; /* FSE table descriptors */ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); ip++; /* Build DTables */ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_base, LL_bits, LL_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), dctx->bmi2); RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += llhSize; } { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_base, OF_bits, OF_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), dctx->bmi2); RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += ofhSize; } { size_t const mlhSize =
ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_base, ML_bits, ML_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), dctx->bmi2); RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += mlhSize; } } return ip-istart; } typedef struct { size_t litLength; size_t matchLength; size_t offset; const BYTE* match; } seq_t; typedef struct { size_t state; const ZSTD_seqSymbol* table; } ZSTD_fseState; typedef struct { BIT_DStream_t DStream; ZSTD_fseState stateLL; ZSTD_fseState stateOffb; ZSTD_fseState stateML; size_t prevOffset[ZSTD_REP_NUM]; const BYTE* prefixStart; const BYTE* dictEnd; size_t pos; } seqState_t; /*! ZSTD_overlapCopy8() : * Copies 8 bytes from ip to op and updates op and ip where ip <= op. * If the offset is < 8 then the offset is spread to at least 8 bytes. * * Precondition: *ip <= *op * Postcondition: *op - *ip >= 8 */ HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { assert(*ip <= *op); if (offset < 8) { /* close range match, overlap */ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ int const sub2 = dec64table[offset]; (*op)[0] = (*ip)[0]; (*op)[1] = (*ip)[1]; (*op)[2] = (*ip)[2]; (*op)[3] = (*ip)[3]; *ip += dec32table[offset]; ZSTD_copy4(*op+4, *ip); *ip -= sub2; } else { ZSTD_copy8(*op, *ip); } *ip += 8; *op += 8; assert(*op - *ip >= 8); } /*! ZSTD_safecopy() : * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer * and write up to 16 bytes past oend_w (op >= oend_w is allowed). * This function is only called in the uncommon case where the sequence is near the end of the block. It * should be fast for a single long sequence, but can be slow for several short sequences. * * @param ovtype controls the overlap detection * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. * The src buffer must be before the dst buffer. */ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { ptrdiff_t const diff = op - ip; BYTE* const oend = op + length; assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) || (ovtype == ZSTD_overlap_src_before_dst && diff >= 0)); if (length < 8) { /* Handle short lengths. */ while (op < oend) *op++ = *ip++; return; } if (ovtype == ZSTD_overlap_src_before_dst) { /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ assert(length >= 8); ZSTD_overlapCopy8(&op, &ip, diff); assert(op - ip >= 8); assert(op <= oend); } if (oend <= oend_w) { /* No risk of overwrite. */ ZSTD_wildcopy(op, ip, length, ovtype); return; } if (op <= oend_w) { /* Wildcopy until we get close to the end. */ assert(oend > oend_w); ZSTD_wildcopy(op, ip, oend_w - op, ovtype); ip += oend_w - op; op = oend_w; } /* Handle the leftovers. */ while (op < oend) *op++ = *ip++; } /* ZSTD_execSequenceEnd(): * This version handles cases that are near the end of the output buffer. It requires * more careful checks to make sure there is no overflow. By separating out these hard * and unlikely cases, we can speed up the common cases.
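 * (For instance, any sequence whose match would end within WILDCOPY_OVERLENGTH bytes
 * of oend is routed here, since the wildcopy in ZSTD_execSequence() could overwrite.)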
* * NOTE: This function needs to be fast for a single long sequence, but doesn't need * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). */ FORCE_NOINLINE size_t ZSTD_execSequenceEnd(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* bounds checks : careful of address space overflow in 32-bit mode */ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); assert(op < op + sequenceLength); assert(oLitEnd < op + sequenceLength); /* copy literals */ ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap); op = oLitEnd; *litPtr = iLitEnd; /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix */ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); match = dictEnd - (prefixStart-match); if (match + sequence.matchLength <= dictEnd) { ZSTD_memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; ZSTD_memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = prefixStart; } } ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); return sequenceLength; } HINT_INLINE size_t ZSTD_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */ const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; assert(op != NULL /* Precondition */); assert(oend_w < oend /* No underflow */); /* Handle edge cases in a slow path: * - Read beyond end of literals * - Match end is within WILDCOPY_OVERLENGTH of oend * - 32-bit mode and the match length overflows */ if (UNLIKELY( iLitEnd > litLimit || oMatchEnd > oend_w || (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ assert(op <= oLitEnd /* No overflow */); assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); assert(oMatchEnd <= oend /* No underflow */); assert(iLitEnd <= litLimit /* Literal length is in bounds */); assert(oLitEnd <= oend_w /* Can wildcopy literals */); assert(oMatchEnd <= oend_w /* Can wildcopy matches */); /* Copy Literals: * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. * We likely don't need the full 32-byte wildcopy.
*/ assert(WILDCOPY_OVERLENGTH >= 16); ZSTD_copy16(op, (*litPtr)); if (UNLIKELY(sequence.litLength > 16)) { ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap); } op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ /* Copy Match */ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix -> go into extDict */ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); match = dictEnd + (match - prefixStart); if (match + sequence.matchLength <= dictEnd) { ZSTD_memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; ZSTD_memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = prefixStart; } } /* Match within prefix of 1 or more bytes */ assert(op <= oMatchEnd); assert(oMatchEnd <= oend_w); assert(match >= prefixStart); assert(sequence.matchLength >= 1); /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy * without overlap checking. */ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { /* We bet on a full wildcopy for matches, since we expect matches to be * longer than literals (in general). In silesia, ~10% of matches are longer * than 16 bytes. */ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); return sequenceLength; } assert(sequence.offset < WILDCOPY_VECLEN); /* Copy 8 bytes and spread the offset to be >= 8. */ ZSTD_overlapCopy8(&op, &match, sequence.offset); /* If the match length is > 8 bytes, then continue with the wildcopy. */ if (sequence.matchLength > 8) { assert(op < oMatchEnd); ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); } return sequenceLength; } static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) { const void* ptr = dt; const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr; DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits", (U32)DStatePtr->state, DTableH->tableLog); BIT_reloadDStream(bitD); DStatePtr->table = dt + 1; } FORCE_INLINE_TEMPLATE void ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) { ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state]; U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.nextState + lowBits; } FORCE_INLINE_TEMPLATE void ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo) { U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.nextState + lowBits; } /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1) * bits before reloading. This value is the maximum number of bytes we read * after reloading when we are decoding long offsets. */ #define LONG_OFFSETS_MAX_EXTRA_BITS_32 \ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \ ? 
ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \ : 0) typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e; FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch) { seq_t seq; ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state]; ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state]; ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state]; U32 const llBase = llDInfo.baseValue; U32 const mlBase = mlDInfo.baseValue; U32 const ofBase = ofDInfo.baseValue; BYTE const llBits = llDInfo.nbAdditionalBits; BYTE const mlBits = mlDInfo.nbAdditionalBits; BYTE const ofBits = ofDInfo.nbAdditionalBits; BYTE const totalBits = llBits+mlBits+ofBits; /* sequence */ { size_t offset; if (ofBits > 1) { ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); assert(ofBits <= MaxOff); if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); BIT_reloadDStream(&seqState->DStream); if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ } else { offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); } seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } else { U32 const ll0 = (llBase == 0); if (LIKELY((ofBits == 0))) { if (LIKELY(!ll0)) offset = seqState->prevOffset[0]; else { offset = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } } else { offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset = temp; } } } seq.offset = offset; } seq.matchLength = mlBase; if (mlBits > 0) seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) BIT_reloadDStream(&seqState->DStream); if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) BIT_reloadDStream(&seqState->DStream); /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); seq.litLength = llBase; if (llBits > 0) seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); if (prefetch == ZSTD_p_prefetch) { size_t const pos = seqState->pos + seq.litLength; const BYTE* const matchBase = (seq.offset > pos) ? 
seqState->dictEnd : seqState->prefixStart; seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. * No consequence though : no memory access will occur, offset is only used for prefetching */ seqState->pos = pos + seq.matchLength; } /* ANS state update * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo(). * clang-9.2.0 does 7% worse with ZSTD_updateFseState(). * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the * better option, so it is the default for other compilers. But, if you * measure that it is worse, please put up a pull request. */ { #if defined(__GNUC__) && !defined(__clang__) const int kUseUpdateFseState = 1; #else const int kUseUpdateFseState = 0; #endif if (kUseUpdateFseState) { ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ } else { ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */ } } return seq; } #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) { size_t const windowSize = dctx->fParams.windowSize; /* No dictionary used. */ if (dctx->dictContentEndForFuzzing == NULL) return 0; /* Dictionary is our prefix. */ if (prefixStart == dctx->dictContentBeginForFuzzing) return 1; /* Dictionary is not our ext-dict. */ if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0; /* Dictionary is not within our window size. */ if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0; /* Dictionary is active. */ return 1; } MEM_STATIC void ZSTD_assertValidSequence( ZSTD_DCtx const* dctx, BYTE const* op, BYTE const* oend, seq_t const seq, BYTE const* prefixStart, BYTE const* virtualStart) { #if DEBUGLEVEL >= 1 size_t const windowSize = dctx->fParams.windowSize; size_t const sequenceSize = seq.litLength + seq.matchLength; BYTE const* const oLitEnd = op + seq.litLength; DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); assert(op <= oend); assert((size_t)(oend - op) >= sequenceSize); assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX); if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); /* Offset must be within the dictionary. */ assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); assert(seq.offset <= windowSize + dictSize); } else { /* Offset must be within our window. 
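 * (no dictionary is involved here, so every match must source from bytes already
 * produced inside the current window)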
*/ assert(seq.offset <= windowSize); } #else (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart; #endif } #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); DEBUGLOG(5, "ZSTD_decompressSequences_body"); (void)frame; /* Regen sequences */ if (nbSeq) { seqState_t seqState; size_t error = 0; dctx->fseEntropy = 1; { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } RETURN_ERROR_IF( ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); assert(dst != NULL); ZSTD_STATIC_ASSERT( BIT_DStream_unfinished < BIT_DStream_completed && BIT_DStream_endOfBuffer < BIT_DStream_completed && BIT_DStream_completed < BIT_DStream_overflow); #if defined(__GNUC__) && defined(__x86_64__) /* Align the decompression loop to 32 + 16 bytes. * * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression * speed swings based on the alignment of the decompression loop. This * performance swing is caused by parts of the decompression loop falling * out of the DSB. The entire decompression loop should fit in the DSB; * when it can't, we get much worse performance. You can measure if you've * hit the good case or the bad case with this perf command for some * compressed file test.zst: * * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst * * If you see most cycles served out of the MITE you've hit the bad case. * If you see most cycles served out of the DSB you've hit the good case. * If it is pretty even then you may be in an okay case. * * I've been able to reproduce this issue on the following CPUs: * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 * Use Instruments->Counters to get DSB/MITE cycles. * I never got performance swings, but I was able to * go from the good case of mostly DSB to half of the * cycles served from MITE. * - Coffeelake: Intel i9-9900k * * I haven't been able to reproduce the instability or DSB misses on any * of the following CPUs: * - Haswell * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz * - Skylake * * If you are seeing performance instability this script can help test. * It tests on 4 commits in zstd where I saw performance change.
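 * Results are microarchitecture-dependent : treat the alignment directives below
 * as a tuning measure, not a guarantee.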
* * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 */ __asm__(".p2align 5"); __asm__("nop"); __asm__(".p2align 4"); #endif for ( ; ; ) { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) assert(!ZSTD_isError(oneSeqSize)); if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); #endif DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); BIT_reloadDStream(&(seqState.DStream)); op += oneSeqSize; /* gcc and clang both don't like early returns in this loop. * Instead break and check for an error at the end of the loop. */ if (UNLIKELY(ZSTD_isError(oneSeqSize))) { error = oneSeqSize; break; } if (UNLIKELY(!--nbSeq)) break; } /* check if reached exact end */ DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq); if (ZSTD_isError(error)) return error; RETURN_ERROR_IF(nbSeq, corruption_detected, ""); RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, ""); /* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { ZSTD_memcpy(op, litPtr, lastLLSize); op += lastLLSize; } } return op-ostart; } static size_t ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT FORCE_INLINE_TEMPLATE size_t ZSTD_decompressSequencesLong_body( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); (void)frame; /* Regen sequences */ if (nbSeq) { #define STORED_SEQS 4 #define STORED_SEQS_MASK (STORED_SEQS-1) #define ADVANCED_SEQS 4 seq_t sequences[STORED_SEQS]; int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); seqState_t seqState; int seqNb; dctx->fseEntropy = 1; { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } seqState.prefixStart = prefixStart; seqState.pos = (size_t)(op-prefixStart); seqState.dictEnd = dictEnd; assert(dst != NULL); assert(iend >= ip); RETURN_ERROR_IF( ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), corruption_detected, ""); ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
/* prepare in advance */
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
    sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
    PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
    seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
    size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
    assert(!ZSTD_isError(oneSeqSize));
    if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
    if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
    PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
    sequences[seqNb & STORED_SEQS_MASK] = sequence;
    op += oneSeqSize;
}
RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
/* finish queue */
seqNb -= seqAdvance;
for ( ; seqNb<nbSeq ; seqNb++) {
    size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
    assert(!ZSTD_isError(oneSeqSize));
    if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
    if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
    op += oneSeqSize;
}
/* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { ZSTD_memcpy(op, litPtr, lastLLSize); op += lastLLSize; } } return op-ostart; } static size_t ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ #if DYNAMIC_BMI2 #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG static TARGET_ATTRIBUTE("bmi2") size_t DONT_VECTORIZE ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT static TARGET_ATTRIBUTE("bmi2") size_t ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ #endif /* DYNAMIC_BMI2 */ typedef size_t (*ZSTD_decompressSequences_t)( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame); #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { DEBUGLOG(5, "ZSTD_decompressSequences"); #if DYNAMIC_BMI2 if (dctx->bmi2) { return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT /* ZSTD_decompressSequencesLong() : * decompression function triggered when a minimum share of offsets is considered "long", * aka out of cache. * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
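 * In this variant, sequences are decoded ADVANCED_SEQS positions ahead of execution and
 * each match address is prefetched (PREFETCH_L1) as soon as it is known, so the likely
 * cache miss overlaps with the copies of earlier sequences.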
* This function will try to mitigate main memory latency through the use of prefetching */ static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, const int frame) { DEBUGLOG(5, "ZSTD_decompressSequencesLong"); #if DYNAMIC_BMI2 if (dctx->bmi2) { return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) /* ZSTD_getLongOffsetsShare() : * condition : offTable must be valid * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) * compared to maximum possible of (1<<OffFSELog) */
static unsigned ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
{
    const void* ptr = offTable;
    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
    const ZSTD_seqSymbol* table = offTable + 1;
    U32 const max = 1 << tableLog;
    U32 u, total = 0;
    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
    assert(max <= (1 << OffFSELog));  /* max not too large */
    for (u=0; u<max; u++) { if (table[u].nbAdditionalBits > 22) total += 1; } assert(tableLog <= OffFSELog); total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ return total; } #endif size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const int frame) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; /* isLongOffset must be true if there are long offsets. * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. * We don't expect that to be the case in 64-bit mode. * In block mode, window size is not known, so we have to be conservative. * (note: but it could be evaluated from current-lowLimit) */ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)))); DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); /* Decode literals section */ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; } /* Build Decoding Tables */ { /* These macros control at build-time which decompressor implementation * we use. If neither is defined, we do some inspection and dispatch at * runtime. */ #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) int usePrefetchDecoder = dctx->ddictIsCold; #endif int nbSeq; size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); if (ZSTD_isError(seqHSize)) return seqHSize; ip += seqHSize; srcSize -= seqHSize; RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) if ( !usePrefetchDecoder && (!frame || (dctx->fParams.windowSize > (1<<24))) && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */ U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); U32 const minShare = MEM_64bits() ?
7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ usePrefetchDecoder = (shareLongOffsets >= minShare); } #endif dctx->ddictIsCold = 0; #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) if (usePrefetchDecoder) #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif } } void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) { if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */ dctx->dictEnd = dctx->previousDstEnd; dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); dctx->prefixStart = dst; dctx->previousDstEnd = dst; } } size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t dSize; ZSTD_checkContinuity(dctx, dst, dstCapacity); dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } /**** ended inlining decompress/zstd_decompress_block.c ****/ /**** start inlining dictBuilder/cover.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* ***************************************************************************** * Constructs a dictionary using a heuristic based on the following paper: * * Liao, Petri, Moffat, Wirth * Effective Construction of Relative Lempel-Ziv Dictionaries * Published in WWW 2016. * * Adapted from code originally written by @ot (Giuseppe Ottaviano). ******************************************************************************/ /*-************************************* * Dependencies ***************************************/ #include <stdio.h> /* fprintf */ #include <stdlib.h> /* malloc, free, qsort */ #include <string.h> /* memset */ #include <time.h> /* clock */ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/pool.h ****/ /**** skipping file: ../common/threading.h ****/ /**** start inlining cover.h ****/ /* * Copyright (c) 2017-2021, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include <stdio.h> /* fprintf */ #include <stdlib.h> /* malloc, free, qsort */ #include <string.h> /* memset */ #include <time.h> /* clock */ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/pool.h ****/ /**** skipping file: ../common/threading.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY #endif /**** start inlining zdict.h ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved.
* * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef DICTBUILDER_H_001 #define DICTBUILDER_H_001 #if defined (__cplusplus) extern "C" { #endif /*====== Dependencies ======*/ #include <stddef.h> /* size_t */ /* ===== ZDICTLIB_API : control library symbols visibility ===== */ #ifndef ZDICTLIB_VISIBILITY # if defined(__GNUC__) && (__GNUC__ >= 4) # define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) # else # define ZDICTLIB_VISIBILITY # endif #endif #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) # define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define ZDICTLIB_API ZDICTLIB_VISIBILITY #endif /*! ZDICT_trainFromBuffer(): * Train a dictionary from an array of samples. * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, * f=20, and accel=1. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * Note: Dictionary training will fail if there are not enough samples to construct a * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). * If dictionary training fails, you should use zstd without a dictionary, as the dictionary * would've been ineffective anyways. If you believe your samples would benefit from a dictionary * please open an issue with details, and we can look into it. * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); typedef struct { int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) */ } ZDICT_params_t; /*! ZDICT_finalizeDictionary(): * Given a custom content as a basis for dictionary, and a set of samples, * finalize dictionary by adding headers and statistics according to the zstd * dictionary format. * * Samples must be stored concatenated in a flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each * sample in order. The samples are used to construct the statistics, so they * should be representative of what you will compress with this dictionary.
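 *
 * A minimal usage sketch (the buffer names and the 110 KB size are
 * illustrative, not part of the API):
 * \code
 * char dict[110 * 1024];
 * ZDICT_params_t params;
 * memset(&params, 0, sizeof(params));     // zero selects the defaults
 * { size_t const dictSize = ZDICT_finalizeDictionary(
 *           dict, sizeof(dict),
 *           rawContent, rawContentSize,   // hypothetical pre-built content
 *           samplesBuffer, samplesSizes, nbSamples,
 *           params);
 *   // always test the result with ZDICT_isError(dictSize)
 * }
 * \endcode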
* * The compression level can be set in `parameters`. You should pass the * compression level you expect to use in production. The statistics for each * compression level differ, so tuning the dictionary for the compression level * can help quite a bit. * * You can set an explicit dictionary ID in `parameters`, or allow us to pick * a random dictionary ID for you, but we can't guarantee no collisions. * * The dstDictBuffer and the dictContent may overlap, and the content will be * appended to the end of the header. If the header + the content doesn't fit in * maxDictSize the beginning of the content is truncated to make room, since it * is presumed that the most profitable content is at the end of the dictionary, * since that is the cheapest to reference. * * `dictContentSize` must be >= ZDICT_CONTENTSIZE_MIN bytes. * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN). * * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), * or an error code, which can be tested by ZDICT_isError(). * Note: ZDICT_finalizeDictionary() will push notifications into stderr if * instructed to, using notificationLevel>0. * NOTE: This function currently may fail in several edge cases including: * * Not enough samples * * Samples are uncompressible * * Samples are all exactly the same */ ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize, const void* dictContent, size_t dictContentSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t parameters); /*====== Helper functions ======*/ ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize); /* returns dict header size; returns a ZSTD error code on failure */ ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); #ifdef ZDICT_STATIC_LINKING_ONLY /* ==================================================================================== * The definitions in this section are considered experimental. * They should never be used with a dynamic library, as they may change in the future. * They are provided for advanced usages. * Use them only in association with static linking. * ==================================================================================== */ #define ZDICT_CONTENTSIZE_MIN 128 #define ZDICT_DICTSIZE_MIN 256 /*! ZDICT_cover_params_t: * k and d are the only required parameters. * For others, value 0 means default.
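 * A zero-initialized struct is the usual starting point; for example
 * (the values below are illustrative, not tuned recommendations):
 * \code
 * ZDICT_cover_params_t params;
 * memset(&params, 0, sizeof(params));   // 0 = default for every optional field
 * params.k = 1024;                      // segment size (required)
 * params.d = 8;                         // dmer size (required), d <= k
 * \endcode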
*/ typedef struct { unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worst shrinkDictMaxRegression% worse than the max dict size dictionary. */ ZDICT_params_t zParams; } ZDICT_cover_params_t; typedef struct { unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ unsigned f; /* log of size of frequency array : constraint: 0 < f <= 31 : 0 means default(20)*/ unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worst shrinkDictMaxRegression% worse than the max dict size dictionary. */ ZDICT_params_t zParams; } ZDICT_fastCover_params_t; /*! ZDICT_trainFromBuffer_cover(): * Train a dictionary from an array of samples using the COVER algorithm. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
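 *        For instance (illustrative sketch; assumes a ZDICT_cover_params_t
 *        `params` with k and d already set):
 *        \code
 *        size_t const dictSize = ZDICT_trainFromBuffer_cover(
 *                dictBuffer, 102400,     // ~100 KB target
 *                samplesBuffer, samplesSizes, nbSamples, params);
 *        // check ZDICT_isError(dictSize) before using the dictionary
 *        \endcode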
* It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters); /*! ZDICT_optimizeTrainFromBuffer_cover(): * The same requirements as above hold for all the parameters except `parameters`. * This function tries many parameter combinations and picks the best parameters. * `*parameters` is filled with the best parameters found, * dictionary constructed with those parameters is stored in `dictBuffer`. * * All of the parameters d, k, steps are optional. * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. * if steps is zero it defaults to its default value. * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. * * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * On success `*parameters` contains the parameters selected. * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. */ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters); /*! ZDICT_trainFromBuffer_fastCover(): * Train a dictionary from an array of samples using a modified version of COVER algorithm. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * d and k are required. * All other parameters are optional, will use default values if not provided * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters); /*! ZDICT_optimizeTrainFromBuffer_fastCover(): * The same requirements as above hold for all the parameters except `parameters`. * This function tries many parameter combinations (specifically, k and d combinations) * and picks the best parameters. `*parameters` is filled with the best parameters found, * dictionary constructed with those parameters is stored in `dictBuffer`. 
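 * A typical invocation might look like this (sketch; buffer names are
 * illustrative):
 * \code
 * ZDICT_fastCover_params_t params;
 * memset(&params, 0, sizeof(params));   // zeros request the defaults below
 * { size_t const dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(
 *           dictBuffer, dictBufferCapacity,
 *           samplesBuffer, samplesSizes, nbSamples, &params);
 *   // on success, params holds the selected k, d, f, steps, and accel
 * }
 * \endcode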
* All of the parameters d, k, steps, f, and accel are optional. * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. * if steps is zero it defaults to its default value. * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. * If f is zero, default value of 20 is used. * If accel is zero, default value of 1 is used. * * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * On success `*parameters` contains the parameters selected. * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. */ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters); typedef struct { unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ ZDICT_params_t zParams; } ZDICT_legacy_params_t; /*! ZDICT_trainFromBuffer_legacy(): * Train a dictionary from an array of samples. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * `parameters` is optional and can be provided with values set to 0 to mean "default". * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * See ZDICT_trainFromBuffer() for details on failure modes. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters); /* Deprecation warnings */ /* It is generally possible to disable deprecation warnings from compiler, for example with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. 
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS # define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ #else # define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API # elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) # elif (ZDICT_GCC_VERSION >= 301) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) # elif defined(_MSC_VER) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") # define ZDICT_DEPRECATED(message) ZDICTLIB_API # endif #endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); #endif /* ZDICT_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif #endif /* DICTBUILDER_H_001 */ /**** ended inlining zdict.h ****/ /** * COVER_best_t is used for two purposes: * 1. Synchronizing threads. * 2. Saving the best parameters and dictionary. * * All of the methods except COVER_best_init() are thread safe if zstd is * compiled with multithreaded support. */ typedef struct COVER_best_s { ZSTD_pthread_mutex_t mutex; ZSTD_pthread_cond_t cond; size_t liveJobs; void *dict; size_t dictSize; ZDICT_cover_params_t parameters; size_t compressedSize; } COVER_best_t; /** * A segment is a range in the source as well as the score of the segment. */ typedef struct { U32 begin; U32 end; U32 score; } COVER_segment_t; /** *Number of epochs and size of each epoch. */ typedef struct { U32 num; U32 size; } COVER_epoch_info_t; /** * Struct used for the dictionary selection function. */ typedef struct COVER_dictSelection { BYTE* dictContent; size_t dictSize; size_t totalCompressedSize; } COVER_dictSelection_t; /** * Computes the number of epochs and the size of each epoch. * We will make sure that each epoch gets at least 10 * k bytes. * * The COVER algorithms divide the data up into epochs of equal size and * select one segment from each epoch. * * @param maxDictSize The maximum allowed dictionary size. * @param nbDmers The number of dmers we are training on. * @param k The parameter k (segment size). * @param passes The target number of passes over the dmer corpus. * More passes means a better dictionary. */ COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers, U32 k, U32 passes); /** * Warns the user when their corpus is too small. */ void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel); /** * Checks total compressed size of a dictionary */ size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, const size_t *samplesSizes, const BYTE *samples, size_t *offsets, size_t nbTrainSamples, size_t nbSamples, BYTE *const dict, size_t dictBufferCapacity); /** * Returns the sum of the sample sizes. */ size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) ; /** * Initialize the `COVER_best_t`. */ void COVER_best_init(COVER_best_t *best); /** * Wait until liveJobs == 0. 
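 * Implemented further below as a standard condition-variable wait loop on
 * best->mutex / best->cond.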
*/ void COVER_best_wait(COVER_best_t *best); /** * Call COVER_best_wait() and then destroy the COVER_best_t. */ void COVER_best_destroy(COVER_best_t *best); /** * Called when a thread is about to be launched. * Increments liveJobs. */ void COVER_best_start(COVER_best_t *best); /** * Called when a thread finishes executing, both on error or success. * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, COVER_dictSelection_t selection); /** * Error function for COVER_selectDict function. Checks if the return * value is an error. */ unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection); /** * Error function for COVER_selectDict function. Returns a struct where * return.totalCompressedSize is a ZSTD error. */ COVER_dictSelection_t COVER_dictSelectionError(size_t error); /** * Always call after selectDict is called to free up used memory from * newly created dictionary. */ void COVER_dictSelectionFree(COVER_dictSelection_t selection); /** * Called to finalize the dictionary and select one based on whether or not * the shrink-dict flag was enabled. If enabled the dictionary used is the * smallest dictionary within a specified regression of the compressed size * from the largest dictionary. */ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity, size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize); /**** ended inlining cover.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY #endif /**** skipping file: zdict.h ****/ /*-************************************* * Constants ***************************************/ #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) #define COVER_DEFAULT_SPLITPOINT 1.0 /*-************************************* * Console display ***************************************/ #ifndef LOCALDISPLAYLEVEL static int g_displayLevel = 2; #endif #undef DISPLAY #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } #undef LOCALDISPLAYLEVEL #define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ #undef DISPLAYLEVEL #define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) #ifndef LOCALDISPLAYUPDATE static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100; static clock_t g_time = 0; #endif #undef LOCALDISPLAYUPDATE #define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ if (displayLevel >= l) { \ if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \ g_time = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } #undef DISPLAYUPDATE #define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) /*-************************************* * Hash table *************************************** * A small specialized hash map for storing activeDmers. * The map does not resize, so if it becomes full it will loop forever. * Thus, the map must be large enough to store every value. * The map implements linear probing and keeps its load less than 0.5. 
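 * For example, COVER_map_init() below allocates 2^(ZSTD_highbit32(size)+2)
 * slots, so for size = 127 the table gets 2^(6+2) = 256 slots and a
 * worst-case load of 127/256 ~= 0.496, just under 0.5.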
*/ #define MAP_EMPTY_VALUE ((U32)-1) typedef struct COVER_map_pair_t_s { U32 key; U32 value; } COVER_map_pair_t; typedef struct COVER_map_s { COVER_map_pair_t *data; U32 sizeLog; U32 size; U32 sizeMask; } COVER_map_t; /** * Clear the map. */ static void COVER_map_clear(COVER_map_t *map) { memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t)); } /** * Initializes a map of the given size. * Returns 1 on success and 0 on failure. * The map must be destroyed with COVER_map_destroy(). * The map is only guaranteed to be large enough to hold size elements. */ static int COVER_map_init(COVER_map_t *map, U32 size) { map->sizeLog = ZSTD_highbit32(size) + 2; map->size = (U32)1 << map->sizeLog; map->sizeMask = map->size - 1; map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t)); if (!map->data) { map->sizeLog = 0; map->size = 0; return 0; } COVER_map_clear(map); return 1; } /** * Internal hash function */ static const U32 COVER_prime4bytes = 2654435761U; static U32 COVER_map_hash(COVER_map_t *map, U32 key) { return (key * COVER_prime4bytes) >> (32 - map->sizeLog); } /** * Helper function that returns the index that a key should be placed into. */ static U32 COVER_map_index(COVER_map_t *map, U32 key) { const U32 hash = COVER_map_hash(map, key); U32 i; for (i = hash;; i = (i + 1) & map->sizeMask) { COVER_map_pair_t *pos = &map->data[i]; if (pos->value == MAP_EMPTY_VALUE) { return i; } if (pos->key == key) { return i; } } } /** * Returns the pointer to the value for key. * If key is not in the map, it is inserted and the value is set to 0. * The map must not be full. */ static U32 *COVER_map_at(COVER_map_t *map, U32 key) { COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)]; if (pos->value == MAP_EMPTY_VALUE) { pos->key = key; pos->value = 0; } return &pos->value; } /** * Deletes key from the map if present. */ static void COVER_map_remove(COVER_map_t *map, U32 key) { U32 i = COVER_map_index(map, key); COVER_map_pair_t *del = &map->data[i]; U32 shift = 1; if (del->value == MAP_EMPTY_VALUE) { return; } for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) { COVER_map_pair_t *const pos = &map->data[i]; /* If the position is empty we are done */ if (pos->value == MAP_EMPTY_VALUE) { del->value = MAP_EMPTY_VALUE; return; } /* If pos can be moved to del do so */ if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) { del->key = pos->key; del->value = pos->value; del = pos; shift = 1; } else { ++shift; } } } /** * Destroys a map that is inited with COVER_map_init(). */ static void COVER_map_destroy(COVER_map_t *map) { if (map->data) { free(map->data); } map->data = NULL; map->size = 0; } /*-************************************* * Context ***************************************/ typedef struct { const BYTE *samples; size_t *offsets; const size_t *samplesSizes; size_t nbSamples; size_t nbTrainSamples; size_t nbTestSamples; U32 *suffix; size_t suffixSize; U32 *freqs; U32 *dmerAt; unsigned d; } COVER_ctx_t; /* We need a global context for qsort... */ static COVER_ctx_t *g_coverCtx = NULL; /*-************************************* * Helper functions ***************************************/ /** * Returns the sum of the sample sizes. */ size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { size_t sum = 0; unsigned i; for (i = 0; i < nbSamples; ++i) { sum += samplesSizes[i]; } return sum; } /** * Returns -1 if the dmer at lp is less than the dmer at rp. * Return 0 if the dmers at lp and rp are equal. 
* Returns 1 if the dmer at lp is greater than the dmer at rp. */ static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) { U32 const lhs = *(U32 const *)lp; U32 const rhs = *(U32 const *)rp; return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d); } /** * Faster version for d <= 8. */ static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) { U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1); U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask; U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask; if (lhs < rhs) { return -1; } return (lhs > rhs); } /** * Same as COVER_cmp() except ties are broken by pointer value * NOTE: g_coverCtx must be set to call this function. A global is required because * qsort doesn't take an opaque pointer. */ static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) { int result = COVER_cmp(g_coverCtx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } /** * Faster version for d <= 8. */ static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) { int result = COVER_cmp8(g_coverCtx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } /** * Returns the first pointer in [first, last) whose element does not compare * less than value. If no such element exists it returns last. */ static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, size_t value) { size_t count = last - first; while (count != 0) { size_t step = count / 2; const size_t *ptr = first; ptr += step; if (*ptr < value) { first = ++ptr; count -= step + 1; } else { count = step; } } return first; } /** * Generic groupBy function. * Groups an array sorted by cmp into groups with equivalent values. * Calls grp for each group. */ static void COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx, int (*cmp)(COVER_ctx_t *, const void *, const void *), void (*grp)(COVER_ctx_t *, const void *, const void *)) { const BYTE *ptr = (const BYTE *)data; size_t num = 0; while (num < count) { const BYTE *grpEnd = ptr + size; ++num; while (num < count && cmp(ctx, ptr, grpEnd) == 0) { grpEnd += size; ++num; } grp(ctx, ptr, grpEnd); ptr = grpEnd; } } /*-************************************* * Cover functions ***************************************/ /** * Called on each group of positions with the same dmer. * Counts the frequency of each dmer and saves it in the suffix array. * Fills `ctx->dmerAt`. */ static void COVER_group(COVER_ctx_t *ctx, const void *group, const void *groupEnd) { /* The group consists of all the positions with the same first d bytes. */ const U32 *grpPtr = (const U32 *)group; const U32 *grpEnd = (const U32 *)groupEnd; /* The dmerId is how we will reference this dmer. * This allows us to map the whole dmer space to a much smaller space, the * size of the suffix array. */ const U32 dmerId = (U32)(grpPtr - ctx->suffix); /* Count the number of samples this dmer shows up in */ U32 freq = 0; /* Details */ const size_t *curOffsetPtr = ctx->offsets; const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples; /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a * different sample than the last. */ size_t curSampleEnd = ctx->offsets[0]; for (; grpPtr != grpEnd; ++grpPtr) { /* Save the dmerId for this position so we can get back to it. */ ctx->dmerAt[*grpPtr] = dmerId; /* Dictionaries only help for the first reference to the dmer. 
* After that zstd can reference the match from the previous reference. * So only count each dmer once for each sample it is in. */ if (*grpPtr < curSampleEnd) { continue; } freq += 1; /* Binary search to find the end of the sample *grpPtr is in. * In the common case that grpPtr + 1 == grpEnd we can skip the binary * search because the loop is over. */ if (grpPtr + 1 != grpEnd) { const size_t *sampleEndPtr = COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr); curSampleEnd = *sampleEndPtr; curOffsetPtr = sampleEndPtr + 1; } } /* At this point we are never going to look at this segment of the suffix * array again. We take advantage of this fact to save memory. * We store the frequency of the dmer in the first position of the group, * which is dmerId. */ ctx->suffix[dmerId] = freq; } /** * Selects the best segment in an epoch. * Segments are scored according to the function: * * Let F(d) be the frequency of dmer d. * Let S_i be the dmer at position i of segment S which has length k. * * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) * * Once the dmer d is in the dictionary we set F(d) = 0. */ static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, U32 begin, U32 end, ZDICT_cover_params_t parameters) { /* Constants */ const U32 k = parameters.k; const U32 d = parameters.d; const U32 dmersInK = k - d + 1; /* Try each segment (activeSegment) and save the best (bestSegment) */ COVER_segment_t bestSegment = {0, 0, 0}; COVER_segment_t activeSegment; /* Reset the activeDmers in the segment */ COVER_map_clear(activeDmers); /* The activeSegment starts at the beginning of the epoch. */ activeSegment.begin = begin; activeSegment.end = begin; activeSegment.score = 0; /* Slide the activeSegment through the whole epoch. * Save the best segment in bestSegment. */ while (activeSegment.end < end) { /* The dmerId for the dmer at the next position */ U32 newDmer = ctx->dmerAt[activeSegment.end]; /* The entry in activeDmers for this dmerId */ U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer); /* If the dmer isn't already present in the segment add its score. */ if (*newDmerOcc == 0) { /* The paper suggests using the L-0.5 norm, but experiments show that it * doesn't help. */ activeSegment.score += freqs[newDmer]; } /* Add the dmer to the segment */ activeSegment.end += 1; *newDmerOcc += 1; /* If the window is now too large, drop the first position */ if (activeSegment.end - activeSegment.begin == dmersInK + 1) { U32 delDmer = ctx->dmerAt[activeSegment.begin]; U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer); activeSegment.begin += 1; *delDmerOcc -= 1; /* If this is the last occurrence of the dmer, subtract its score */ if (*delDmerOcc == 0) { COVER_map_remove(activeDmers, delDmer); activeSegment.score -= freqs[delDmer]; } } /* If this segment is the best so far save it */ if (activeSegment.score > bestSegment.score) { bestSegment = activeSegment; } } { /* Trim off the zero frequency head and tail from the segment. */ U32 newBegin = bestSegment.end; U32 newEnd = bestSegment.begin; U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { U32 freq = freqs[ctx->dmerAt[pos]]; if (freq != 0) { newBegin = MIN(newBegin, pos); newEnd = pos + 1; } } bestSegment.begin = newBegin; bestSegment.end = newEnd; } { /* Zero out the frequency of each dmer covered by the chosen segment. */ U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { freqs[ctx->dmerAt[pos]] = 0; } } return bestSegment; } /** * Check the validity of the parameters.
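 * The constraints enforced below mirror the ZDICT_cover_params_t
 * documentation: k and d must be non-zero, d <= k <= maxDictSize,
 * and 0 < splitPoint <= 1.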
* Returns non-zero if the parameters are valid and 0 otherwise. */ static int COVER_checkParameters(ZDICT_cover_params_t parameters, size_t maxDictSize) { /* k and d are required parameters */ if (parameters.d == 0 || parameters.k == 0) { return 0; } /* k <= maxDictSize */ if (parameters.k > maxDictSize) { return 0; } /* d <= k */ if (parameters.d > parameters.k) { return 0; } /* 0 < splitPoint <= 1 */ if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){ return 0; } return 1; } /** * Clean up a context initialized with `COVER_ctx_init()`. */ static void COVER_ctx_destroy(COVER_ctx_t *ctx) { if (!ctx) { return; } if (ctx->suffix) { free(ctx->suffix); ctx->suffix = NULL; } if (ctx->freqs) { free(ctx->freqs); ctx->freqs = NULL; } if (ctx->dmerAt) { free(ctx->dmerAt); ctx->dmerAt = NULL; } if (ctx->offsets) { free(ctx->offsets); ctx->offsets = NULL; } } /** * Prepare a context for dictionary building. * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `COVER_ctx_destroy()`. */ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, unsigned d, double splitPoint) { const BYTE *const samples = (const BYTE *)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Split samples into testing and training sets */ const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ?
COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20)); return ERROR(srcSize_wrong); } /* Check if there are at least 5 training samples */ if (nbTrainSamples < 5) { DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples); return ERROR(srcSize_wrong); } /* Check if there's testing sample */ if (nbTestSamples < 1) { DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples); return ERROR(srcSize_wrong); } /* Zero the context */ memset(ctx, 0, sizeof(*ctx)); DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, (unsigned)trainingSamplesSize); DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, (unsigned)testSamplesSize); ctx->samples = samples; ctx->samplesSizes = samplesSizes; ctx->nbSamples = nbSamples; ctx->nbTrainSamples = nbTrainSamples; ctx->nbTestSamples = nbTestSamples; /* Partial suffix array */ ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1; ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); /* Maps index to the dmerID */ ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); /* The offsets of each file */ ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t)); if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) { DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n"); COVER_ctx_destroy(ctx); return ERROR(memory_allocation); } ctx->freqs = NULL; ctx->d = d; /* Fill offsets from the samplesSizes */ { U32 i; ctx->offsets[0] = 0; for (i = 1; i <= nbSamples; ++i) { ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; } } DISPLAYLEVEL(2, "Constructing partial suffix array\n"); { /* suffix is a partial suffix array. * It only sorts suffixes by their first parameters.d bytes. * The sort is stable, so each dmer group is sorted by position in input. */ U32 i; for (i = 0; i < ctx->suffixSize; ++i) { ctx->suffix[i] = i; } /* qsort doesn't take an opaque pointer, so pass as a global. * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is. */ g_coverCtx = ctx; #if defined(__OpenBSD__) mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32), (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); #else qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); #endif } DISPLAYLEVEL(2, "Computing frequencies\n"); /* For each dmer group (group of positions with the same first d bytes): * 1. For each position we set dmerAt[position] = dmerID. The dmerID is * (groupBeginPtr - suffix). This allows us to go from position to * dmerID so we can look up values in freq. * 2. We calculate how many samples the dmer occurs in and save it in * freqs[dmerId]. */ COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group); ctx->freqs = ctx->suffix; ctx->suffix = NULL; return 0; } void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel) { const double ratio = (double)nbDmers / maxDictSize; if (ratio >= 10) { return; } LOCALDISPLAYLEVEL(displayLevel, 1, "WARNING: The maximum dictionary size %u is too large " "compared to the source size %u! " "size(source)/size(dictionary) = %f, but it should be >= " "10! 
This may lead to a subpar dictionary! We recommend " "training on sources at least 10x, and preferably 100x " "the size of the dictionary! \n", (U32)maxDictSize, (U32)nbDmers, ratio); } COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers, U32 k, U32 passes) { const U32 minEpochSize = k * 10; COVER_epoch_info_t epochs; epochs.num = MAX(1, maxDictSize / k / passes); epochs.size = nbDmers / epochs.num; if (epochs.size >= minEpochSize) { assert(epochs.size * epochs.num <= nbDmers); return epochs; } epochs.size = MIN(minEpochSize, nbDmers); epochs.num = nbDmers / epochs.size; assert(epochs.size * epochs.num <= nbDmers); return epochs; } /** * Given the prepared context build the dictionary. */ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, void *dictBuffer, size_t dictBufferCapacity, ZDICT_cover_params_t parameters) { BYTE *const dict = (BYTE *)dictBuffer; size_t tail = dictBufferCapacity; /* Divide the data into epochs. We will select one segment from each epoch. */ const COVER_epoch_info_t epochs = COVER_computeEpochs( (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4); const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3)); size_t zeroScoreRun = 0; size_t epoch; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", (U32)epochs.num, (U32)epochs.size); /* Loop through the epochs until there are no more segments or the dictionary * is full. */ for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) { const U32 epochBegin = (U32)(epoch * epochs.size); const U32 epochEnd = epochBegin + epochs.size; size_t segmentSize; /* Select a segment */ COVER_segment_t segment = COVER_selectSegment( ctx, freqs, activeDmers, epochBegin, epochEnd, parameters); /* If the segment covers no dmers, then we are out of content. * There may be new content in other epochs, so continue for some time. */ if (segment.score == 0) { if (++zeroScoreRun >= maxZeroScoreRun) { break; } continue; } zeroScoreRun = 0; /* Trim the segment if necessary and if it is too small then we are done */ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); if (segmentSize < parameters.d) { break; } /* We fill the dictionary from the back to allow the best segments to be * referenced with the smallest offsets.
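 * Content near the end of the dictionary is reachable with the smallest
 * match offsets, which are the cheapest to encode; this is the same
 * reasoning given in the ZDICT_finalizeDictionary() documentation.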
*/ tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } DISPLAYLEVEL(2, "\r%79s\r", ""); return tail; } ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters) { BYTE* const dict = (BYTE*)dictBuffer; COVER_ctx_t ctx; COVER_map_t activeDmers; parameters.splitPoint = 1.0; /* Initialize global data */ g_displayLevel = parameters.zParams.notificationLevel; /* Checks */ if (!COVER_checkParameters(parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { DISPLAYLEVEL(1, "Cover must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } /* Initialize context and activeDmers */ { size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, parameters.d, parameters.splitPoint); if (ZSTD_isError(initVal)) { return initVal; } } COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel); if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); COVER_ctx_destroy(&ctx); return ERROR(memory_allocation); } DISPLAYLEVEL(2, "Building dictionary\n"); { const size_t tail = COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer, dictBufferCapacity, parameters); const size_t dictionarySize = ZDICT_finalizeDictionary( dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbSamples, parameters.zParams); if (!ZSTD_isError(dictionarySize)) { DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", (unsigned)dictionarySize); } COVER_ctx_destroy(&ctx); COVER_map_destroy(&activeDmers); return dictionarySize; } } size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, const size_t *samplesSizes, const BYTE *samples, size_t *offsets, size_t nbTrainSamples, size_t nbSamples, BYTE *const dict, size_t dictBufferCapacity) { size_t totalCompressedSize = ERROR(GENERIC); /* Pointers */ ZSTD_CCtx *cctx; ZSTD_CDict *cdict; void *dst; /* Local variables */ size_t dstCapacity; size_t i; /* Allocate dst with enough space to compress the maximum sized sample */ { size_t maxSampleSize = 0; i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0; for (; i < nbSamples; ++i) { maxSampleSize = MAX(samplesSizes[i], maxSampleSize); } dstCapacity = ZSTD_compressBound(maxSampleSize); dst = malloc(dstCapacity); } /* Create the cctx and cdict */ cctx = ZSTD_createCCtx(); cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel); if (!dst || !cctx || !cdict) { goto _compressCleanup; } /* Compress each sample and sum their sizes (or error) */ totalCompressedSize = dictBufferCapacity; i = parameters.splitPoint < 1.0 ? 
nbTrainSamples : 0; for (; i < nbSamples; ++i) { const size_t size = ZSTD_compress_usingCDict( cctx, dst, dstCapacity, samples + offsets[i], samplesSizes[i], cdict); if (ZSTD_isError(size)) { totalCompressedSize = size; goto _compressCleanup; } totalCompressedSize += size; } _compressCleanup: ZSTD_freeCCtx(cctx); ZSTD_freeCDict(cdict); if (dst) { free(dst); } return totalCompressedSize; } /** * Initialize the `COVER_best_t`. */ void COVER_best_init(COVER_best_t *best) { if (best==NULL) return; /* compatible with init on NULL */ (void)ZSTD_pthread_mutex_init(&best->mutex, NULL); (void)ZSTD_pthread_cond_init(&best->cond, NULL); best->liveJobs = 0; best->dict = NULL; best->dictSize = 0; best->compressedSize = (size_t)-1; memset(&best->parameters, 0, sizeof(best->parameters)); } /** * Wait until liveJobs == 0. */ void COVER_best_wait(COVER_best_t *best) { if (!best) { return; } ZSTD_pthread_mutex_lock(&best->mutex); while (best->liveJobs != 0) { ZSTD_pthread_cond_wait(&best->cond, &best->mutex); } ZSTD_pthread_mutex_unlock(&best->mutex); } /** * Call COVER_best_wait() and then destroy the COVER_best_t. */ void COVER_best_destroy(COVER_best_t *best) { if (!best) { return; } COVER_best_wait(best); if (best->dict) { free(best->dict); } ZSTD_pthread_mutex_destroy(&best->mutex); ZSTD_pthread_cond_destroy(&best->cond); } /** * Called when a thread is about to be launched. * Increments liveJobs. */ void COVER_best_start(COVER_best_t *best) { if (!best) { return; } ZSTD_pthread_mutex_lock(&best->mutex); ++best->liveJobs; ZSTD_pthread_mutex_unlock(&best->mutex); } /** * Called when a thread finishes executing, both on error or success. * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. 
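 * The update is performed under best->mutex; when the last live job
 * finishes, best->cond is broadcast so COVER_best_wait() can return.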
*/ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, COVER_dictSelection_t selection) { void* dict = selection.dictContent; size_t compressedSize = selection.totalCompressedSize; size_t dictSize = selection.dictSize; if (!best) { return; } { size_t liveJobs; ZSTD_pthread_mutex_lock(&best->mutex); --best->liveJobs; liveJobs = best->liveJobs; /* If the new dictionary is better */ if (compressedSize < best->compressedSize) { /* Allocate space if necessary */ if (!best->dict || best->dictSize < dictSize) { if (best->dict) { free(best->dict); } best->dict = malloc(dictSize); if (!best->dict) { best->compressedSize = ERROR(GENERIC); best->dictSize = 0; ZSTD_pthread_cond_signal(&best->cond); ZSTD_pthread_mutex_unlock(&best->mutex); return; } } /* Save the dictionary, parameters, and size */ if (dict) { memcpy(best->dict, dict, dictSize); best->dictSize = dictSize; best->parameters = parameters; best->compressedSize = compressedSize; } } if (liveJobs == 0) { ZSTD_pthread_cond_broadcast(&best->cond); } ZSTD_pthread_mutex_unlock(&best->mutex); } } COVER_dictSelection_t COVER_dictSelectionError(size_t error) { COVER_dictSelection_t selection = { NULL, 0, error }; return selection; } unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) { return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent); } void COVER_dictSelectionFree(COVER_dictSelection_t selection){ free(selection.dictContent); } COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity, size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) { size_t largestDict = 0; size_t largestCompressed = 0; BYTE* customDictContentEnd = customDictContent + dictContentSize; BYTE * largestDictbuffer = (BYTE *)malloc(dictBufferCapacity); BYTE * candidateDictBuffer = (BYTE *)malloc(dictBufferCapacity); double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00; if (!largestDictbuffer || !candidateDictBuffer) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(dictContentSize); } /* Initial dictionary size and compressed size */ memcpy(largestDictbuffer, customDictContent, dictContentSize); dictContentSize = ZDICT_finalizeDictionary( largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); if (ZDICT_isError(dictContentSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(dictContentSize); } totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, largestDictbuffer, dictContentSize); if (ZSTD_isError(totalCompressedSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(totalCompressedSize); } if (params.shrinkDict == 0) { COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; free(candidateDictBuffer); return selection; } largestDict = dictContentSize; largestCompressed = totalCompressedSize; dictContentSize = ZDICT_DICTSIZE_MIN; /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */ while (dictContentSize < largestDict) { memcpy(candidateDictBuffer, largestDictbuffer, largestDict); dictContentSize = ZDICT_finalizeDictionary( candidateDictBuffer, dictBufferCapacity, 
customDictContentEnd - dictContentSize, dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); if (ZDICT_isError(dictContentSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(dictContentSize); } totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, candidateDictBuffer, dictContentSize); if (ZSTD_isError(totalCompressedSize)) { free(largestDictbuffer); free(candidateDictBuffer); return COVER_dictSelectionError(totalCompressedSize); } if (totalCompressedSize <= largestCompressed * regressionTolerance) { COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize }; free(largestDictbuffer); return selection; } dictContentSize *= 2; } dictContentSize = largestDict; totalCompressedSize = largestCompressed; { COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; free(candidateDictBuffer); return selection; } } /** * Parameters for COVER_tryParameters(). */ typedef struct COVER_tryParameters_data_s { const COVER_ctx_t *ctx; COVER_best_t *best; size_t dictBufferCapacity; ZDICT_cover_params_t parameters; } COVER_tryParameters_data_t; /** * Tries a set of parameters and updates the COVER_best_t with the results. * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading. */ static void COVER_tryParameters(void *opaque) { /* Save parameters as local variables */ COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque; const COVER_ctx_t *const ctx = data->ctx; const ZDICT_cover_params_t parameters = data->parameters; size_t dictBufferCapacity = data->dictBufferCapacity; size_t totalCompressedSize = ERROR(GENERIC); /* Allocate space for hash table, dict, and freqs */ COVER_map_t activeDmers; BYTE* const dict = (BYTE*)malloc(dictBufferCapacity); COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32)); if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); goto _cleanup; } if (!dict || !freqs) { DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); goto _cleanup; } /* Copy the frequencies because we need to modify them */ memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32)); /* Build the dictionary */ { const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict, dictBufferCapacity, parameters); selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, totalCompressedSize); if (COVER_dictSelectionIsError(selection)) { DISPLAYLEVEL(1, "Failed to select dictionary\n"); goto _cleanup; } } _cleanup: free(dict); COVER_best_finish(data->best, parameters, selection); free(data); COVER_map_destroy(&activeDmers); COVER_dictSelectionFree(selection); free(freqs); } ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters) { /* constants */ const unsigned nbThreads = parameters->nbThreads; const double splitPoint = parameters->splitPoint <= 0.0 ? 
COVER_DEFAULT_SPLITPOINT : parameters->splitPoint; const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); const unsigned kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); const unsigned shrinkDict = 0; /* Local variables */ const int displayLevel = parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { DISPLAYLEVEL(1, "Cover must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } if (nbThreads > 1) { pool = POOL_create(nbThreads, 1); if (!pool) { return ERROR(memory_allocation); } } /* Initialization */ COVER_best_init(&best); /* Turn down global display level to clean up display at level 2 and below */ g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ COVER_ctx_t ctx; LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); { const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint); if (ZSTD_isError(initVal)) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; } } if (!warned) { COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel); warned = 1; } /* Loop through k reusing the same context */ for (k = kMinK; k <= kMaxK; k += kStepSize) { /* Prepare the arguments */ COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( sizeof(COVER_tryParameters_data_t)); LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); if (!data) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); COVER_ctx_destroy(&ctx); POOL_free(pool); return ERROR(memory_allocation); } data->ctx = &ctx; data->best = &best; data->dictBufferCapacity = dictBufferCapacity; data->parameters = *parameters; data->parameters.k = k; data->parameters.d = d; data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; data->parameters.zParams.notificationLevel = g_displayLevel; /* Check the parameters */ if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); free(data); continue; } /* Call the function and pass ownership of data to it */ COVER_best_start(&best); if (pool) { POOL_add(pool, &COVER_tryParameters, data); } else { COVER_tryParameters(data); } /* Print status */ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", (unsigned)((iteration * 100) / 
kIterations)); ++iteration; } COVER_best_wait(&best); COVER_ctx_destroy(&ctx); } LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); /* Fill the output buffer and parameters with the output of the best parameters */ { const size_t dictSize = best.dictSize; if (ZSTD_isError(best.compressedSize)) { const size_t compressedSize = best.compressedSize; COVER_best_destroy(&best); POOL_free(pool); return compressedSize; } *parameters = best.parameters; memcpy(dictBuffer, best.dict, dictSize); COVER_best_destroy(&best); POOL_free(pool); return dictSize; } } /**** ended inlining dictBuilder/cover.c ****/ /**** start inlining dictBuilder/divsufsort.c ****/ /* * divsufsort.c for libdivsufsort-lite * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ /*- Compiler specifics -*/ #ifdef __clang__ #pragma clang diagnostic ignored "-Wshorten-64-to-32" #endif #if defined(_MSC_VER) # pragma warning(disable : 4244) # pragma warning(disable : 4127) /* C4127 : Condition expression is constant */ #endif /*- Dependencies -*/ #include <assert.h> #include <stdio.h> #include <stdlib.h> /**** start inlining divsufsort.h ****/ /* * divsufsort.h for libdivsufsort-lite * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _DIVSUFSORT_H #define _DIVSUFSORT_H 1 #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /*- Prototypes -*/ /** * Constructs the suffix array of a given string. * @param T [0..n-1] The input string.
* @param SA [0..n-1] The output array of suffixes. * @param n The length of the given string. * @param openMP enables OpenMP optimization. * @return 0 if no error occurred, -1 or -2 otherwise. */ int divsufsort(const unsigned char *T, int *SA, int n, int openMP); /** * Constructs the burrows-wheeler transformed string of a given string. * @param T [0..n-1] The input string. * @param U [0..n-1] The output string. (can be T) * @param A [0..n-1] The temporary array. (can be NULL) * @param n The length of the given string. * @param num_indexes The length of secondary indexes array. (can be NULL) * @param indexes The secondary indexes array. (can be NULL) * @param openMP enables OpenMP optimization. * @return The primary index if no error occurred, -1 or -2 otherwise. */ int divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP); #ifdef __cplusplus } /* extern "C" */ #endif /* __cplusplus */ #endif /* _DIVSUFSORT_H */ /**** ended inlining divsufsort.h ****/ /*- Constants -*/ #if defined(INLINE) # undef INLINE #endif #if !defined(INLINE) # define INLINE __inline #endif #if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1) # undef ALPHABET_SIZE #endif #if !defined(ALPHABET_SIZE) # define ALPHABET_SIZE (256) #endif #define BUCKET_A_SIZE (ALPHABET_SIZE) #define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE) #if defined(SS_INSERTIONSORT_THRESHOLD) # if SS_INSERTIONSORT_THRESHOLD < 1 # undef SS_INSERTIONSORT_THRESHOLD # define SS_INSERTIONSORT_THRESHOLD (1) # endif #else # define SS_INSERTIONSORT_THRESHOLD (8) #endif #if defined(SS_BLOCKSIZE) # if SS_BLOCKSIZE < 0 # undef SS_BLOCKSIZE # define SS_BLOCKSIZE (0) # elif 32768 <= SS_BLOCKSIZE # undef SS_BLOCKSIZE # define SS_BLOCKSIZE (32767) # endif #else # define SS_BLOCKSIZE (1024) #endif /* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */ #if SS_BLOCKSIZE == 0 # define SS_MISORT_STACKSIZE (96) #elif SS_BLOCKSIZE <= 4096 # define SS_MISORT_STACKSIZE (16) #else # define SS_MISORT_STACKSIZE (24) #endif #define SS_SMERGE_STACKSIZE (32) #define TR_INSERTIONSORT_THRESHOLD (8) #define TR_STACKSIZE (64) /*- Macros -*/ #ifndef SWAP # define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0) #endif /* SWAP */ #ifndef MIN # define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) #endif /* MIN */ #ifndef MAX # define MAX(_a, _b) (((_a) > (_b)) ? 
(_a) : (_b)) #endif /* MAX */ #define STACK_PUSH(_a, _b, _c, _d)\ do {\ assert(ssize < STACK_SIZE);\ stack[ssize].a = (_a), stack[ssize].b = (_b),\ stack[ssize].c = (_c), stack[ssize++].d = (_d);\ } while(0) #define STACK_PUSH5(_a, _b, _c, _d, _e)\ do {\ assert(ssize < STACK_SIZE);\ stack[ssize].a = (_a), stack[ssize].b = (_b),\ stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\ } while(0) #define STACK_POP(_a, _b, _c, _d)\ do {\ assert(0 <= ssize);\ if(ssize == 0) { return; }\ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ (_c) = stack[ssize].c, (_d) = stack[ssize].d;\ } while(0) #define STACK_POP5(_a, _b, _c, _d, _e)\ do {\ assert(0 <= ssize);\ if(ssize == 0) { return; }\ (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\ } while(0) #define BUCKET_A(_c0) bucket_A[(_c0)] #if ALPHABET_SIZE == 256 #define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)]) #define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)]) #else #define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)]) #define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)]) #endif /*- Private Functions -*/ static const int lg_table[256]= { -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 }; #if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) static INLINE int ss_ilg(int n) { #if SS_BLOCKSIZE == 0 return (n & 0xffff0000) ? ((n & 0xff000000) ? 24 + lg_table[(n >> 24) & 0xff] : 16 + lg_table[(n >> 16) & 0xff]) : ((n & 0x0000ff00) ? 8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]); #elif SS_BLOCKSIZE < 256 return lg_table[n]; #else return (n & 0xff00) ? 
8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]; #endif } #endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ #if SS_BLOCKSIZE != 0 static const int sqq_table[256] = { 0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61, 64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89, 90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168, 169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180, 181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191, 192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201, 202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211, 212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221, 221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230, 230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238, 239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247, 247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255 }; static INLINE int ss_isqrt(int x) { int y, e; if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; } e = (x & 0xffff0000) ? ((x & 0xff000000) ? 24 + lg_table[(x >> 24) & 0xff] : 16 + lg_table[(x >> 16) & 0xff]) : ((x & 0x0000ff00) ? 8 + lg_table[(x >> 8) & 0xff] : 0 + lg_table[(x >> 0) & 0xff]); if(e >= 16) { y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7); if(e >= 24) { y = (y + 1 + x / y) >> 1; } y = (y + 1 + x / y) >> 1; } else if(e >= 8) { y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1; } else { return sqq_table[x] >> 4; } return (x < (y * y)) ? y - 1 : y; } #endif /* SS_BLOCKSIZE != 0 */ /*---------------------------------------------------------------------------*/ /* Compares two suffixes. */ static INLINE int ss_compare(const unsigned char *T, const int *p1, const int *p2, int depth) { const unsigned char *U1, *U2, *U1n, *U2n; for(U1 = T + depth + *p1, U2 = T + depth + *p2, U1n = T + *(p1 + 1) + 2, U2n = T + *(p2 + 1) + 2; (U1 < U1n) && (U2 < U2n) && (*U1 == *U2); ++U1, ++U2) { } return U1 < U1n ? (U2 < U2n ? *U1 - *U2 : 1) : (U2 < U2n ? 
-1 : 0); } /*---------------------------------------------------------------------------*/ #if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) /* Insertionsort for small size groups */ static void ss_insertionsort(const unsigned char *T, const int *PA, int *first, int *last, int depth) { int *i, *j; int t; int r; for(i = last - 2; first <= i; --i) { for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) { do { *(j - 1) = *j; } while((++j < last) && (*j < 0)); if(last <= j) { break; } } if(r == 0) { *j = ~*j; } *(j - 1) = t; } } #endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */ /*---------------------------------------------------------------------------*/ #if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) static INLINE void ss_fixdown(const unsigned char *Td, const int *PA, int *SA, int i, int size) { int j, k; int v; int c, d, e; for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { d = Td[PA[SA[k = j++]]]; if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; } if(d <= c) { break; } } SA[i] = v; } /* Simple top-down heapsort. */ static void ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) { int i, m; int t; m = size; if((size % 2) == 0) { m--; if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); } } for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); } if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); } for(i = m - 1; 0 < i; --i) { t = SA[0], SA[0] = SA[i]; ss_fixdown(Td, PA, SA, 0, i); SA[i] = t; } } /*---------------------------------------------------------------------------*/ /* Returns the median of three elements. */ static INLINE int * ss_median3(const unsigned char *Td, const int *PA, int *v1, int *v2, int *v3) { int *t; if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); } if(Td[PA[*v2]] > Td[PA[*v3]]) { if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; } else { return v3; } } return v2; } /* Returns the median of five elements. */ static INLINE int * ss_median5(const unsigned char *Td, const int *PA, int *v1, int *v2, int *v3, int *v4, int *v5) { int *t; if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); } if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); } if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); } if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); } if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); } if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; } return v3; } /* Returns the pivot element. */ static INLINE int * ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) { int *middle; int t; t = last - first; middle = first + t / 2; if(t <= 512) { if(t <= 32) { return ss_median3(Td, PA, first, middle, last - 1); } else { t >>= 2; return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1); } } t >>= 3; first = ss_median3(Td, PA, first, first + t, first + (t << 1)); middle = ss_median3(Td, PA, middle - t, middle, middle + t); last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1); return ss_median3(Td, PA, first, middle, last); } /*---------------------------------------------------------------------------*/ /* Binary partition for substrings. 
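   This partition pass also marks entries: suffixes whose substring runs
   out of characters at this depth (so they compare equal from here on)
   are moved to the front and tagged by bitwise negation (~x), the
   negative-value convention that ss_insertionsort() and the merge
   routines test for with `*p < 0`.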
*/ static INLINE int * ss_partition(const int *PA, int *first, int *last, int depth) { int *a, *b; int t; for(a = first - 1, b = last;;) { for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; } for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { } if(b <= a) { break; } t = ~*b; *b = *a; *a = t; } if(first < a) { *first = ~*first; } return a; } /* Multikey introsort for medium size groups. */ static void ss_mintrosort(const unsigned char *T, const int *PA, int *first, int *last, int depth) { #define STACK_SIZE SS_MISORT_STACKSIZE struct { int *a, *b, c; int d; } stack[STACK_SIZE]; const unsigned char *Td; int *a, *b, *c, *d, *e, *f; int s, t; int ssize; int limit; int v, x = 0; for(ssize = 0, limit = ss_ilg(last - first);;) { if((last - first) <= SS_INSERTIONSORT_THRESHOLD) { #if 1 < SS_INSERTIONSORT_THRESHOLD if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); } #endif STACK_POP(first, last, depth, limit); continue; } Td = T + depth; if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); } if(limit < 0) { for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) { if((x = Td[PA[*a]]) != v) { if(1 < (a - first)) { break; } v = x; first = a; } } if(Td[PA[*first] - 1] < v) { first = ss_partition(PA, first, a, depth); } if((a - first) <= (last - a)) { if(1 < (a - first)) { STACK_PUSH(a, last, depth, -1); last = a, depth += 1, limit = ss_ilg(a - first); } else { first = a, limit = -1; } } else { if(1 < (last - a)) { STACK_PUSH(first, a, depth + 1, ss_ilg(a - first)); first = a, limit = -1; } else { last = a, depth += 1, limit = ss_ilg(a - first); } } continue; } /* choose pivot */ a = ss_pivot(Td, PA, first, last); v = Td[PA[*a]]; SWAP(*first, *a); /* partition */ for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { } if(((a = b) < last) && (x < v)) { for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } } for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { } if((b < (d = c)) && (x > v)) { for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } for(; b < c;) { SWAP(*b, *c); for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } if(a <= d) { c = b - 1; if((s = a - first) > (t = b - a)) { s = t; } for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } if((s = d - c) > (t = last - d - 1)) { s = t; } for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } a = first + (b - a), c = last - (d - c); b = (v <= Td[PA[*a] - 1]) ? 
a : ss_partition(PA, a, c, depth); if((a - first) <= (last - c)) { if((last - c) <= (c - b)) { STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); STACK_PUSH(c, last, depth, limit); last = a; } else if((a - first) <= (c - b)) { STACK_PUSH(c, last, depth, limit); STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); last = a; } else { STACK_PUSH(c, last, depth, limit); STACK_PUSH(first, a, depth, limit); first = b, last = c, depth += 1, limit = ss_ilg(c - b); } } else { if((a - first) <= (c - b)) { STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); STACK_PUSH(first, a, depth, limit); first = c; } else if((last - c) <= (c - b)) { STACK_PUSH(first, a, depth, limit); STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); first = c; } else { STACK_PUSH(first, a, depth, limit); STACK_PUSH(c, last, depth, limit); first = b, last = c, depth += 1, limit = ss_ilg(c - b); } } } else { limit += 1; if(Td[PA[*first] - 1] < v) { first = ss_partition(PA, first, last, depth); limit = ss_ilg(last - first); } depth += 1; } } #undef STACK_SIZE } #endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ /*---------------------------------------------------------------------------*/ #if SS_BLOCKSIZE != 0 static INLINE void ss_blockswap(int *a, int *b, int n) { int t; for(; 0 < n; --n, ++a, ++b) { t = *a, *a = *b, *b = t; } } static INLINE void ss_rotate(int *first, int *middle, int *last) { int *a, *b, t; int l, r; l = middle - first, r = last - middle; for(; (0 < l) && (0 < r);) { if(l == r) { ss_blockswap(first, middle, l); break; } if(l < r) { a = last - 1, b = middle - 1; t = *a; do { *a-- = *b, *b-- = *a; if(b < first) { *a = t; last = a; if((r -= l + 1) <= l) { break; } a -= 1, b = middle - 1; t = *a; } } while(1); } else { a = first, b = middle; t = *a; do { *a++ = *b, *b++ = *a; if(last <= b) { *a = t; first = a + 1; if((l -= r + 1) <= r) { break; } a += 1, b = middle; t = *a; } } while(1); } } } /*---------------------------------------------------------------------------*/ static void ss_inplacemerge(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int depth) { const int *p; int *a, *b; int len, half; int q, r; int x; for(;;) { if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); } else { x = 0; p = PA + *(last - 1); } for(a = first, len = middle - first, half = len >> 1, r = -1; 0 < len; len = half, half >>= 1) { b = a + half; q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth); if(q < 0) { a = b + 1; half -= (len & 1) ^ 1; } else { r = q; } } if(a < middle) { if(r == 0) { *a = ~*a; } ss_rotate(a, middle, last); last -= middle - a; middle = a; if(first == middle) { break; } } --last; if(x != 0) { while(*--last < 0) { } } if(middle == last) { break; } } } /*---------------------------------------------------------------------------*/ /* Merge-forward with internal buffer. 
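   buf must hold at least (middle - first) elements: the left run is
   swapped into buf via ss_blockswap(), then merged back into
   [first, last) from the front.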
*/ static void ss_mergeforward(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) { int *a, *b, *c, *bufend; int t; int r; bufend = buf + (middle - first) - 1; ss_blockswap(buf, first, middle - first); for(t = *(a = first), b = buf, c = middle;;) { r = ss_compare(T, PA + *b, PA + *c, depth); if(r < 0) { do { *a++ = *b; if(bufend <= b) { *bufend = t; return; } *b++ = *a; } while(*b < 0); } else if(r > 0) { do { *a++ = *c, *c++ = *a; if(last <= c) { while(b < bufend) { *a++ = *b, *b++ = *a; } *a = *b, *b = t; return; } } while(*c < 0); } else { *c = ~*c; do { *a++ = *b; if(bufend <= b) { *bufend = t; return; } *b++ = *a; } while(*b < 0); do { *a++ = *c, *c++ = *a; if(last <= c) { while(b < bufend) { *a++ = *b, *b++ = *a; } *a = *b, *b = t; return; } } while(*c < 0); } } } /* Merge-backward with internal buffer. */ static void ss_mergebackward(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int depth) { const int *p1, *p2; int *a, *b, *c, *bufend; int t; int r; int x; bufend = buf + (last - middle) - 1; ss_blockswap(buf, middle, last - middle); x = 0; if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; } else { p1 = PA + *bufend; } if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; } else { p2 = PA + *(middle - 1); } for(t = *(a = last - 1), b = bufend, c = middle - 1;;) { r = ss_compare(T, p1, p2, depth); if(0 < r) { if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } *a-- = *b; if(b <= buf) { *buf = t; break; } *b-- = *a; if(*b < 0) { p1 = PA + ~*b; x |= 1; } else { p1 = PA + *b; } } else if(r < 0) { if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } *a-- = *c, *c-- = *a; if(c < first) { while(buf < b) { *a-- = *b, *b-- = *a; } *a = *b, *b = t; break; } if(*c < 0) { p2 = PA + ~*c; x |= 2; } else { p2 = PA + *c; } } else { if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } *a-- = ~*b; if(b <= buf) { *buf = t; break; } *b-- = *a; if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } *a-- = *c, *c-- = *a; if(c < first) { while(buf < b) { *a-- = *b, *b-- = *a; } *a = *b, *b = t; break; } if(*b < 0) { p1 = PA + ~*b; x |= 1; } else { p1 = PA + *b; } if(*c < 0) { p2 = PA + ~*c; x |= 2; } else { p2 = PA + *c; } } } } /* D&C based merge. */ static void ss_swapmerge(const unsigned char *T, const int *PA, int *first, int *middle, int *last, int *buf, int bufsize, int depth) { #define STACK_SIZE SS_SMERGE_STACKSIZE #define GETIDX(a) ((0 <= (a)) ? 
(a) : (~(a))) #define MERGE_CHECK(a, b, c)\ do {\ if(((c) & 1) ||\ (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\ *(a) = ~*(a);\ }\ if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\ *(b) = ~*(b);\ }\ } while(0) struct { int *a, *b, *c; int d; } stack[STACK_SIZE]; int *l, *r, *lm, *rm; int m, len, half; int ssize; int check, next; for(check = 0, ssize = 0;;) { if((last - middle) <= bufsize) { if((first < middle) && (middle < last)) { ss_mergebackward(T, PA, first, middle, last, buf, depth); } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); continue; } if((middle - first) <= bufsize) { if(first < middle) { ss_mergeforward(T, PA, first, middle, last, buf, depth); } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); continue; } for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1; 0 < len; len = half, half >>= 1) { if(ss_compare(T, PA + GETIDX(*(middle + m + half)), PA + GETIDX(*(middle - m - half - 1)), depth) < 0) { m += half + 1; half -= (len & 1) ^ 1; } } if(0 < m) { lm = middle - m, rm = middle + m; ss_blockswap(lm, middle, m); l = r = middle, next = 0; if(rm < last) { if(*rm < 0) { *rm = ~*rm; if(first < lm) { for(; *--l < 0;) { } next |= 4; } next |= 1; } else if(first < lm) { for(; *r < 0; ++r) { } next |= 2; } } if((l - first) <= (last - r)) { STACK_PUSH(r, rm, last, (next & 3) | (check & 4)); middle = lm, last = l, check = (check & 3) | (next & 4); } else { if((next & 2) && (r == middle)) { next ^= 6; } STACK_PUSH(first, lm, l, (check & 3) | (next & 4)); first = r, middle = rm, check = (next & 3) | (check & 4); } } else { if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) { *middle = ~*middle; } MERGE_CHECK(first, last, check); STACK_POP(first, middle, last, check); } } #undef STACK_SIZE } #endif /* SS_BLOCKSIZE != 0 */ /*---------------------------------------------------------------------------*/ /* Substring sort */ static void sssort(const unsigned char *T, const int *PA, int *first, int *last, int *buf, int bufsize, int depth, int n, int lastsuffix) { int *a; #if SS_BLOCKSIZE != 0 int *b, *middle, *curbuf; int j, k, curbufsize, limit; #endif int i; if(lastsuffix != 0) { ++first; } #if SS_BLOCKSIZE == 0 ss_mintrosort(T, PA, first, last, depth); #else if((bufsize < SS_BLOCKSIZE) && (bufsize < (last - first)) && (bufsize < (limit = ss_isqrt(last - first)))) { if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; } buf = middle = last - limit, bufsize = limit; } else { middle = last, limit = 0; } for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) { #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth); #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth); #endif curbufsize = last - (a + SS_BLOCKSIZE); curbuf = a + SS_BLOCKSIZE; if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; } for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) { ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth); } } #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, a, middle, depth); #elif 1 < SS_BLOCKSIZE ss_insertionsort(T, PA, a, middle, depth); #endif for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) { if(i & 1) { ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth); a -= k; } } if(limit != 0) { #if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE ss_mintrosort(T, PA, middle, last, depth); #elif 1 < SS_BLOCKSIZE 
ss_insertionsort(T, PA, middle, last, depth); #endif ss_inplacemerge(T, PA, first, middle, last, depth); } #endif if(lastsuffix != 0) { /* Insert last type B* suffix. */ int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2; for(a = first, i = *(first - 1); (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth))); ++a) { *(a - 1) = *a; } *(a - 1) = i; } } /*---------------------------------------------------------------------------*/ static INLINE int tr_ilg(int n) { return (n & 0xffff0000) ? ((n & 0xff000000) ? 24 + lg_table[(n >> 24) & 0xff] : 16 + lg_table[(n >> 16) & 0xff]) : ((n & 0x0000ff00) ? 8 + lg_table[(n >> 8) & 0xff] : 0 + lg_table[(n >> 0) & 0xff]); } /*---------------------------------------------------------------------------*/ /* Simple insertionsort for small size groups. */ static void tr_insertionsort(const int *ISAd, int *first, int *last) { int *a, *b; int t, r; for(a = first + 1; a < last; ++a) { for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) { do { *(b + 1) = *b; } while((first <= --b) && (*b < 0)); if(b < first) { break; } } if(r == 0) { *b = ~*b; } *(b + 1) = t; } } /*---------------------------------------------------------------------------*/ static INLINE void tr_fixdown(const int *ISAd, int *SA, int i, int size) { int j, k; int v; int c, d, e; for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { d = ISAd[SA[k = j++]]; if(d < (e = ISAd[SA[j]])) { k = j; d = e; } if(d <= c) { break; } } SA[i] = v; } /* Simple top-down heapsort. */ static void tr_heapsort(const int *ISAd, int *SA, int size) { int i, m; int t; m = size; if((size % 2) == 0) { m--; if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); } } for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); } if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); } for(i = m - 1; 0 < i; --i) { t = SA[0], SA[0] = SA[i]; tr_fixdown(ISAd, SA, 0, i); SA[i] = t; } } /*---------------------------------------------------------------------------*/ /* Returns the median of three elements. */ static INLINE int * tr_median3(const int *ISAd, int *v1, int *v2, int *v3) { int *t; if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); } if(ISAd[*v2] > ISAd[*v3]) { if(ISAd[*v1] > ISAd[*v3]) { return v1; } else { return v3; } } return v2; } /* Returns the median of five elements. */ static INLINE int * tr_median5(const int *ISAd, int *v1, int *v2, int *v3, int *v4, int *v5) { int *t; if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); } if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); } if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); } if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); } if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); } if(ISAd[*v3] > ISAd[*v4]) { return v4; } return v3; } /* Returns the pivot element. 
*/ static INLINE int * tr_pivot(const int *ISAd, int *first, int *last) { int *middle; int t; t = last - first; middle = first + t / 2; if(t <= 512) { if(t <= 32) { return tr_median3(ISAd, first, middle, last - 1); } else { t >>= 2; return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1); } } t >>= 3; first = tr_median3(ISAd, first, first + t, first + (t << 1)); middle = tr_median3(ISAd, middle - t, middle, middle + t); last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1); return tr_median3(ISAd, first, middle, last); } /*---------------------------------------------------------------------------*/ typedef struct _trbudget_t trbudget_t; struct _trbudget_t { int chance; int remain; int incval; int count; }; static INLINE void trbudget_init(trbudget_t *budget, int chance, int incval) { budget->chance = chance; budget->remain = budget->incval = incval; } static INLINE int trbudget_check(trbudget_t *budget, int size) { if(size <= budget->remain) { budget->remain -= size; return 1; } if(budget->chance == 0) { budget->count += size; return 0; } budget->remain += budget->incval - size; budget->chance -= 1; return 1; } /*---------------------------------------------------------------------------*/ static INLINE void tr_partition(const int *ISAd, int *first, int *middle, int *last, int **pa, int **pb, int v) { int *a, *b, *c, *d, *e, *f; int t, s; int x = 0; for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { } if(((a = b) < last) && (x < v)) { for(; (++b < last) && ((x = ISAd[*b]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } } for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { } if((b < (d = c)) && (x > v)) { for(; (b < --c) && ((x = ISAd[*c]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } for(; b < c;) { SWAP(*b, *c); for(; (++b < c) && ((x = ISAd[*b]) <= v);) { if(x == v) { SWAP(*b, *a); ++a; } } for(; (b < --c) && ((x = ISAd[*c]) >= v);) { if(x == v) { SWAP(*c, *d); --d; } } } if(a <= d) { c = b - 1; if((s = a - first) > (t = b - a)) { s = t; } for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } if((s = d - c) > (t = last - d - 1)) { s = t; } for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } first += (b - a), last -= (d - c); } *pa = first, *pb = last; } static void tr_copy(int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) { /* sort suffixes of middle partition by using sorted order of suffixes of left and right partition. 
*/ int *c, *d, *e; int s, v; v = b - SA - 1; for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; ISA[s] = d - SA; } } for(c = last - 1, e = d + 1, d = b; e < d; --c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; ISA[s] = d - SA; } } } static void tr_partialcopy(int *ISA, const int *SA, int *first, int *a, int *b, int *last, int depth) { int *c, *d, *e; int s, v; int rank, lastrank, newrank = -1; v = b - SA - 1; lastrank = -1; for(c = first, d = a - 1; c <= d; ++c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *++d = s; rank = ISA[s + depth]; if(lastrank != rank) { lastrank = rank; newrank = d - SA; } ISA[s] = newrank; } } lastrank = -1; for(e = d; first <= e; --e) { rank = ISA[*e]; if(lastrank != rank) { lastrank = rank; newrank = e - SA; } if(newrank != rank) { ISA[*e] = newrank; } } lastrank = -1; for(c = last - 1, e = d + 1, d = b; e < d; --c) { if((0 <= (s = *c - depth)) && (ISA[s] == v)) { *--d = s; rank = ISA[s + depth]; if(lastrank != rank) { lastrank = rank; newrank = d - SA; } ISA[s] = newrank; } } } static void tr_introsort(int *ISA, const int *ISAd, int *SA, int *first, int *last, trbudget_t *budget) { #define STACK_SIZE TR_STACKSIZE struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE]; int *a, *b, *c; int t; int v, x = 0; int incr = ISAd - ISA; int limit, next; int ssize, trlink = -1; for(ssize = 0, limit = tr_ilg(last - first);;) { if(limit < 0) { if(limit == -1) { /* tandem repeat partition */ tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1); /* update ranks */ if(a < last) { for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } } if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } /* push */ if(1 < (b - a)) { STACK_PUSH5(NULL, a, b, 0, 0); STACK_PUSH5(ISAd - incr, first, last, -2, trlink); trlink = ssize - 2; } if((a - first) <= (last - b)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink); last = a, limit = tr_ilg(a - first); } else if(1 < (last - b)) { first = b, limit = tr_ilg(last - b); } else { STACK_POP5(ISAd, first, last, limit, trlink); } } else { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink); first = b, limit = tr_ilg(last - b); } else if(1 < (a - first)) { last = a, limit = tr_ilg(a - first); } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } else if(limit == -2) { /* tandem repeat copy */ a = stack[--ssize].b, b = stack[ssize].c; if(stack[ssize].d == 0) { tr_copy(ISA, SA, first, a, b, last, ISAd - ISA); } else { if(0 <= trlink) { stack[trlink].d = -1; } tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA); } STACK_POP5(ISAd, first, last, limit, trlink); } else { /* sorted partition */ if(0 <= *first) { a = first; do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a)); first = a; } if(first < last) { a = first; do { *a = ~*a; } while(*++a < 0); next = (ISA[*a] != ISAd[*a]) ? 
tr_ilg(a - first + 1) : -1; if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } } /* push */ if(trbudget_check(budget, a - first)) { if((a - first) <= (last - a)) { STACK_PUSH5(ISAd, a, last, -3, trlink); ISAd += incr, last = a, limit = next; } else { if(1 < (last - a)) { STACK_PUSH5(ISAd + incr, first, a, next, trlink); first = a, limit = -3; } else { ISAd += incr, last = a, limit = next; } } } else { if(0 <= trlink) { stack[trlink].d = -1; } if(1 < (last - a)) { first = a, limit = -3; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } else { STACK_POP5(ISAd, first, last, limit, trlink); } } continue; } if((last - first) <= TR_INSERTIONSORT_THRESHOLD) { tr_insertionsort(ISAd, first, last); limit = -3; continue; } if(limit-- == 0) { tr_heapsort(ISAd, first, last - first); for(a = last - 1; first < a; a = b) { for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; } } limit = -3; continue; } /* choose pivot */ a = tr_pivot(ISAd, first, last); SWAP(*first, *a); v = ISAd[*first]; /* partition */ tr_partition(ISAd, first, first + 1, last, &a, &b, v); if((last - first) != (b - a)) { next = (ISA[*a] != v) ? tr_ilg(b - a) : -1; /* update ranks */ for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } /* push */ if((1 < (b - a)) && (trbudget_check(budget, b - a))) { if((a - first) <= (last - b)) { if((last - b) <= (b - a)) { if(1 < (a - first)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); STACK_PUSH5(ISAd, b, last, limit, trlink); last = a; } else if(1 < (last - b)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); first = b; } else { ISAd += incr, first = a, last = b, limit = next; } } else if((a - first) <= (b - a)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, limit, trlink); STACK_PUSH5(ISAd + incr, a, b, next, trlink); last = a; } else { STACK_PUSH5(ISAd, b, last, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { STACK_PUSH5(ISAd, b, last, limit, trlink); STACK_PUSH5(ISAd, first, a, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { if((a - first) <= (b - a)) { if(1 < (last - b)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); STACK_PUSH5(ISAd, first, a, limit, trlink); first = b; } else if(1 < (a - first)) { STACK_PUSH5(ISAd + incr, a, b, next, trlink); last = a; } else { ISAd += incr, first = a, last = b, limit = next; } } else if((last - b) <= (b - a)) { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, limit, trlink); STACK_PUSH5(ISAd + incr, a, b, next, trlink); first = b; } else { STACK_PUSH5(ISAd, first, a, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } else { STACK_PUSH5(ISAd, first, a, limit, trlink); STACK_PUSH5(ISAd, b, last, limit, trlink); ISAd += incr, first = a, last = b, limit = next; } } } else { if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; } if((a - first) <= (last - b)) { if(1 < (a - first)) { STACK_PUSH5(ISAd, b, last, limit, trlink); last = a; } else if(1 < (last - b)) { first = b; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } else { if(1 < (last - b)) { STACK_PUSH5(ISAd, first, a, limit, trlink); first = b; } else if(1 < (a - first)) { last = a; } else { STACK_POP5(ISAd, first, last, limit, trlink); } } } } else { if(trbudget_check(budget, last - first)) { limit = tr_ilg(last - first), ISAd += incr; } else { if(0 <= trlink) { stack[trlink].d = -1; } STACK_POP5(ISAd, first, last, limit, trlink); } } } #undef STACK_SIZE } 
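/* The tandem repeat sort below refines suffix ranks by prefix doubling:
 * each pass of trsort() orders suffixes by the rank of the suffix `depth`
 * positions ahead, then doubles that offset (ISAd += ISAd - ISA), so all
 * ranks are exact after O(log n) passes. A minimal sketch of the doubling
 * idea, using hypothetical helpers rather than the local routines:
 * \code
 * for (h = depth; any group is still unsorted; h *= 2) {
 *     reorder each unsorted group of suffixes i by rank[i + h];
 *     recompute rank[] from the new order;
 * }
 * \endcode
 * trbudget_t caps the work spent inside tr_introsort(): once the budget
 * is exhausted, the sizes of the groups left unsorted accumulate in
 * budget->count and trsort() retries them on a later, deeper pass.
 */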
/*---------------------------------------------------------------------------*/ /* Tandem repeat sort */ static void trsort(int *ISA, int *SA, int n, int depth) { int *ISAd; int *first, *last; trbudget_t budget; int t, skip, unsorted; trbudget_init(&budget, tr_ilg(n) * 2 / 3, n); /* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */ for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) { first = SA; skip = 0; unsorted = 0; do { if((t = *first) < 0) { first -= t; skip += t; } else { if(skip != 0) { *(first + skip) = skip; skip = 0; } last = SA + ISA[t] + 1; if(1 < (last - first)) { budget.count = 0; tr_introsort(ISA, ISAd, SA, first, last, &budget); if(budget.count != 0) { unsorted += budget.count; } else { skip = first - last; } } else if((last - first) == 1) { skip = -1; } first = last; } } while(first < (SA + n)); if(skip != 0) { *(first + skip) = skip; } if(unsorted == 0) { break; } } } /*---------------------------------------------------------------------------*/ /* Sorts suffixes of type B*. */ static int sort_typeBstar(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int openMP) { int *PAb, *ISAb, *buf; #ifdef LIBBSC_OPENMP int *curbuf; int l; #endif int i, j, k, t, m, bufsize; int c0, c1; #ifdef LIBBSC_OPENMP int d0, d1; #endif (void)openMP; /* Initialize bucket arrays. */ for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; } for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; } /* Count the number of occurrences of the first one or two characters of each type A, B and B* suffix. Moreover, store the beginning position of all type B* suffixes into the array SA. */ for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) { /* type A suffix. */ do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1)); if(0 <= i) { /* type B* suffix. */ ++BUCKET_BSTAR(c0, c1); SA[--m] = i; /* type B suffix. */ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { ++BUCKET_B(c0, c1); } } } m = n - m; /* note: A type B* suffix is lexicographically smaller than a type B suffix that begins with the same first two characters. */ /* Calculate the index of start/end point of each bucket. */ for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) { t = i + BUCKET_A(c0); BUCKET_A(c0) = i + j; /* start point */ i = t + BUCKET_B(c0, c0); for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) { j += BUCKET_BSTAR(c0, c1); BUCKET_BSTAR(c0, c1) = j; /* end point */ i += BUCKET_B(c0, c1); } } if(0 < m) { /* Sort the type B* suffixes by their first two characters. */ PAb = SA + n - m; ISAb = SA + m; for(i = m - 2; 0 <= i; --i) { t = PAb[i], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = i; } t = PAb[m - 1], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = m - 1; /* Sort the type B* substrings using sssort. 
*/ #ifdef LIBBSC_OPENMP if (openMP) { buf = SA + m; c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m; #pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1) { bufsize = (n - (2 * m)) / omp_get_num_threads(); curbuf = buf + omp_get_thread_num() * bufsize; k = 0; for(;;) { #pragma omp critical(sssort_lock) { if(0 < (l = j)) { d0 = c0, d1 = c1; do { k = BUCKET_BSTAR(d0, d1); if(--d1 <= d0) { d1 = ALPHABET_SIZE - 1; if(--d0 < 0) { break; } } } while(((l - k) <= 1) && (0 < (l = k))); c0 = d0, c1 = d1, j = k; } } if(l == 0) { break; } sssort(T, PAb, SA + k, SA + l, curbuf, bufsize, 2, n, *(SA + k) == (m - 1)); } } } else { buf = SA + m, bufsize = n - (2 * m); for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { i = BUCKET_BSTAR(c0, c1); if(1 < (j - i)) { sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1)); } } } } #else buf = SA + m, bufsize = n - (2 * m); for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { i = BUCKET_BSTAR(c0, c1); if(1 < (j - i)) { sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1)); } } } #endif /* Compute ranks of type B* substrings. */ for(i = m - 1; 0 <= i; --i) { if(0 <= SA[i]) { j = i; do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i])); SA[i + 1] = i - j; if(i <= 0) { break; } } j = i; do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0); ISAb[SA[i]] = j; } /* Construct the inverse suffix array of type B* suffixes using trsort. */ trsort(ISAb, SA, m, 1); /* Set the sorted order of type B* suffixes. */ for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } if(0 <= i) { t = i; for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { } SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t; } } /* Calculate the index of start/end point of each bucket. */ BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */ for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) { i = BUCKET_A(c0 + 1) - 1; for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) { t = i - BUCKET_B(c0, c1); BUCKET_B(c0, c1) = i; /* end point */ /* Move all type B* suffixes to the correct position. */ for(i = t, j = BUCKET_BSTAR(c0, c1); j <= k; --i, --k) { SA[i] = SA[k]; } } BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */ BUCKET_B(c0, c0) = i; /* end point */ } } return m; } /* Constructs the suffix array by using the sorted order of type B* suffixes. */ static void construct_SA(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) { int *i, *j, *k; int s; int c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); *j = ~s; c0 = T[--s]; if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); *k-- = s; } else { assert(((s == 0) && (T[s] == c1)) || (s < 0)); *j = ~s; } } } } /* Construct the suffix array by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? 
~(n - 1) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; if((s == 0) || (T[s - 1] < c0)) { s = ~s; } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else { assert(s < 0); *i = ~s; } } } /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. */ static int construct_BWT(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m) { int *i, *j, *k, *orig; int s; int c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); c0 = T[--s]; *j = ~((int)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); *k-- = s; } else if(s != 0) { *j = ~s; #ifndef NDEBUG } else { assert(T[s] == c1); #endif } } } } /* Construct the BWTed string by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n, orig = SA; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; *i = c0; if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else if(s != 0) { *i = ~s; } else { orig = i; } } return orig - SA; } /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. */ static int construct_BWT_indexes(const unsigned char *T, int *SA, int *bucket_A, int *bucket_B, int n, int m, unsigned char * num_indexes, int * indexes) { int *i, *j, *k, *orig; int s; int c0, c1, c2; int mod = n / 8; { mod |= mod >> 1; mod |= mod >> 2; mod |= mod >> 4; mod |= mod >> 8; mod |= mod >> 16; mod >>= 1; *num_indexes = (unsigned char)((n - 1) / (mod + 1)); } if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA; c0 = T[--s]; *j = ~((int)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); assert(k != NULL); *k-- = s; } else if(s != 0) { *j = ~s; #ifndef NDEBUG } else { assert(T[s] == c1); #endif } } } } /* Construct the BWTed string by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); if (T[n - 2] < c2) { if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA; *k++ = ~((int)T[n - 2]); } else { *k++ = n - 1; } /* Scan the suffix array from left to right. 
*/ for(i = SA, j = SA + n, orig = SA; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA; c0 = T[--s]; *i = c0; if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); if((0 < s) && (T[s - 1] < c0)) { if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA; *k++ = ~((int)T[s - 1]); } else *k++ = s; } else if(s != 0) { *i = ~s; } else { orig = i; } } return orig - SA; } /*---------------------------------------------------------------------------*/ /*- Function -*/ int divsufsort(const unsigned char *T, int *SA, int n, int openMP) { int *bucket_A, *bucket_B; int m; int err = 0; /* Check arguments. */ if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; } else if(n == 0) { return 0; } else if(n == 1) { SA[0] = 0; return 0; } else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; } bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); /* Suffixsort. */ if((bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP); construct_SA(T, SA, bucket_A, bucket_B, n, m); } else { err = -2; } free(bucket_B); free(bucket_A); return err; } int divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) { int *B; int *bucket_A, *bucket_B; int m, pidx, i; /* Check arguments. */ if((T == NULL) || (U == NULL) || (n < 0)) { return -1; } else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); } bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); /* Burrows-Wheeler Transform. */ if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP); if (num_indexes == NULL || indexes == NULL) { pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m); } else { pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes); } /* Copy to output string. */ U[0] = T[n - 1]; for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; } for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; } pidx += 1; } else { pidx = -2; } free(bucket_B); free(bucket_A); if(A == NULL) { free(B); } return pidx; } /**** ended inlining dictBuilder/divsufsort.c ****/ /**** start inlining dictBuilder/fastcover.c ****/ /* * Copyright (c) 2018-2021, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/ /*-************************************* * Dependencies ***************************************/ #include <stdio.h> /* fprintf */ #include <stdlib.h> /* malloc, free, qsort */ #include <string.h> /* memset */ #include <time.h> /* clock */ /**** skipping file: ../common/mem.h ****/ /**** skipping file: ../common/pool.h ****/ /**** skipping file: ../common/threading.h ****/ /**** skipping file: cover.h ****/ /**** skipping file: ../common/zstd_internal.h ****/ /**** skipping file: ../compress/zstd_compress_internal.h ****/ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY #endif /**** skipping file: zdict.h ****/ /*-************************************* * Constants ***************************************/ #define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) #define FASTCOVER_MAX_F 31 #define FASTCOVER_MAX_ACCEL 10 #define FASTCOVER_DEFAULT_SPLITPOINT 0.75 #define DEFAULT_F 20 #define DEFAULT_ACCEL 1 /*-************************************* * Console display ***************************************/ #ifndef LOCALDISPLAYLEVEL static int g_displayLevel = 2; #endif #undef DISPLAY #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } #undef LOCALDISPLAYLEVEL #define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ #undef DISPLAYLEVEL #define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) #ifndef LOCALDISPLAYUPDATE static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100; static clock_t g_time = 0; #endif #undef LOCALDISPLAYUPDATE #define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ if (displayLevel >= l) { \ if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \ g_time = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } #undef DISPLAYUPDATE #define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) /*-************************************* * Hash Functions ***************************************/ /** * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector */ static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 f, unsigned d) { if (d == 6) { return ZSTD_hash6Ptr(p, f); } return ZSTD_hash8Ptr(p, f); } /*-************************************* * Acceleration ***************************************/ typedef struct { unsigned finalize; /* Percentage of training samples used for ZDICT_finalizeDictionary */ unsigned skip; /* Number of dmers skipped between each dmer counted in computeFrequency */ } FASTCOVER_accel_t; static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = { { 100, 0 }, /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */ { 100, 0 }, /* accel = 1 */ { 50, 1 }, /* accel = 2 */ { 34, 2 }, /* accel = 3 */ { 25, 3 }, /* accel = 4 */ { 20, 4 }, /* accel = 5 */ { 17, 5 }, /* accel = 6 */ { 14, 6 }, /* accel = 7 */ { 13, 7 }, /* accel = 8 */ { 11, 8 }, /* accel = 9 */ { 10, 9 }, /* accel = 10 */ }; /*-************************************* * Context ***************************************/ typedef struct { const BYTE *samples; size_t *offsets; const size_t *samplesSizes; size_t nbSamples; size_t nbTrainSamples; size_t nbTestSamples; size_t nbDmers; U32 *freqs; unsigned d; unsigned f; FASTCOVER_accel_t accelParams; } FASTCOVER_ctx_t; /*-************************************* * Helper functions ***************************************/ /** * Selects the best segment in an epoch.
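 * An epoch is one of the roughly equal-sized ranges of dmer positions that
 * FASTCOVER_buildDictionary() divides the training data into (computed by
 * COVER_computeEpochs()); one segment is selected from each epoch in turn.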
* Segments are scored according to the function: * * Let F(d) be the frequency of all dmers with hash value d. * Let S_i be the hash value of the dmer at position i of segment S which has length k. * * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) * * Once the dmer with hash value d is in the dictionary we set F(d) = 0. */ static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx, U32 *freqs, U32 begin, U32 end, ZDICT_cover_params_t parameters, U16* segmentFreqs) { /* Constants */ const U32 k = parameters.k; const U32 d = parameters.d; const U32 f = ctx->f; const U32 dmersInK = k - d + 1; /* Try each segment (activeSegment) and save the best (bestSegment) */ COVER_segment_t bestSegment = {0, 0, 0}; COVER_segment_t activeSegment; /* Reset the activeDmers in the segment */ /* The activeSegment starts at the beginning of the epoch. */ activeSegment.begin = begin; activeSegment.end = begin; activeSegment.score = 0; /* Slide the activeSegment through the whole epoch. * Save the best segment in bestSegment. */ while (activeSegment.end < end) { /* Get hash value of current dmer */ const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); /* Add frequency of this index to score if this is the first occurrence of index in active segment */ if (segmentFreqs[idx] == 0) { activeSegment.score += freqs[idx]; } /* Increment end of segment and segmentFreqs*/ activeSegment.end += 1; segmentFreqs[idx] += 1; /* If the window is now too large, drop the first position */ if (activeSegment.end - activeSegment.begin == dmersInK + 1) { /* Get hash value of the dmer to be eliminated from active segment */ const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); segmentFreqs[delIndex] -= 1; /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */ if (segmentFreqs[delIndex] == 0) { activeSegment.score -= freqs[delIndex]; } /* Increment start of segment */ activeSegment.begin += 1; } /* If this segment is the best so far save it */ if (activeSegment.score > bestSegment.score) { bestSegment = activeSegment; } } /* Zero out rest of segmentFreqs array */ while (activeSegment.begin < end) { const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); segmentFreqs[delIndex] -= 1; activeSegment.begin += 1; } { /* Zero the frequency of hash value of each dmer covered by the chosen segment. */ U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); freqs[i] = 0; } } return bestSegment; } static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, size_t maxDictSize, unsigned f, unsigned accel) { /* k, d, and f are required parameters */ if (parameters.d == 0 || parameters.k == 0) { return 0; } /* d has to be 6 or 8 */ if (parameters.d != 6 && parameters.d != 8) { return 0; } /* k <= maxDictSize */ if (parameters.k > maxDictSize) { return 0; } /* d <= k */ if (parameters.d > parameters.k) { return 0; } /* 0 < f <= FASTCOVER_MAX_F*/ if (f > FASTCOVER_MAX_F || f == 0) { return 0; } /* 0 < splitPoint <= 1 */ if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) { return 0; } /* 0 < accel <= 10 */ if (accel > 10 || accel == 0) { return 0; } return 1; } /** * Clean up a context initialized with `FASTCOVER_ctx_init()`.
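 * Releases the frequency table and the sample offsets. Safe to call on a
 * zeroed or partially initialized context, since free(NULL) is a no-op.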
*/ static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx) { if (!ctx) return; free(ctx->freqs); ctx->freqs = NULL; free(ctx->offsets); ctx->offsets = NULL; } /** * Calculate the frequency of the hash value of each dmer in ctx->samples */ static void FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx) { const unsigned f = ctx->f; const unsigned d = ctx->d; const unsigned skip = ctx->accelParams.skip; const unsigned readLength = MAX(d, 8); size_t i; assert(ctx->nbTrainSamples >= 5); assert(ctx->nbTrainSamples <= ctx->nbSamples); for (i = 0; i < ctx->nbTrainSamples; i++) { size_t start = ctx->offsets[i]; /* start of current dmer */ size_t const currSampleEnd = ctx->offsets[i+1]; while (start + readLength <= currSampleEnd) { const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d); freqs[dmerIndex]++; start = start + skip + 1; } } } /** * Prepare a context for dictionary building. * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success, or an error code on failure. * The context must be destroyed with `FASTCOVER_ctx_destroy()`. */ static size_t FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, unsigned d, double splitPoint, unsigned f, FASTCOVER_accel_t accelParams) { const BYTE* const samples = (const BYTE*)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Split samples into testing and training sets */ const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; const size_t testSamplesSize = splitPoint < 1.0 ?
/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 0 on success, or an error code on failure.
 * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
 */
static size_t FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
                                 const void* samplesBuffer,
                                 const size_t* samplesSizes, unsigned nbSamples,
                                 unsigned d, double splitPoint, unsigned f,
                                 FASTCOVER_accel_t accelParams)
{
    const BYTE* const samples = (const BYTE*)samplesBuffer;
    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
    /* Split samples into testing and training sets */
    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
    const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;

    /* Checks */
    if (totalSamplesSize < MAX(d, sizeof(U64)) ||
        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
        DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                     (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
        return ERROR(srcSize_wrong);
    }

    /* Check if there are at least 5 training samples */
    if (nbTrainSamples < 5) {
        DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
        return ERROR(srcSize_wrong);
    }

    /* Check if there is at least one testing sample */
    if (nbTestSamples < 1) {
        DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
        return ERROR(srcSize_wrong);
    }

    /* Zero the context */
    memset(ctx, 0, sizeof(*ctx));
    DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
                 (unsigned)trainingSamplesSize);
    DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
                 (unsigned)testSamplesSize);

    ctx->samples = samples;
    ctx->samplesSizes = samplesSizes;
    ctx->nbSamples = nbSamples;
    ctx->nbTrainSamples = nbTrainSamples;
    ctx->nbTestSamples = nbTestSamples;
    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
    ctx->d = d;
    ctx->f = f;
    ctx->accelParams = accelParams;

    /* The offsets of each file */
    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
    if (ctx->offsets == NULL) {
        DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
        FASTCOVER_ctx_destroy(ctx);
        return ERROR(memory_allocation);
    }

    /* Fill offsets from the samplesSizes */
    {   U32 i;
        ctx->offsets[0] = 0;
        assert(nbSamples >= 5);
        for (i = 1; i <= nbSamples; ++i) {
            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
        }
    }

    /* Initialize frequency array of size 2^f */
    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
    if (ctx->freqs == NULL) {
        DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
        FASTCOVER_ctx_destroy(ctx);
        return ERROR(memory_allocation);
    }

    DISPLAYLEVEL(2, "Computing frequencies\n");
    FASTCOVER_computeFrequency(ctx->freqs, ctx);

    return 0;
}


/**
 * Given the prepared context build the dictionary.
 */
static size_t FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
                                        U32* freqs,
                                        void* dictBuffer, size_t dictBufferCapacity,
                                        ZDICT_cover_params_t parameters,
                                        U16* segmentFreqs)
{
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data into epochs. We will select one segment from each epoch. */
  const COVER_epoch_info_t epochs = COVER_computeEpochs(
      (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1);
  const size_t maxZeroScoreRun = 10;
  size_t zeroScoreRun = 0;
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
                (U32)epochs.num, (U32)epochs.size);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
    const U32 epochBegin = (U32)(epoch * epochs.size);
    const U32 epochEnd = epochBegin + epochs.size;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = FASTCOVER_selectSegment(
        ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);

    /* If the segment covers no dmers, then we are out of content.
     * There may be new content in other epochs, so continue for some time.
*/ if (segment.score == 0) { if (++zeroScoreRun >= maxZeroScoreRun) { break; } continue; } zeroScoreRun = 0; /* Trim the segment if necessary and if it is too small then we are done */ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); if (segmentSize < parameters.d) { break; } /* We fill the dictionary from the back to allow the best segments to be * referenced with the smallest offsets. */ tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( 2, "\r%u%% ", (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } DISPLAYLEVEL(2, "\r%79s\r", ""); return tail; } /** * Parameters for FASTCOVER_tryParameters(). */ typedef struct FASTCOVER_tryParameters_data_s { const FASTCOVER_ctx_t* ctx; COVER_best_t* best; size_t dictBufferCapacity; ZDICT_cover_params_t parameters; } FASTCOVER_tryParameters_data_t; /** * Tries a set of parameters and updates the COVER_best_t with the results. * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading. */ static void FASTCOVER_tryParameters(void* opaque) { /* Save parameters as local variables */ FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t*)opaque; const FASTCOVER_ctx_t *const ctx = data->ctx; const ZDICT_cover_params_t parameters = data->parameters; size_t dictBufferCapacity = data->dictBufferCapacity; size_t totalCompressedSize = ERROR(GENERIC); /* Initialize array to keep track of frequency of dmer within activeSegment */ U16* segmentFreqs = (U16*)calloc(((U64)1 << ctx->f), sizeof(U16)); /* Allocate space for hash table, dict, and freqs */ BYTE *const dict = (BYTE*)malloc(dictBufferCapacity); COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); U32* freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32)); if (!segmentFreqs || !dict || !freqs) { DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); goto _cleanup; } /* Copy the frequencies because we need to modify them */ memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32)); /* Build the dictionary */ { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity, parameters, segmentFreqs); const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, totalCompressedSize); if (COVER_dictSelectionIsError(selection)) { DISPLAYLEVEL(1, "Failed to select dictionary\n"); goto _cleanup; } } _cleanup: free(dict); COVER_best_finish(data->best, parameters, selection); free(data); free(segmentFreqs); COVER_dictSelectionFree(selection); free(freqs); } static void FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, ZDICT_cover_params_t* coverParams) { coverParams->k = fastCoverParams.k; coverParams->d = fastCoverParams.d; coverParams->steps = fastCoverParams.steps; coverParams->nbThreads = fastCoverParams.nbThreads; coverParams->splitPoint = fastCoverParams.splitPoint; coverParams->zParams = fastCoverParams.zParams; coverParams->shrinkDict = fastCoverParams.shrinkDict; } static void FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, ZDICT_fastCover_params_t* fastCoverParams, unsigned f, unsigned accel) { fastCoverParams->k = coverParams.k; fastCoverParams->d = 
coverParams.d; fastCoverParams->steps = coverParams.steps; fastCoverParams->nbThreads = coverParams.nbThreads; fastCoverParams->splitPoint = coverParams.splitPoint; fastCoverParams->f = f; fastCoverParams->accel = accel; fastCoverParams->zParams = coverParams.zParams; fastCoverParams->shrinkDict = coverParams.shrinkDict; } ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters) { BYTE* const dict = (BYTE*)dictBuffer; FASTCOVER_ctx_t ctx; ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; /* Initialize global data */ g_displayLevel = parameters.zParams.notificationLevel; /* Assign splitPoint and f if not provided */ parameters.splitPoint = 1.0; parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f; parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel; /* Convert to cover parameter */ memset(&coverParams, 0 , sizeof(coverParams)); FASTCOVER_convertToCoverParams(parameters, &coverParams); /* Checks */ if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f, parameters.accel)) { DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } /* Assign corresponding FASTCOVER_accel_t to accelParams*/ accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; /* Initialize context */ { size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, coverParams.d, parameters.splitPoint, parameters.f, accelParams); if (ZSTD_isError(initVal)) { DISPLAYLEVEL(1, "Failed to initialize context\n"); return initVal; } } COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); /* Build the dictionary */ DISPLAYLEVEL(2, "Building dictionary\n"); { /* Initialize array to keep track of frequency of dmer within activeSegment */ U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16)); const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, dictBufferCapacity, coverParams, segmentFreqs); const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100); const size_t dictionarySize = ZDICT_finalizeDictionary( dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams); if (!ZSTD_isError(dictionarySize)) { DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", (unsigned)dictionarySize); } FASTCOVER_ctx_destroy(&ctx); free(segmentFreqs); return dictionarySize; } } ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters) { ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; /* constants */ const unsigned nbThreads = parameters->nbThreads; const double splitPoint = parameters->splitPoint <= 0.0 ? FASTCOVER_DEFAULT_SPLITPOINT : parameters->splitPoint; const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 
50 : parameters->k; const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); const unsigned kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f; const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel; const unsigned shrinkDict = 0; /* Local variables */ const int displayLevel = parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; int warned = 0; /* Checks */ if (splitPoint <= 0 || splitPoint > 1) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n"); return ERROR(parameter_outOfBound); } if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n"); return ERROR(parameter_outOfBound); } if (kMinK < kMaxD || kMaxK < kMinK) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n"); return ERROR(parameter_outOfBound); } if (nbSamples == 0) { LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n"); return ERROR(srcSize_wrong); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } if (nbThreads > 1) { pool = POOL_create(nbThreads, 1); if (!pool) { return ERROR(memory_allocation); } } /* Initialization */ COVER_best_init(&best); memset(&coverParams, 0 , sizeof(coverParams)); FASTCOVER_convertToCoverParams(*parameters, &coverParams); accelParams = FASTCOVER_defaultAccelParameters[accel]; /* Turn down global display level to clean up display at level 2 and below */ g_displayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ FASTCOVER_ctx_t ctx; LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); { size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams); if (ZSTD_isError(initVal)) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return initVal; } } if (!warned) { COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); warned = 1; } /* Loop through k reusing the same context */ for (k = kMinK; k <= kMaxK; k += kStepSize) { /* Prepare the arguments */ FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( sizeof(FASTCOVER_tryParameters_data_t)); LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); if (!data) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); FASTCOVER_ctx_destroy(&ctx); POOL_free(pool); return ERROR(memory_allocation); } data->ctx = &ctx; data->best = &best; data->dictBufferCapacity = dictBufferCapacity; data->parameters = coverParams; data->parameters.k = k; data->parameters.d = d; data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; data->parameters.zParams.notificationLevel = g_displayLevel; /* Check the parameters */ if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel)) { DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); free(data); continue; } /* Call the function and pass ownership of data to it */ COVER_best_start(&best); if (pool) { POOL_add(pool, &FASTCOVER_tryParameters, data); } else { FASTCOVER_tryParameters(data); } /* Print status */ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", (unsigned)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); FASTCOVER_ctx_destroy(&ctx); } LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; if (ZSTD_isError(best.compressedSize)) { const size_t compressedSize = best.compressedSize; COVER_best_destroy(&best); POOL_free(pool); return compressedSize; } FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel); memcpy(dictBuffer, best.dict, dictSize); COVER_best_destroy(&best); POOL_free(pool); return dictSize; } } /**** ended inlining dictBuilder/fastcover.c ****/ /**** start inlining dictBuilder/zdict.c ****/ /* * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
*/

/*-**************************************
*  Tuning parameters
****************************************/
#define MINRATIO 4   /* minimum nb of appearances to be selected in dictionary */
#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)


/*-**************************************
*  Compiler Options
****************************************/
/* Unix Large Files support (>4GB) */
#define _FILE_OFFSET_BITS 64
#if (defined(__sun__) && (!defined(__LP64__)))   /* Sun Solaris 32-bits requires specific definitions */
#  ifndef _LARGEFILE_SOURCE
#  define _LARGEFILE_SOURCE
#  endif
#elif ! defined(__LP64__)                        /* No point defining Large file for 64 bit */
#  ifndef _LARGEFILE64_SOURCE
#  define _LARGEFILE64_SOURCE
#  endif
#endif


/*-*************************************
*  Dependencies
***************************************/
#include <stdlib.h>        /* malloc, free */
#include <string.h>        /* memset */
#include <stdio.h>         /* fprintf, fopen, ftello64 */
#include <time.h>          /* clock */

/**** skipping file: ../common/mem.h ****/
/**** skipping file: ../common/fse.h ****/
#define HUF_STATIC_LINKING_ONLY
/**** skipping file: ../common/huf.h ****/
/**** skipping file: ../common/zstd_internal.h ****/
/**** skipping file: ../common/xxhash.h ****/
/**** skipping file: divsufsort.h ****/
#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif
/**** skipping file: zdict.h ****/
/**** skipping file: ../compress/zstd_compress_internal.h ****/


/*-*************************************
*  Constants
***************************************/
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define DICTLISTSIZE_DEFAULT 10000

#define NOISELENGTH 32

static const U32 g_selectivity_default = 9;


/*-*************************************
*  Console display
***************************************/
#undef  DISPLAY
#define DISPLAY(...)         { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
#undef  DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;  1: errors;  2: default;  3: details;  4: debug */
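/* Note on the constants above: KB, MB and GB are *postfix* macros, so a
 * literal like `128 KB` expands to `128 *(1 <<10)`. A short sketch (example
 * only, not library code) of how such expressions read after preprocessing:
 */
#if 0
static const size_t toy_sizes[] = {
    128 KB,   /* expands to 128 *(1 <<10) == 131072     */
    2 MB,     /* expands to   2 *(1 <<20) == 2097152    */
    1 GB      /* expands to   1 *(1U<<30) == 1073741824 */
};
#endif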
static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }

static void ZDICT_printHex(const void* ptr, size_t length)
{
    const BYTE* const b = (const BYTE*)ptr;
    size_t u;
    for (u=0; u<length; u++) {
        BYTE c = b[u];
        if (c<32 || c>126) c = '.';   /* non-printable char */
        DISPLAY("%c", c);
    }
}


/*-********************************************************
*  Helper functions
**********************************************************/
unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }

const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }

unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
{
    if (dictSize < 8) return 0;
    if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
    return MEM_readLE32((const char*)dictBuffer + 4);
}

size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)
{
    size_t headerSize;
    if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted);

    {   ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));
        U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE);
        if (!bs || !wksp) {
            headerSize = ERROR(memory_allocation);
        } else {
            ZSTD_reset_compressedBlockState(bs);
            headerSize = ZSTD_loadCEntropy(bs, wksp, dictBuffer, dictSize);
        }

        free(bs);
        free(wksp);
    }

    return headerSize;
}

/*-********************************************************
*  Dictionary training functions
**********************************************************/
static unsigned ZDICT_NbCommonBytes (size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r=0;
            _BitScanForward( &r, (U32)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
    }   }
}
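/* Sketch (not library code) of how ZDICT_NbCommonBytes() above is used:
 * XOR two word-sized loads, then the number of low-order zero *bytes* of the
 * difference (on little-endian) is the length of the matching prefix.
 * toy_common_bytes is a hypothetical helper.
 */
#if 0   /* example only, excluded from the build */
#include <string.h>
static unsigned toy_common_bytes(const char* a, const char* b)
{
    size_t va, vb;
    memcpy(&va, a, sizeof va);                 /* stands in for MEM_readST() */
    memcpy(&vb, b, sizeof vb);
    if (va == vb) return (unsigned)sizeof va;  /* all loaded bytes match */
    return ZDICT_NbCommonBytes(va ^ vb);       /* e.g. "abcdWXYZ" vs "abcd0123" -> 4 */
}
#endif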
/*! ZDICT_count() :
    Count the nb of common bytes between 2 pointers.
    Note : this function presumes end of buffer followed by noisy guard band.
*/
static size_t ZDICT_count(const void* pIn, const void* pMatch)
{
    const char* const pStart = (const char*)pIn;
    for (;;) {
        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
        if (!diff) {
            pIn = (const char*)pIn+sizeof(size_t);
            pMatch = (const char*)pMatch+sizeof(size_t);
            continue;
        }
        pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);
        return (size_t)((const char*)pIn - pStart);
    }
}


typedef struct {
    U32 pos;
    U32 length;
    U32 savings;
} dictItem;

static void ZDICT_initDictItem(dictItem* d)
{
    d->pos = 1;
    d->length = 0;
    d->savings = (U32)(-1);
}


#define LLIMIT 64          /* heuristic determined experimentally */
#define MINMATCHLENGTH 7   /* heuristic determined experimentally */
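/* Sketch (not library code): ZDICT_count() above walks both pointers one word
 * at a time and relies on the noisy guard band after the buffer to terminate,
 * so it never takes an explicit length. Toy illustration of that contract:
 */
#if 0   /* example only, excluded from the build */
static void toy_count_demo(void)
{
    /* buffer deliberately ends in filler that cannot keep matching */
    const char buf[] = "abcabcXYZ.......";
    size_t const n = ZDICT_count(buf, buf + 3);
    /* n == 3 : "abc" matches, then 'a' vs 'X' differs */
    (void)n;
}
#endif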
static dictItem ZDICT_analyzePos(
                       BYTE* doneMarks,
                       const int* suffix, U32 start,
                       const void* buffer, U32 minRatio, U32 notificationLevel)
{
    U32 lengthList[LLIMIT] = {0};
    U32 cumulLength[LLIMIT] = {0};
    U32 savings[LLIMIT] = {0};
    const BYTE* b = (const BYTE*)buffer;
    size_t maxLength = LLIMIT;
    size_t pos = suffix[start];
    U32 end = start;
    dictItem solution;

    /* init */
    memset(&solution, 0, sizeof(solution));
    doneMarks[pos] = 1;

    /* trivial repetition cases */
    if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))
       ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
       ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
        /* skip and mark segment */
        U16 const pattern16 = MEM_read16(b+pos+4);
        U32 u, patternEnd = 6;
        while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
        if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
        for (u=1; u<patternEnd; u++)
            doneMarks[pos+u] = 1;
        return solution;
    }

    /* look forward */
    {   size_t length;
        do {
            end++;
            length = ZDICT_count(b + pos, b + suffix[end]);
        } while (length >= MINMATCHLENGTH);
    }

    /* look backward */
    {   size_t length;
        do {
            length = ZDICT_count(b + pos, b + *(suffix+start-1));
            if (length >=MINMATCHLENGTH) start--;
        } while(length >= MINMATCHLENGTH);
    }

    /* exit if not found a minimum nb of repetitions */
    if (end-start < minRatio) {
        U32 idx;
        for(idx=start; idx<end; idx++)
            doneMarks[suffix[idx]] = 1;
        return solution;
    }

    {   int i;
        U32 mml;
        U32 refinedStart = start;
        U32 refinedEnd = end;

        DISPLAYLEVEL(4, "\n");
        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
        DISPLAYLEVEL(4, "\n");

        for (mml = MINMATCHLENGTH ; ; mml++) {
            BYTE currentChar = 0;
            U32 currentCount = 0;
            U32 currentID = refinedStart;
            U32 id;
            U32 selectedCount = 0;
            U32 selectedID = currentID;
            for (id =refinedStart; id < refinedEnd; id++) {
                if (b[suffix[id] + mml] != currentChar) {
                    if (currentCount > selectedCount) {
                        selectedCount = currentCount;
                        selectedID = currentID;
                    }
                    currentID = id;
                    currentChar = b[ suffix[id] + mml];
                    currentCount = 0;
                }
                currentCount ++;
            }
            if (currentCount > selectedCount) {  /* for last */
                selectedCount = currentCount;
                selectedID = currentID;
            }

            if (selectedCount < minRatio)
                break;
            refinedStart = selectedID;
            refinedEnd = refinedStart + selectedCount;
        }

        /* evaluate gain based on new dict */
        start = refinedStart;
        pos = suffix[refinedStart];
        end = start;
        memset(lengthList, 0, sizeof(lengthList));

        /* look forward */
        {   size_t length;
            do {
                end++;
                length = ZDICT_count(b + pos, b + suffix[end]);
                if (length >= LLIMIT) length = LLIMIT-1;
                lengthList[length]++;
            } while (length >=MINMATCHLENGTH);
        }

        /* look backward */
        {   size_t length = MINMATCHLENGTH;
            while ((length >= MINMATCHLENGTH) & (start > 0)) {
                length = ZDICT_count(b + pos, b + suffix[start - 1]);
                if (length >= LLIMIT) length = LLIMIT - 1;
                lengthList[length]++;
                if (length >= MINMATCHLENGTH) start--;
            }
        }

        /* largest useful length */
        memset(cumulLength, 0, sizeof(cumulLength));
        cumulLength[maxLength-1] = lengthList[maxLength-1];
        for (i=(int)(maxLength-2); i>=0; i--)
            cumulLength[i] = cumulLength[i+1] + lengthList[i];

        for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;
        maxLength = i;

        /* reduce maxLength when the match tail runs into repetitive data */
        {   U32 l = (U32)maxLength;
            BYTE const c = b[pos + maxLength-1];
            while (b[pos+l-2]==c) l--;
            maxLength = l;
        }
        if (maxLength < MINMATCHLENGTH) return solution;   /* skip : no long-enough solution */

        /* calculate savings */
        savings[5] = 0;
        for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
            savings[i] = savings[i-1] + (lengthList[i] * (i-3));

        DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n",
                     (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);

        solution.pos = (U32)pos;
        solution.length = (U32)maxLength;
        solution.savings = savings[maxLength];

        /* mark positions done */
        {   U32 id;
            for (id=start; id<end; id++) {
                U32 p, pEnd, length;
                U32 const testedPos = (U32)suffix[id];
                if (testedPos == pos)
                    length = solution.length;
                else {
                    length = (U32)ZDICT_count(b+pos, b+testedPos);
                    if (length > solution.length) length = solution.length;
                }
                pEnd = (U32)(testedPos + length);
                for (p=testedPos; p<pEnd; p++)
                    doneMarks[p] = 1;
    }   }   }

    return solution;
}


static int isIncluded(const void* in, const void* container, size_t length)
{
    const char* const ip = (const char*) in;
    const char* const into = (const char*) container;
    size_t u;

    for (u=0; u<length; u++) {  /* works because end of buffer is a noisy guard band */
        if (ip[u] != into[u]) break;
    }

    return u==length;
}

/*! ZDICT_tryMerge() :
    check if dictItem can be merged, do it if possible
    @return : id of destination elt, 0 if not merged
*/
static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
{
    const U32 tableSize = table->pos;
    const U32 eltEnd = elt.pos + elt.length;
    const char* const buf = (const char*) buffer;

    /* tail overlap */
    U32 u;
    for (u=1; u<tableSize; u++) {
        if (u==eltNbToSkip) continue;
        if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) {  /* overlap, existing > new */
            /* append */
            U32 const addedLength = table[u].pos - elt.pos;
            table[u].length += addedLength;
            table[u].pos = elt.pos;
            table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
            table[u].savings += elt.length / 8;    /* rough approx bonus */
            elt = table[u];
            /* sort : improve rank */
            while ((u>1) && (table[u-1].savings < elt.savings))
                table[u] = table[u-1], u--;
            table[u] = elt;
            return u;
    }   }

    /* front overlap */
    for (u=1; u<tableSize; u++) {
        if (u==eltNbToSkip) continue;

        if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */
            /* append */
            int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
            table[u].savings += elt.length / 8;    /* rough approx bonus */
            if (addedLength > 0) {   /* otherwise, elt fully included into existing */
                table[u].length += addedLength;
                table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
            }
            /* sort : improve rank */
            elt = table[u];
            while ((u>1) && (table[u-1].savings < elt.savings))
                table[u] = table[u-1], u--;
            table[u] = elt;
            return u;
        }

        if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
            if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
                size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
                table[u].pos = elt.pos;
                table[u].savings += (U32)(elt.savings * addedLength / elt.length);
                table[u].length = MIN(elt.length, table[u].length + 1);
                return u;
    }   }   }

    return 0;
}


static void ZDICT_removeDictItem(dictItem* table, U32 id)
{
    /* convention : table[0].pos stores nb of elts */
    U32 const max = table[0].pos;
    U32 u;
    if (!id) return;   /* protection, should never happen */
    for (u=id; u<max-1; u++)
        table[u] = table[u+1];
    table->pos--;
}


static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
{
    /* merge if possible */
    U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
    if (mergeId) {
        U32 newMerge = 1;
        while (newMerge) {
            newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
            if (newMerge) ZDICT_removeDictItem(table, mergeId);
            mergeId = newMerge;
        }
        return;
    }

    /* insert */
    {   U32 current;
        U32 nextElt = table->pos;
        if (nextElt >= maxSize) nextElt = maxSize-1;
        current = nextElt-1;
        while (table[current].savings < elt.savings) {
            table[current+1] = table[current];
            current--;
        }
        table[current+1] = elt;
        table->pos = nextElt+1;
    }
}


static U32 ZDICT_dictSize(const dictItem* dictList)
{
    U32 u, dictSize = 0;
    for (u=1; u<dictList[0].pos; u++)
        dictSize += dictList[u].length;
    return dictSize;
}
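/* Sketch (not library code) of the dictList bookkeeping above: table[0].pos
 * stores the number of used slots (header included), and
 * ZDICT_insertDictItem() keeps entries sorted by decreasing `savings`.
 * `buffer` must hold the sample bytes the positions point into (a few hundred
 * bytes here), since ZDICT_tryMerge() probes it when testing for overlap.
 */
#if 0   /* example only, excluded from the build */
static void toy_dictlist_demo(const void* buffer)
{
    dictItem list[8];
    dictItem a;
    ZDICT_initDictItem(list);   /* list[0].pos == 1 : only the header slot is used */
    a.pos = 100; a.length = 20; a.savings = 50;
    ZDICT_insertDictItem(list, 8, a, buffer);   /* becomes list[1] */
    a.pos = 400; a.length = 30; a.savings = 90;
    ZDICT_insertDictItem(list, 8, a, buffer);   /* higher savings: takes list[1], pushes the other down */
    /* ZDICT_dictSize(list) now reports 20 + 30 == 50 content bytes */
}
#endif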
static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
                            const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */
                            const size_t* fileSizes, unsigned nbFiles,
                            unsigned minRatio, U32 notificationLevel)
{
    int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
    int* const suffix = suffix0+1;
    U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));
    BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks));   /* +16 for overflow security */
    U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));
    size_t result = 0;
    clock_t displayClock = 0;
    clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;

#   undef  DISPLAYUPDATE
#   define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
            if (ZDICT_clockSpan(displayClock) > refreshRate)  \
            { displayClock = clock(); DISPLAY(__VA_ARGS__); \
            if (notificationLevel>=4) fflush(stderr); } }

    /* init */
    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
    if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {
        result = ERROR(memory_allocation);
        goto _cleanup;
    }
    if (minRatio < MINRATIO) minRatio = MINRATIO;
    memset(doneMarks, 0, bufferSize+16);

    /* limit sample set size (divsufsort limitation)*/
    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
    while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];

    /* sort */
    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
    {   int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
        if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
    }
    suffix[bufferSize] = (int)bufferSize;   /* leads into noise */
    suffix0[0] = (int)bufferSize;           /* leads into noise */
    /* build reverse suffix sort */
    {   size_t pos;
        for (pos=0; pos < bufferSize; pos++)
            reverseSuffix[suffix[pos]] = (U32)pos;
        /* note filePos tracks borders between samples.
           It's not used at this stage, but planned to become useful in a later update */
        filePos[0] = 0;
        for (pos=1; pos<nbFiles; pos++)
            filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);
    }

    DISPLAYLEVEL(2, "finding patterns ... \n");
    DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);

    {   U32 cursor; for (cursor=0; cursor < bufferSize; ) {
            dictItem solution;
            if (doneMarks[cursor]) { cursor++; continue; }
            solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
            if (solution.length==0) { cursor++; continue; }
            ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
            cursor += solution.length;
            DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / (double)bufferSize * 100.0);
    }   }

_cleanup:
    free(suffix0);
    free(reverseSuffix);
    free(doneMarks);
    free(filePos);
    return result;
}


static void ZDICT_fillNoise(void* buffer, size_t length)
{
    unsigned const prime1 = 2654435761U;
    unsigned const prime2 = 2246822519U;
    unsigned acc = prime1;
    size_t p=0;
    for (p=0; p<length; p++) {
        acc *= prime2;
        ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
    }
}


typedef struct
{
    ZSTD_CDict* dict;    /* dictionary */
    ZSTD_CCtx* zc;       /* working context */
    void* workPlace;     /* must be ZSTD_BLOCKSIZE_MAX allocated */
} EStats_ress_t;

#define MAXREPOFFSET 1024

static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
                              unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
                              const void* src, size_t srcSize,
                              U32 notificationLevel)
{
    size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog);
    size_t cSize;

    if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */
    {   size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
    }
    cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }

    if (cSize) {  /* if == 0; block is not compressible */
        const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);

        /* literals stats */
        {   const BYTE* bytePtr;
            for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
                countLit[*bytePtr]++;
        }

        /* seqStats */
        {   U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
            ZSTD_seqToCodes(seqStorePtr);

            {   const BYTE* codePtr = seqStorePtr->ofCode;
                U32 u;
                for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
            }

            {   const BYTE* codePtr = seqStorePtr->mlCode;
                U32 u;
                for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
            }

            {   const BYTE* codePtr = seqStorePtr->llCode;
                U32 u;
                for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
            }

            if (nbSeq >= 2) { /* rep offsets */
                const seqDef* const seq = seqStorePtr->sequencesStart;
                U32 offset1 = seq[0].offset - 3;
                U32 offset2 = seq[1].offset - 3;
                if (offset1 >= MAXREPOFFSET) offset1 = 0;
                if (offset2 >= MAXREPOFFSET) offset2 = 0;
                repOffsets[offset1] += 3;
                repOffsets[offset2] += 1;
    }   }   }
}

static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
{
    size_t total=0;
    unsigned u;
    for (u=0; u<nbFiles; u++) total += fileSizes[u];
    return total;
}

typedef struct { U32 offset; U32 count; } offsetCount_t;

static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)
{
    U32 u;
    table[ZSTD_REP_NUM].offset = val;
    table[ZSTD_REP_NUM].count = count;
    for (u=ZSTD_REP_NUM; u>0; u--) {
        offsetCount_t tmp;
        if (table[u-1].count >= table[u].count) break;
        tmp = table[u-1];
        table[u-1] = table[u];
        table[u] = tmp;
    }
}
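/* Sketch (not library code): ZDICT_insertSortCount() above maintains the
 * ZSTD_REP_NUM best repeat-offset candidates sorted by descending count; the
 * extra slot at index ZSTD_REP_NUM is scratch space for the incoming value.
 */
#if 0   /* example only, excluded from the build */
static void toy_rep_offsets_demo(void)
{
    offsetCount_t best[ZSTD_REP_NUM+1];
    memset(best, 0, sizeof(best));
    ZDICT_insertSortCount(best, /* offset */ 1, /* count */ 10);
    ZDICT_insertSortCount(best, /* offset */ 8, /* count */ 25);
    ZDICT_insertSortCount(best, /* offset */ 4, /* count */  3);
    /* best[0] is now {8,25}, best[1] is {1,10}, best[2] is {4,3} */
}
#endif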
/* ZDICT_flatLit() :
 * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
 * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
 */
static void ZDICT_flatLit(unsigned* countLit)
{
    int u;
    for (u=1; u<256; u++) countLit[u] = 2;
    countLit[0]   = 4;
    countLit[253] = 1;
    countLit[254] = 1;
}

#define OFFCODE_MAX 30  /* only applicable to first block */
static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
                                   int compressionLevel,
                             const void*  srcBuffer, const size_t* fileSizes, unsigned nbFiles,
                             const void* dictBuffer, size_t  dictBufferSize,
                                   unsigned notificationLevel)
{
    unsigned countLit[256];
    HUF_CREATE_STATIC_CTABLE(hufTable, 255);
    unsigned offcodeCount[OFFCODE_MAX+1];
    short offcodeNCount[OFFCODE_MAX+1];
    U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
    unsigned matchLengthCount[MaxML+1];
    short matchLengthNCount[MaxML+1];
    unsigned litLengthCount[MaxLL+1];
    short litLengthNCount[MaxLL+1];
    U32 repOffset[MAXREPOFFSET];
    offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
    EStats_ress_t esr = { NULL, NULL, NULL };
    ZSTD_parameters params;
    U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
    size_t pos = 0, errorCode;
    size_t eSize = 0;
    size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
    size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);
    BYTE* dstPtr = (BYTE*)dstBuffer;

    /* init */
    DEBUGLOG(4, "ZDICT_analyzeEntropy");
    if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */
    for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
    for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
    for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
    for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
    memset(repOffset, 0, sizeof(repOffset));
    repOffset[1] = repOffset[4] = repOffset[8] = 1;
    memset(bestRepOffset, 0, sizeof(bestRepOffset));
    if (compressionLevel==0) compressionLevel = ZSTD_CLEVEL_DEFAULT;
    params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);

    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
    esr.zc = ZSTD_createCCtx();
    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
    if (!esr.dict || !esr.zc || !esr.workPlace) {
        eSize = ERROR(memory_allocation);
        DISPLAYLEVEL(1, "Not enough memory \n");
        goto _cleanup;
    }

    /* collect stats on all samples */
    for (u=0; u<nbFiles; u++) {
        ZDICT_countEStats(esr, &params,
                          countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
                         (const char*)srcBuffer + pos, fileSizes[u],
                          notificationLevel);
        pos += fileSizes[u];
    }

    if (notificationLevel >= 4) {
        /* writeStats */
        DISPLAYLEVEL(4, "Offset Code Frequencies : \n");
        for (u=0; u<=offcodeMax; u++) {
            DISPLAYLEVEL(4, "%2u :%7u \n", u, offcodeCount[u]);
    }   }

    /* analyze, build stats, starting with literals */
    {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
        if (HUF_isError(maxNbBits)) {
            eSize = maxNbBits;
            DISPLAYLEVEL(1, " HUF_buildCTable error \n");
            goto _cleanup;
        }
        if (maxNbBits==8) {  /* special case : will not compress anything */
            ZDICT_flatLit(countLit);  /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable can encode */
            maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
            assert(maxNbBits==9);
        }
        huffLog = (U32)maxNbBits;
    }

    /* looking for most common first offsets */
    {   U32 offset;
        for (offset=1; offset<MAXREPOFFSET; offset++)
            ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
    }
    /* note : the result of this phase should be used to better appreciate the impact on statistics */

    total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
    errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, /* useLowProbCount */ 1);
    if (FSE_isError(errorCode)) {
        eSize = errorCode;
        DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
        goto _cleanup;
    }
    Offlog = (U32)errorCode;

    total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
    errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML, /* useLowProbCount */ 1);
    if (FSE_isError(errorCode)) {
        eSize = errorCode;
        DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
        goto _cleanup;
    }
    mlLog = (U32)errorCode;

    total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
    errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL, /* useLowProbCount */ 1);
    if (FSE_isError(errorCode)) {
        eSize = errorCode;
        DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
        goto _cleanup;
    }
    llLog = (U32)errorCode;

    /* write result to buffer */
    {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
        if (HUF_isError(hhSize)) {
            eSize = hhSize;
            DISPLAYLEVEL(1, "HUF_writeCTable error \n");
            goto _cleanup;
        }
        dstPtr += hhSize;
        maxDstSize -= hhSize;
        eSize += hhSize;
    }

    {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
        if (FSE_isError(ohSize)) {
            eSize = ohSize;
            DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
            goto _cleanup;
        }
        dstPtr += ohSize;
        maxDstSize -= ohSize;
        eSize += ohSize;
    }

    {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
        if (FSE_isError(mhSize)) {
            eSize = mhSize;
            DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
            goto _cleanup;
        }
        dstPtr += mhSize;
        maxDstSize -= mhSize;
        eSize += mhSize;
    }

    {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
        if (FSE_isError(lhSize)) {
            eSize = lhSize;
            DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
            goto _cleanup;
        }
        dstPtr += lhSize;
        maxDstSize -= lhSize;
        eSize += lhSize;
    }

    if (maxDstSize<12) {
        eSize = ERROR(dstSize_tooSmall);
        DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
        goto _cleanup;
    }
#if 0
    MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);
    MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);
    MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
#else
    /* at this stage, we don't use the result of "most common first offset",
     * as the impact of statistics is not properly evaluated */
    MEM_writeLE32(dstPtr+0, repStartValue[0]);
    MEM_writeLE32(dstPtr+4, repStartValue[1]);
    MEM_writeLE32(dstPtr+8, repStartValue[2]);
#endif
    eSize += 12;

_cleanup:
    ZSTD_freeCDict(esr.dict);
    ZSTD_freeCCtx(esr.zc);
    free(esr.workPlace);

    return eSize;
}


size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                          const void* customDictContent, size_t dictContentSize,
                          const void* samplesBuffer, const size_t* samplesSizes,
                          unsigned nbSamples, ZDICT_params_t params)
{
    size_t hSize;
#define HBUFFSIZE 256   /* should prove large enough for all entropy headers */
    BYTE header[HBUFFSIZE];
    int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
    U32 const notificationLevel = params.notificationLevel;

    /* check conditions */
    DEBUGLOG(4, "ZDICT_finalizeDictionary");
    if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);

    /* dictionary header */
    MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
    {   U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
        U32 const dictID = params.dictID ? params.dictID : compliantID;
        MEM_writeLE32(header+4, dictID);
    }
    hSize = 8;

    /* entropy tables */
    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
    DISPLAYLEVEL(2, "statistics ... \n");
    {   size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
                                  compressionLevel,
                                  samplesBuffer, samplesSizes, nbSamples,
                                  customDictContent, dictContentSize,
                                  notificationLevel);
        if (ZDICT_isError(eSize)) return eSize;
        hSize += eSize;
    }

    /* copy elements in final buffer ; note : src and dst buffer can overlap */
    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
    {   size_t const dictSize = hSize + dictContentSize;
        char* dictEnd = (char*)dictBuffer + dictSize;
        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
        memcpy(dictBuffer, header, hSize);
        return dictSize;
    }
}
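/* Usage sketch (not library code) for ZDICT_finalizeDictionary() above: given
 * raw dictionary content plus the training samples, it prepends the entropy
 * tables and dictionary header and returns the final dictionary size (or an
 * error code testable with ZDICT_isError()). Error handling elided;
 * toy_finalize is a hypothetical wrapper.
 */
#if 0   /* example only, excluded from the build */
static size_t toy_finalize(void* out, size_t outCap,
                           const void* rawContent, size_t rawSize,
                           const void* samples, const size_t* sampleSizes,
                           unsigned nbSamples)
{
    ZDICT_params_t p;
    memset(&p, 0, sizeof(p));   /* level 0 selects ZSTD_CLEVEL_DEFAULT; dictID 0 auto-generates one */
    return ZDICT_finalizeDictionary(out, outCap, rawContent, rawSize,
                                    samples, sampleSizes, nbSamples, p);
}
#endif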
\n"); { size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize, compressionLevel, samplesBuffer, samplesSizes, nbSamples, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, notificationLevel); if (ZDICT_isError(eSize)) return eSize; hSize += eSize; } /* add dictionary header (after entropy tables) */ MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY); { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0); U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768; U32 const dictID = params.dictID ? params.dictID : compliantID; MEM_writeLE32((char*)dictBuffer+4, dictID); } if (hSize + dictContentSize < dictBufferCapacity) memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize); return MIN(dictBufferCapacity, hSize+dictContentSize); } /*! ZDICT_trainFromBuffer_unsafe_legacy() : * Warning : `samplesBuffer` must be followed by noisy guard band !!! * @return : size of dictionary, or an error code which can be tested with ZDICT_isError() */ static size_t ZDICT_trainFromBuffer_unsafe_legacy( void* dictBuffer, size_t maxDictSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params) { U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16)); dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList)); unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel; unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity; size_t const targetDictSize = maxDictSize; size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); size_t dictSize = 0; U32 const notificationLevel = params.zParams.notificationLevel; /* checks */ if (!dictList) return ERROR(memory_allocation); if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */ if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */ /* init */ ZDICT_initDictItem(dictList); /* build dictionary */ ZDICT_trainBuffer_legacy(dictList, dictListSize, samplesBuffer, samplesBuffSize, samplesSizes, nbSamples, minRep, notificationLevel); /* display best matches */ if (params.zParams.notificationLevel>= 3) { unsigned const nb = MIN(25, dictList[0].pos); unsigned const dictContentSize = ZDICT_dictSize(dictList); unsigned u; DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize); DISPLAYLEVEL(3, "list %u best segments \n", nb-1); for (u=1; u samplesBuffSize) || ((pos + length) > samplesBuffSize)) { free(dictList); return ERROR(GENERIC); /* should never happen */ } DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |", u, length, pos, (unsigned)dictList[u].savings); ZDICT_printHex((const char*)samplesBuffer+pos, printedLength); DISPLAYLEVEL(3, "| \n"); } } /* create dictionary */ { unsigned dictContentSize = ZDICT_dictSize(dictList); if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */ if (dictContentSize < targetDictSize/4) { DISPLAYLEVEL(2, "! 
    /* create dictionary */
    {   unsigned dictContentSize = ZDICT_dictSize(dictList);
        if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* dictionary content too small */
        if (dictContentSize < targetDictSize/4) {
            DISPLAYLEVEL(2, "!  warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
            if (samplesBuffSize < 10 * targetDictSize)
                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
            if (minRep > MINRATIO) {
                DISPLAYLEVEL(2, "!  consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
                DISPLAYLEVEL(2, "!  note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
            }
        }

        if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
            unsigned proposedSelectivity = selectivity-1;
            while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
            DISPLAYLEVEL(2, "!  note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
            DISPLAYLEVEL(2, "!  consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
            DISPLAYLEVEL(2, "!  always test dictionary efficiency on real samples \n");
        }

        /* limit dictionary size */
        {   U32 const max = dictList->pos;   /* convention : nb of useful elts within dictList */
            U32 currentSize = 0;
            U32 n; for (n=1; n<max; n++) {
                currentSize += dictList[n].length;
                if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }
            }
            dictList->pos = n;
            dictContentSize = currentSize;
        }

        /* build dict content */
        {   U32 u;
            BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;
            for (u=1; u<dictList->pos; u++) {
                U32 l = dictList[u].length;
                ptr -= l;
                if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); }   /* should not happen */
                memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);
        }   }

        dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
                                                             samplesBuffer, samplesSizes, nbSamples,
                                                             params.zParams);
    }

    /* clean up */
    free(dictList);
    return dictSize;
}


/* ZDICT_trainFromBuffer_legacy() :
 * issue : samplesBuffer needs to be followed by a noisy guard band.
 * work around : duplicate the buffer, and add the noise */
size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
                                    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                    ZDICT_legacy_params_t params)
{
    size_t result;
    void* newBuff;
    size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
    if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0;   /* not enough content => no dictionary */

    newBuff = malloc(sBuffSize + NOISELENGTH);
    if (!newBuff) return ERROR(memory_allocation);

    memcpy(newBuff, samplesBuffer, sBuffSize);
    ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH);   /* guard band, for end of buffer condition */

    result = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
                                                 samplesSizes, nbSamples, params);
    free(newBuff);
    return result;
}


size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
                             const void* samplesBuffer,
                             const size_t* samplesSizes, unsigned nbSamples)
{
    ZDICT_fastCover_params_t params;
    DEBUGLOG(3, "ZDICT_trainFromBuffer");
    memset(&params, 0, sizeof(params));
    params.d = 8;
    params.steps = 4;
    /* Use default level since no compression level information is available */
    params.zParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
    params.zParams.notificationLevel = DEBUGLEVEL;
#endif
    return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
                                                   samplesBuffer, samplesSizes, nbSamples,
                                                   &params);
}

size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
{
    ZDICT_params_t params;
    memset(&params, 0, sizeof(params));
    return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,
                                                     samplesBuffer, samplesSizes, nbSamples,
                                                     params);
}
/**** ended inlining dictBuilder/zdict.c ****/
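/* Usage sketch (not library code): the typical entry point of this module is
 * ZDICT_trainFromBuffer(), fed with concatenated samples and a table of their
 * sizes. toy_train is a hypothetical wrapper; the ~110 KB target is just a
 * commonly used dictionary size, not a requirement.
 */
#if 0   /* example only, excluded from the build */
#include <stdio.h>
static void toy_train(const void* samples, const size_t* sampleSizes, unsigned nbSamples)
{
    static char dict[112640];   /* ~110 KB */
    size_t const dictSize = ZDICT_trainFromBuffer(dict, sizeof(dict),
                                                  samples, sampleSizes, nbSamples);
    if (ZDICT_isError(dictSize)) {
        fprintf(stderr, "training failed : %s \n", ZDICT_getErrorName(dictSize));
        return;
    }
    fprintf(stderr, "trained a dictionary of %u bytes \n", (unsigned)dictSize);
}
#endif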