module gamut.codecs.miniz;

version(encodePNG)
{
    version = encodeDecodePNG;
}
version(decodePNG)
{
    version = encodeDecodePNG;
}
version(encodeDecodePNG):

// To be able to use this and win about 5% of PNG loading time, this needs:
// - to disable adler32 checking (like STB) on the PNG decoder side, with flags that trickle down
// - to pass the flags that avoid zlib header parsing if it's an iPhone PNG (CgBI chunk)
// Probably we can't use the simple zlib-like wrappers for this?

/* miniz.c 3.0.0 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
   See "unlicense" statement at the end of this file.
   Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
   Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt

   Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define
   MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros).

   * Low-level Deflate/Inflate implementation notes:

     Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or
     greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses
     approximately as well as zlib.

     Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function
     coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping
     buffer, or into a memory block large enough to hold the entire file.

     The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation.

   * zlib-style API notes:

     miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a
     drop-in zlib replacement in many apps:
        The z_stream struct, optional memory allocation callbacks
        deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
        inflateInit/inflateInit2/inflate/inflateReset/inflateEnd
        compress, compress2, compressBound, uncompress
        CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines.
        Supports raw deflate streams or standard zlib streams with adler-32 checking.

     Limitations:
        The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries.
        I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but
        there are no guarantees that miniz.c pulls this off perfectly.

   * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by
     Alex Evans. Supports 1-4 bytes/pixel images.

   * ZIP archive API notes:

     The ZIP archive API's were designed with simplicity and efficiency in mind, with just enough abstraction to
     get the job done with minimal fuss. There are simple API's to retrieve file information, read files from
     existing archives, create new archives, append new files to existing archives, or clone archive data from
     one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h),
     or you can specify custom file read/write callbacks.
     - Archive reading: Just call this function to read a single file from a disk archive:

          void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name,
                                                    size_t *pSize, mz_uint zip_flags);

       For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central
       directory is located and read as-is into memory, and subsequent file access only occurs when reading
       individual files.

     - Archive file scanning: The simple way is to use this function to scan a loaded archive for a specific file:

          int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags);

       The locate operation can optionally check file comments too, which (as one example) can be used to identify
       multiple versions of the same file in an archive. This function uses a simple linear search through the
       central directory, so it's not very fast.

       Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and
       retrieve detailed info on each file by calling mz_zip_reader_file_stat().

     - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data
       to disk and builds an exact image of the central directory in memory. The central directory image is written
       all at once at the end of the archive file when the archive is finalized.

       The archive writer can optionally align each file's local header and file data to any power of 2 alignment,
       which can be useful when the archive will be read from optical media. Also, the writer supports placing
       arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still
       readable by any ZIP tool.

     - Archive appending: The simple way to add a single file to an archive is to call this function:

          mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name,
                                                          const void *pBuf, size_t buf_size, const void *pComment,
                                                          mz_uint16 comment_size, mz_uint level_and_flags);

       The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending
       is done in-place and is not an atomic operation, so if something goes wrong during the operation it's
       possible the archive could be left without a central directory (although the local file headers and file
       data will be fine, so the archive will be recoverable).

       For more complex archive modification scenarios:
       1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want
          to preserve into a new archive using the mz_zip_writer_add_from_zip_reader() function (which copies the
          compressed file data as-is). When you're done, delete the old archive and rename the newly written
          archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory.

       2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(),
          append new files as needed, then finalize the archive which will write an updated central directory to
          the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a
          possibility that the archive's central directory could be lost with this method if anything goes wrong,
          though.

     - ZIP archive support limitations:
        No spanning support.
Extraction functions can only handle unencrypted, stored or deflated files. 116 Requires streams capable of seeking. 117 118 * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the 119 below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. 120 121 * Important: For best perf. be sure to customize the below macros for your target platform: 122 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 123 #define MINIZ_LITTLE_ENDIAN 1 124 #define MINIZ_HAS_64BIT_REGISTERS 1 125 126 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz 127 uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files 128 (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). 129 */ 130 131 import core.stdc.stdlib: malloc, free, realloc; 132 import core.stdc.string: memset, memcpy; 133 import core.stdc.config: c_long, c_ulong; 134 135 nothrow @nogc: 136 137 version(LittleEndian) 138 enum MINIZ_LITTLE_ENDIAN = 1; 139 else 140 enum MINIZ_LITTLE_ENDIAN = 0; 141 142 /* ------------------- zlib-style API Definitions. */ 143 144 /* For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. 145 Beware: mz_ulong can be either 32 or 64-bits! */ 146 alias mz_ulong = c_ulong; 147 148 /* mz_free() internally uses the MZ_FREE() macro (which by default calls free() unless you've 149 modified the MZ_MALLOC macro) to release a block allocated from the heap. */ 150 alias mz_free = free; 151 152 enum MZ_ADLER32_INIT = 1; 153 enum MZ_CRC32_INIT = 0; 154 155 /* Compression strategies. */ 156 enum 157 { 158 MZ_DEFAULT_STRATEGY = 0, 159 MZ_FILTERED = 1, 160 MZ_HUFFMAN_ONLY = 2, 161 MZ_RLE = 3, 162 MZ_FIXED = 4 163 } 164 165 /* Method */ 166 public enum MZ_DEFLATED = 8; 167 168 /* Heap allocation callbacks. 169 Note that mz_alloc_func parameter types purposely differ from zlib's: items/size is size_t, not 170 unsigned long. */ 171 alias mz_alloc_func = void* function(void *opaque, size_t items, size_t size); 172 alias mz_free_func = void function(void *opaque, void *address); 173 alias mz_realloc_func = void* function(void *opaque, void *address, size_t items, size_t size); 174 175 void* MZ_MAX_voidp(void* a, void* b) 176 { 177 return (((a) > (b)) ? (a) : (b)); 178 } 179 180 int MZ_MAX_int(int a, int b) 181 { 182 return (a > b) ? a : b; 183 } 184 185 size_t MZ_MAX_size_t(size_t a, size_t b) 186 { 187 return (a > b) ? a : b; 188 } 189 190 191 mz_ulong MZ_MAX_mz_ulong(mz_ulong a, mz_ulong b) 192 { 193 return (a > b) ? a : b; 194 } 195 196 int MZ_MIN_int(int a, int b) 197 { 198 return (a < b) ? a : b; 199 } 200 201 uint MZ_MIN_uint(uint a, uint b) 202 { 203 return (a < b) ? a : b; 204 } 205 206 size_t MZ_MIN_size_t(size_t a, size_t b) 207 { 208 return (a < b) ? a : b; 209 } 210 211 /* Compression levels: 0-9 are the standard zlib-style levels, 10 is best possible compression 212 (not zlib compatible, and may be very slow), MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. */ 213 enum 214 { 215 MZ_NO_COMPRESSION = 0, 216 MZ_BEST_SPEED = 1, 217 MZ_BEST_COMPRESSION = 9, 218 MZ_UBER_COMPRESSION = 10, 219 MZ_DEFAULT_LEVEL = 6, 220 MZ_DEFAULT_COMPRESSION = -1 221 } 222 223 enum MZ_VERSION = "11.0.2"; 224 enum MZ_VERNUM = 0xB002; 225 enum MZ_VER_MAJOR = 11; 226 enum MZ_VER_MINOR = 2; 227 enum MZ_VER_REVISION = 0; 228 enum MZ_VER_SUBREVISION = 0; 229 230 /* Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. 
The other values are for advanced use (refer to the zlib docs). */
enum
{
    MZ_NO_FLUSH = 0,
    MZ_PARTIAL_FLUSH = 1,
    MZ_SYNC_FLUSH = 2,
    MZ_FULL_FLUSH = 3,
    MZ_FINISH = 4,
    MZ_BLOCK = 5
}

/* Return status codes. MZ_PARAM_ERROR is non-standard. */
enum
{
    MZ_OK = 0,
    MZ_STREAM_END = 1,
    MZ_NEED_DICT = 2,
    MZ_ERRNO = -1,
    MZ_STREAM_ERROR = -2,
    MZ_DATA_ERROR = -3,
    MZ_MEM_ERROR = -4,
    MZ_BUF_ERROR = -5,
    MZ_VERSION_ERROR = -6,
    MZ_PARAM_ERROR = -10000
}

/* Window bits */
enum MZ_DEFAULT_WINDOW_BITS = 15;

struct mz_internal_state;

/* Compression/decompression stream struct. */
struct mz_stream
{
    const(ubyte)* next_in;     /* pointer to next byte to read */
    uint avail_in;             /* number of bytes available at next_in */
    mz_ulong total_in;         /* total number of bytes consumed so far */

    ubyte *next_out;           /* pointer to next byte to write */
    uint avail_out;            /* number of bytes that can be written to next_out */
    mz_ulong total_out;        /* total number of bytes produced so far */

    ubyte *msg;                /* error msg (unused) */
    mz_internal_state *state;  /* internal state, allocated by zalloc/zfree */

    mz_alloc_func zalloc;      /* optional heap allocation function (defaults to malloc) */
    mz_free_func zfree;        /* optional heap free function (defaults to free) */
    void *opaque;              /* heap alloc function user pointer */

    int data_type;             /* data_type (unused) */
    mz_ulong adler;            /* adler32 of the source or uncompressed data */
    mz_ulong reserved;         /* not used */
}


/* Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used as a drop-in
   replacement for the subset of zlib that miniz.c supports. */

// start of miniz_common.h

/* ------------------- Types and macros */
alias mz_uint8 = ubyte;
alias mz_int16 = short;
alias mz_uint16 = ushort;
alias mz_uint32 = uint;
alias mz_uint = uint;
alias mz_int64 = long;
alias mz_uint64 = ulong;
alias mz_bool = int;

enum MZ_FALSE = 0;
enum MZ_TRUE = 1;

alias MZ_MALLOC = malloc;
alias MZ_FREE = free;
alias MZ_REALLOC = realloc;

version(LittleEndian)
{
    ushort MZ_READ_LE16(const(void)* p)
    {
        return *cast(const(mz_uint16)*)p;
    }

    uint MZ_READ_LE32(const(void)* p)
    {
        return *cast(const(mz_uint32)*)p;
    }
}
else
{
    ushort MZ_READ_LE16(const(void)* p)
    {
        // (interestingly, in the original miniz source this macro returns a uint32 on BigEndian and a uint16 on LittleEndian, meh)
        const(mz_uint8)* b = cast(const(ubyte)*) p;
        return b[0] | (b[1] << 8);
    }

    uint MZ_READ_LE32(const(void)* p)
    {
        const(mz_uint8)* b = cast(const(ubyte)*) p;
        return b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24);
    }
}

ulong MZ_READ_LE64(const(void)* p)
{
    return cast(ulong) MZ_READ_LE32(p) | ( cast(ulong)(MZ_READ_LE32( (cast(ubyte*)p) + 4)) << 32 );
}

enum MZ_UINT16_MAX = 0xFFFFU;
enum MZ_UINT32_MAX = 0xFFFFFFFFU;

// end of miniz_common.h

static assert(mz_uint16.sizeof == 2);
static assert(mz_uint32.sizeof == 4);
static assert(mz_uint64.sizeof == 8);


/* ------------------- zlib-style API's */

/* mz_adler32() returns the initial adler-32 value to use when called with ptr==null.
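   The checksum can be built up incrementally across buffers by feeding the previous result back in.
   Illustrative sketch only (the chunk buffers are placeholders, not part of this module):

       mz_ulong a = mz_adler32(0, null, 0);            // returns MZ_ADLER32_INIT (1)
       a = mz_adler32(a, chunk1.ptr, chunk1.length);   // fold in the first buffer
       a = mz_adler32(a, chunk2.ptr, chunk2.length);   // fold in the next buffer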
*/ 354 mz_ulong mz_adler32(mz_ulong adler, const(ubyte) *ptr, size_t buf_len) 355 { 356 mz_uint32 i, s1 = cast(mz_uint32)(adler & 0xffff), s2 = cast(mz_uint32)(adler >> 16); 357 size_t block_len = buf_len % 5552; 358 if (!ptr) 359 return MZ_ADLER32_INIT; 360 while (buf_len) 361 { 362 for (i = 0; i + 7 < block_len; i += 8, ptr += 8) 363 { 364 s1 += ptr[0], s2 += s1; 365 s1 += ptr[1], s2 += s1; 366 s1 += ptr[2], s2 += s1; 367 s1 += ptr[3], s2 += s1; 368 s1 += ptr[4], s2 += s1; 369 s1 += ptr[5], s2 += s1; 370 s1 += ptr[6], s2 += s1; 371 s1 += ptr[7], s2 += s1; 372 } 373 for (; i < block_len; ++i) 374 s1 += *ptr++, s2 += s1; 375 s1 %= 65521U, s2 %= 65521U; 376 buf_len -= block_len; 377 block_len = 5552; 378 } 379 return (s2 << 16) + s1; 380 } 381 382 enum compactCRC32 = false; 383 /* Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/ */ 384 static if (compactCRC32) 385 { 386 /* mz_crc32() returns the initial CRC-32 value to use when called with ptr==null. */ 387 mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) 388 { 389 __gshared static immutable mz_uint32[16] s_crc32 = 390 [ 391 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 392 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c 393 ]; 394 mz_uint32 crcu32 = cast(mz_uint32)crc; 395 if (!ptr) 396 return MZ_CRC32_INIT; 397 crcu32 = ~crcu32; 398 while (buf_len--) 399 { 400 mz_uint8 b = *ptr++; 401 crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; 402 crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; 403 } 404 return ~crcu32; 405 } 406 407 } 408 else 409 { 410 /* Faster, but larger CPU cache footprint. 411 mz_crc32() returns the initial CRC-32 value to use when called with ptr==null. 
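   Usage mirrors mz_adler32(): seed with a null-pointer call (or MZ_CRC32_INIT), then fold in data
   incrementally. Illustrative sketch (the "data" buffer is a placeholder); this is also the CRC-32
   variant PNG uses over each chunk's type and data bytes:

       mz_ulong c = mz_crc32(0, null, 0);        // returns MZ_CRC32_INIT (0)
       c = mz_crc32(c, data.ptr, data.length);   // CRC-32 of the buffer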
412 */ 413 mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) 414 { 415 __gshared static immutable mz_uint32[256] s_crc_table = 416 [ 417 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 418 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 419 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 420 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 421 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 422 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 423 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 424 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 425 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 426 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 427 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 428 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 429 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 430 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 431 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 432 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 433 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 434 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 435 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 436 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 437 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 438 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 439 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 440 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 441 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 442 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 443 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 444 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 445 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 446 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 447 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 448 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 449 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 450 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 451 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 452 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 453 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D 454 ]; 455 456 mz_uint32 crc32 = cast(mz_uint32)crc ^ 0xFFFFFFFF; 457 const(mz_uint8)*pByte_buf = cast(const(mz_uint8)*)ptr; 458 459 while (buf_len >= 4) 460 { 461 crc32 = (crc32 >> 
8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF]; 462 crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[1]) & 0xFF]; 463 crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[2]) & 0xFF]; 464 crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[3]) & 0xFF]; 465 pByte_buf += 4; 466 buf_len -= 4; 467 } 468 469 while (buf_len) 470 { 471 crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF]; 472 ++pByte_buf; 473 --buf_len; 474 } 475 476 return ~crc32; 477 } 478 } 479 480 void mz_free(void *p) 481 { 482 MZ_FREE(p); 483 } 484 485 void *miniz_def_alloc_func(void *opaque, size_t items, size_t size) 486 { 487 return MZ_MALLOC(items * size); 488 } 489 490 void miniz_def_free_func(void *opaque, void *address) 491 { 492 MZ_FREE(address); 493 } 494 495 void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size) 496 { 497 return MZ_REALLOC(address, items * size); 498 } 499 500 /* Returns the version string of miniz.c. */ 501 const(char)* mz_version() 502 { 503 return MZ_VERSION.ptr; 504 } 505 506 /* mz_deflateInit() initializes a compressor with default options: */ 507 /* Parameters: */ 508 /* pStream must point to an initialized mz_stream struct. */ 509 /* level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. */ 510 /* level 1 enables a specially optimized compression function that's been optimized purely for performance, not ratio. */ 511 /* (This special func. is currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) */ 512 /* Return values: */ 513 /* MZ_OK on success. */ 514 /* MZ_STREAM_ERROR if the stream is bogus. */ 515 /* MZ_PARAM_ERROR if the input parameters are bogus. */ 516 /* MZ_MEM_ERROR on out of memory. */ 517 int mz_deflateInit(mz_stream* pStream, int level) 518 { 519 return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); 520 } 521 522 523 /* mz_deflateInit2() is like mz_deflate(), except with more control: */ 524 /* Additional parameters: */ 525 /* method must be MZ_DEFLATED */ 526 /* window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no header or footer) */ 527 /* mem_level must be between [1, 9] (it's checked but ignored by miniz.c) */ 528 int mz_deflateInit2(mz_stream* pStream, int level, int method, int window_bits, int mem_level, int strategy) 529 { 530 tdefl_compressor *pComp; 531 mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); 532 533 if (!pStream) 534 return MZ_STREAM_ERROR; 535 if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) 536 return MZ_PARAM_ERROR; 537 538 pStream.data_type = 0; 539 pStream.adler = MZ_ADLER32_INIT; 540 pStream.msg = null; 541 pStream.reserved = 0; 542 pStream.total_in = 0; 543 pStream.total_out = 0; 544 if (!pStream.zalloc) 545 pStream.zalloc = &miniz_def_alloc_func; 546 if (!pStream.zfree) 547 pStream.zfree = &miniz_def_free_func; 548 549 pComp = cast(tdefl_compressor *) pStream.zalloc(pStream.opaque, 1, tdefl_compressor.sizeof); 550 if (!pComp) 551 return MZ_MEM_ERROR; 552 553 pStream.state = cast(mz_internal_state *)pComp; 554 555 if (tdefl_init(pComp, null, null, comp_flags) != TDEFL_STATUS_OKAY) 556 { 557 mz_deflateEnd(pStream); 558 return MZ_PARAM_ERROR; 559 } 560 561 return MZ_OK; 562 } 563 564 /* Quickly resets a compressor without having 
to reallocate anything. Same as calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). */ 565 int mz_deflateReset(mz_stream* pStream) 566 { 567 if ((!pStream) || (!pStream.state) || (!pStream.zalloc) || (!pStream.zfree)) 568 return MZ_STREAM_ERROR; 569 pStream.total_in = pStream.total_out = 0; 570 tdefl_init(cast(tdefl_compressor *)pStream.state, null, null, (cast(tdefl_compressor *)pStream.state).m_flags); 571 return MZ_OK; 572 } 573 574 /* mz_deflate() compresses the input to output, consuming as much of the input and producing as much output as possible. */ 575 /* Parameters: */ 576 /* pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. */ 577 /* flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH. */ 578 /* Return values: */ 579 /* MZ_OK on success (when flushing, or if more input is needed but not available, and/or there's more output to be written but the output buffer is full). */ 580 /* MZ_STREAM_END if all input has been consumed and all output bytes have been written. Don't call mz_deflate() on the stream anymore. */ 581 /* MZ_STREAM_ERROR if the stream is bogus. */ 582 /* MZ_PARAM_ERROR if one of the parameters is invalid. */ 583 /* MZ_BUF_ERROR if no forward progress is possible because the input and/or output buffers are empty. (Fill up the input buffer or free up some output space and try again.) */ 584 int mz_deflate(mz_stream* pStream, int flush) 585 { 586 size_t in_bytes, out_bytes; 587 mz_ulong orig_total_in, orig_total_out; 588 int mz_status = MZ_OK; 589 590 if ((!pStream) || (!pStream.state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream.next_out)) 591 return MZ_STREAM_ERROR; 592 if (!pStream.avail_out) 593 return MZ_BUF_ERROR; 594 595 if (flush == MZ_PARTIAL_FLUSH) 596 flush = MZ_SYNC_FLUSH; 597 598 if ((cast(tdefl_compressor *)pStream.state).m_prev_return_status == TDEFL_STATUS_DONE) 599 return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR; 600 601 orig_total_in = pStream.total_in; 602 orig_total_out = pStream.total_out; 603 for (;;) 604 { 605 tdefl_status defl_status; 606 in_bytes = pStream.avail_in; 607 out_bytes = pStream.avail_out; 608 609 defl_status = tdefl_compress(cast(tdefl_compressor *)pStream.state, pStream.next_in, &in_bytes, 610 pStream.next_out, &out_bytes, cast(tdefl_flush)flush); 611 pStream.next_in += cast(mz_uint)in_bytes; 612 pStream.avail_in -= cast(mz_uint)in_bytes; 613 pStream.total_in += cast(mz_uint)in_bytes; 614 pStream.adler = tdefl_get_adler32(cast(tdefl_compressor *)pStream.state); 615 616 pStream.next_out += cast(mz_uint)out_bytes; 617 pStream.avail_out -= cast(mz_uint)out_bytes; 618 pStream.total_out += cast(mz_uint)out_bytes; 619 620 if (defl_status < 0) 621 { 622 mz_status = MZ_STREAM_ERROR; 623 break; 624 } 625 else if (defl_status == TDEFL_STATUS_DONE) 626 { 627 mz_status = MZ_STREAM_END; 628 break; 629 } 630 else if (!pStream.avail_out) 631 break; 632 else if ((!pStream.avail_in) && (flush != MZ_FINISH)) 633 { 634 if ((flush) || (pStream.total_in != orig_total_in) || (pStream.total_out != orig_total_out)) 635 break; 636 return MZ_BUF_ERROR; /* Can't make forward progress without some input. 637 */ 638 } 639 } 640 return mz_status; 641 } 642 643 /* mz_deflateEnd() deinitializes a compressor: */ 644 /* Return values: */ 645 /* MZ_OK on success. */ 646 /* MZ_STREAM_ERROR if the stream is bogus. 
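   For reference, a minimal single-shot use of the streaming compressor looks roughly like this
   (illustrative sketch only; "src" and "dst" are placeholder buffers, and dst is assumed to be at
   least mz_deflateBound(null, src.length) bytes):

       mz_stream s;
       memset(&s, 0, s.sizeof);
       if (mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION) != MZ_OK)
           return;                                   // handle the error in real code
       s.next_in  = src.ptr;   s.avail_in  = cast(uint) src.length;
       s.next_out = dst.ptr;   s.avail_out = cast(uint) dst.length;
       int rc = mz_deflate(&s, MZ_FINISH);           // MZ_STREAM_END when dst was large enough
       mz_ulong written = s.total_out;
       mz_deflateEnd(&s);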
*/ 647 int mz_deflateEnd(mz_stream* pStream) 648 { 649 if (!pStream) 650 return MZ_STREAM_ERROR; 651 if (pStream.state) 652 { 653 pStream.zfree(pStream.opaque, pStream.state); 654 pStream.state = null; 655 } 656 return MZ_OK; 657 } 658 659 /* mz_deflateBound() returns a (very) conservative upper bound on the amount of data that could be generated by deflate(), assuming flush is set to only MZ_NO_FLUSH or MZ_FINISH. */ 660 mz_ulong mz_deflateBound(mz_stream* pStream, mz_ulong source_len) 661 { 662 /* This is really over conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.) */ 663 return MZ_MAX_mz_ulong(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); 664 } 665 666 /** Single-call compression functions mz_compress() and mz_compress2(): 667 Returns MZ_OK on success, or one of the error codes from mz_deflate() on failure. */ 668 int mz_compress2(ubyte *pDest, mz_ulong *pDest_len, const(ubyte)* pSource, mz_ulong source_len, int level) 669 { 670 int status; 671 mz_stream stream; 672 memset(&stream, 0, stream.sizeof); 673 674 /* In case mz_ulong is 64-bits (argh I hate longs). */ 675 if (cast(mz_uint64)(source_len | *pDest_len) > 0xFFFFFFFFU) 676 return MZ_PARAM_ERROR; 677 678 stream.next_in = pSource; 679 stream.avail_in = cast(mz_uint32)source_len; 680 stream.next_out = pDest; 681 stream.avail_out = cast(mz_uint32)*pDest_len; 682 683 status = mz_deflateInit(&stream, level); 684 if (status != MZ_OK) 685 return status; 686 687 status = mz_deflate(&stream, MZ_FINISH); 688 if (status != MZ_STREAM_END) 689 { 690 mz_deflateEnd(&stream); 691 return (status == MZ_OK) ? MZ_BUF_ERROR : status; 692 } 693 694 *pDest_len = stream.total_out; 695 return mz_deflateEnd(&stream); 696 } 697 698 ///ditto 699 int mz_compress(ubyte* pDest, mz_ulong *pDest_len, const(ubyte)* pSource, mz_ulong source_len) 700 { 701 return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); 702 } 703 704 /* mz_compressBound() returns a (very) conservative upper bound on the amount of data that could 705 be generated by calling mz_compress(). */ 706 mz_ulong mz_compressBound(mz_ulong source_len) 707 { 708 return mz_deflateBound(null, source_len); 709 } 710 711 struct inflate_state 712 { 713 tinfl_decompressor m_decomp; 714 mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; 715 int m_window_bits; 716 mz_uint8[TINFL_LZ_DICT_SIZE] m_dict; 717 tinfl_status m_last_status; 718 } 719 720 /* mz_inflateInit2() is like mz_inflateInit() with an additional option that controls the window 721 size and whether or not the stream has been wrapped with a zlib header/footer: 722 /* window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS 723 (raw deflate). 
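   The negative form is what headerless streams need, e.g. the raw deflate IDAT data of the
   iPhone-style CgBI PNGs mentioned at the top of this module. Illustrative sketch only:

       mz_stream s;
       memset(&s, 0, s.sizeof);
       int rc = mz_inflateInit2(&s, -MZ_DEFAULT_WINDOW_BITS);   // raw deflate: no zlib header, no adler-32 footer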
*/ 724 int mz_inflateInit2(mz_stream* pStream, int window_bits) 725 { 726 inflate_state *pDecomp; 727 if (!pStream) 728 return MZ_STREAM_ERROR; 729 if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) 730 return MZ_PARAM_ERROR; 731 732 pStream.data_type = 0; 733 pStream.adler = 0; 734 pStream.msg = null; 735 pStream.total_in = 0; 736 pStream.total_out = 0; 737 pStream.reserved = 0; 738 if (!pStream.zalloc) 739 pStream.zalloc = &miniz_def_alloc_func; 740 if (!pStream.zfree) 741 pStream.zfree = &miniz_def_free_func; 742 743 pDecomp = cast(inflate_state *)pStream.zalloc(pStream.opaque, 1, inflate_state.sizeof); 744 if (!pDecomp) 745 return MZ_MEM_ERROR; 746 747 pStream.state = cast(mz_internal_state *)pDecomp; 748 749 tinfl_init(&pDecomp.m_decomp); 750 pDecomp.m_dict_ofs = 0; 751 pDecomp.m_dict_avail = 0; 752 pDecomp.m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; 753 pDecomp.m_first_call = 1; 754 pDecomp.m_has_flushed = 0; 755 pDecomp.m_window_bits = window_bits; 756 757 return MZ_OK; 758 } 759 760 /* Initializes a decompressor. */ 761 int mz_inflateInit(mz_stream* pStream) 762 { 763 return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); 764 } 765 766 /* Quickly resets a compressor without having to reallocate anything. Same as calling 767 mz_inflateEnd() followed by mz_inflateInit()/mz_inflateInit2(). */ 768 int mz_inflateReset(mz_stream* pStream) 769 { 770 inflate_state *pDecomp; 771 if (!pStream) 772 return MZ_STREAM_ERROR; 773 774 pStream.data_type = 0; 775 pStream.adler = 0; 776 pStream.msg = null; 777 pStream.total_in = 0; 778 pStream.total_out = 0; 779 pStream.reserved = 0; 780 781 pDecomp = cast(inflate_state *)pStream.state; 782 783 tinfl_init(&pDecomp.m_decomp); 784 pDecomp.m_dict_ofs = 0; 785 pDecomp.m_dict_avail = 0; 786 pDecomp.m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; 787 pDecomp.m_first_call = 1; 788 pDecomp.m_has_flushed = 0; 789 /* pDecomp.m_window_bits = window_bits; */ 790 791 return MZ_OK; 792 } 793 794 /* Decompresses the input stream to the output, consuming only as much of the input as needed, and 795 writing as much to the output as possible. */ 796 /* Parameters: */ 797 /* pStream is the stream to read from and write to. You must initialize/update the next_in, 798 avail_in, next_out, and avail_out members. */ 799 /* flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. */ 800 /* On the first call, if flush is MZ_FINISH it's assumed the input and output buffers are both 801 sized large enough to decompress the entire stream in a single call (this is slightly faster). */ 802 /* MZ_FINISH implies that there are no more source bytes available beside what's already in the 803 input buffer, and that the output buffer is large enough to hold the rest of the decompressed data. */ 804 /* Return values: */ 805 /* MZ_OK on success. Either more input is needed but not available, and/or there's more output 806 to be written but the output buffer is full. */ 807 /* MZ_STREAM_END if all needed input has been consumed and all output bytes have been written. 808 For zlib streams, the adler-32 of the decompressed data has also been verified. */ 809 /* MZ_STREAM_ERROR if the stream is bogus. */ 810 /* MZ_DATA_ERROR if the deflate stream is invalid. */ 811 /* MZ_PARAM_ERROR if one of the parameters is invalid. */ 812 /* MZ_BUF_ERROR if no forward progress is possible because the input buffer is empty but the 813 inflater needs more input to continue, or if the output buffer is not large enough. 
Call 814 mz_inflate() again */ 815 /* with more input data, or with more room in the output buffer (except when using single call 816 decompression, described above). */ 817 int mz_inflate(mz_stream* pStream, int flush) 818 { 819 return mz_inflate2(pStream, flush, TINFL_FLAG_COMPUTE_ADLER32); 820 } 821 822 // Same with decom_flags control. 823 // You can use TINFL_FLAG_DO_NOT_COMPUTE_ADLER32 if input is trusted. 824 int mz_inflate2(mz_stream* pStream, int flush, int decomp_flags) 825 { 826 inflate_state *pState; 827 mz_uint n, first_call; 828 829 size_t in_bytes, out_bytes, orig_avail_in; 830 tinfl_status status; 831 832 if ((!pStream) || (!pStream.state)) 833 return MZ_STREAM_ERROR; 834 if (flush == MZ_PARTIAL_FLUSH) 835 flush = MZ_SYNC_FLUSH; 836 if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) 837 return MZ_STREAM_ERROR; 838 839 pState = cast(inflate_state *)pStream.state; 840 if (pState.m_window_bits > 0) 841 decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; 842 orig_avail_in = pStream.avail_in; 843 844 first_call = pState.m_first_call; 845 pState.m_first_call = 0; 846 if (pState.m_last_status < 0) 847 return MZ_DATA_ERROR; 848 849 if (pState.m_has_flushed && (flush != MZ_FINISH)) 850 return MZ_STREAM_ERROR; 851 pState.m_has_flushed |= (flush == MZ_FINISH); 852 853 if ((flush == MZ_FINISH) && (first_call)) 854 { 855 /* MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file. */ 856 decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; 857 in_bytes = pStream.avail_in; 858 out_bytes = pStream.avail_out; 859 status = tinfl_decompress(&pState.m_decomp, pStream.next_in, &in_bytes, pStream.next_out, pStream.next_out, &out_bytes, decomp_flags); 860 pState.m_last_status = status; 861 pStream.next_in += cast(mz_uint)in_bytes; 862 pStream.avail_in -= cast(mz_uint)in_bytes; 863 pStream.total_in += cast(mz_uint)in_bytes; 864 pStream.adler = tinfl_get_adler32(&pState.m_decomp); 865 pStream.next_out += cast(mz_uint)out_bytes; 866 pStream.avail_out -= cast(mz_uint)out_bytes; 867 pStream.total_out += cast(mz_uint)out_bytes; 868 869 if (status < 0) 870 return MZ_DATA_ERROR; 871 else if (status != TINFL_STATUS_DONE) 872 { 873 pState.m_last_status = TINFL_STATUS_FAILED; 874 return MZ_BUF_ERROR; 875 } 876 return MZ_STREAM_END; 877 } 878 /* flush != MZ_FINISH then we must assume there's more input. */ 879 if (flush != MZ_FINISH) 880 decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; 881 882 if (pState.m_dict_avail) 883 { 884 n = MZ_MIN_uint(pState.m_dict_avail, pStream.avail_out); 885 memcpy(pStream.next_out, pState.m_dict.ptr + pState.m_dict_ofs, n); 886 pStream.next_out += n; 887 pStream.avail_out -= n; 888 pStream.total_out += n; 889 pState.m_dict_avail -= n; 890 pState.m_dict_ofs = (pState.m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); 891 return ((pState.m_last_status == TINFL_STATUS_DONE) && (!pState.m_dict_avail)) ? 
MZ_STREAM_END : MZ_OK; 892 } 893 894 for (;;) 895 { 896 in_bytes = pStream.avail_in; 897 out_bytes = TINFL_LZ_DICT_SIZE - pState.m_dict_ofs; 898 899 status = tinfl_decompress(&pState.m_decomp, pStream.next_in, &in_bytes, pState.m_dict.ptr, 900 pState.m_dict.ptr + pState.m_dict_ofs, &out_bytes, decomp_flags); 901 pState.m_last_status = status; 902 903 pStream.next_in += cast(mz_uint)in_bytes; 904 pStream.avail_in -= cast(mz_uint)in_bytes; 905 pStream.total_in += cast(mz_uint)in_bytes; 906 pStream.adler = tinfl_get_adler32(&pState.m_decomp); 907 908 pState.m_dict_avail = cast(mz_uint)out_bytes; 909 910 n = MZ_MIN_uint(pState.m_dict_avail, pStream.avail_out); 911 memcpy(pStream.next_out, pState.m_dict.ptr + pState.m_dict_ofs, n); 912 pStream.next_out += n; 913 pStream.avail_out -= n; 914 pStream.total_out += n; 915 pState.m_dict_avail -= n; 916 pState.m_dict_ofs = (pState.m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); 917 918 if (status < 0) 919 return MZ_DATA_ERROR; /* Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well). */ 920 else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) 921 return MZ_BUF_ERROR; /* Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH. */ 922 else if (flush == MZ_FINISH) 923 { 924 /* The output buffer MUST be large to hold the remaining uncompressed data when flush==MZ_FINISH. */ 925 if (status == TINFL_STATUS_DONE) 926 return pState.m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; 927 /* status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong. */ 928 else if (!pStream.avail_out) 929 return MZ_BUF_ERROR; 930 } 931 else if ((status == TINFL_STATUS_DONE) || (!pStream.avail_in) || (!pStream.avail_out) || (pState.m_dict_avail)) 932 break; 933 } 934 935 return ((status == TINFL_STATUS_DONE) && (!pState.m_dict_avail)) ? MZ_STREAM_END : MZ_OK; 936 } 937 938 /* Deinitializes a decompressor. */ 939 int mz_inflateEnd(mz_stream* pStream) 940 { 941 if (!pStream) 942 return MZ_STREAM_ERROR; 943 if (pStream.state) 944 { 945 pStream.zfree(pStream.opaque, pStream.state); 946 pStream.state = null; 947 } 948 return MZ_OK; 949 } 950 951 /* Single-call decompression. */ 952 /* Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure. */ 953 int mz_uncompress2(ubyte *pDest, mz_ulong *pDest_len, const(ubyte)* pSource, mz_ulong *pSource_len) 954 { 955 return mz_uncompress3(pDest, pDest_len, pSource, pSource_len, MZ_DEFAULT_WINDOW_BITS, false); 956 } 957 958 /// Same, but also specify window_bits, in case the stream has no header. This is useful for iPhone PNG. 959 /// Also allows to skip Adler32 check. 960 int mz_uncompress3(ubyte *pDest, mz_ulong *pDest_len, 961 const(ubyte)* pSource, mz_ulong *pSource_len, 962 int window_bits, 963 bool trusted_input) 964 { 965 mz_stream stream = void; 966 int status; 967 memset(&stream, 0, stream.sizeof); 968 969 /* In case mz_ulong is 64-bits (argh I hate longs). */ 970 if (cast(mz_uint64)(*pSource_len | *pDest_len) > 0xFFFFFFFFU) 971 return MZ_PARAM_ERROR; 972 973 stream.next_in = pSource; 974 stream.avail_in = cast(mz_uint32)*pSource_len; 975 stream.next_out = pDest; 976 stream.avail_out = cast(mz_uint32)*pDest_len; 977 978 status = mz_inflateInit2(&stream, window_bits); 979 if (status != MZ_OK) 980 return status; 981 982 status = mz_inflate2(&stream, MZ_FINISH, trusted_input ? 
TINFL_FLAG_DO_NOT_COMPUTE_ADLER32 : TINFL_FLAG_COMPUTE_ADLER32); 983 *pSource_len = *pSource_len - stream.avail_in; 984 if (status != MZ_STREAM_END) 985 { 986 mz_inflateEnd(&stream); 987 return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR : status; 988 } 989 *pDest_len = stream.total_out; 990 991 return mz_inflateEnd(&stream); 992 } 993 994 /* Single-call decompression. */ 995 /* Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure. */ 996 int mz_uncompress(ubyte *pDest, mz_ulong *pDest_len, const(ubyte)* pSource, mz_ulong source_len) 997 { 998 return mz_uncompress2(pDest, pDest_len, pSource, &source_len); 999 } 1000 1001 /* Returns a string description of the specified error code, or null if the error code is invalid. */ 1002 const(char)* mz_error(int err) 1003 { 1004 if (err == MZ_STREAM_END) 1005 return "stream end".ptr; 1006 if (err == MZ_NEED_DICT) 1007 return "need dictionary".ptr; 1008 if (err == MZ_ERRNO) 1009 return "file error".ptr; 1010 if (err == MZ_STREAM_ERROR) 1011 return "stream error".ptr; 1012 if (err == MZ_DATA_ERROR) 1013 return "data error".ptr; 1014 if (err == MZ_MEM_ERROR) 1015 return "out of memory".ptr; 1016 if (err == MZ_BUF_ERROR) 1017 return "buf error".ptr; 1018 if (err == MZ_VERSION_ERROR) 1019 return "version error".ptr; 1020 if (err == MZ_PARAM_ERROR) 1021 return "parameter error".ptr; 1022 return null; 1023 } 1024 1025 1026 1027 enum ZLIB_COMPATIBLE_NAMES = true; 1028 static if (ZLIB_COMPATIBLE_NAMES) 1029 { 1030 alias Byte = char; 1031 alias uInt = uint; 1032 alias uLong = mz_ulong; 1033 alias intf = int; 1034 alias voidpf = void*; 1035 alias uLongf = uLong; 1036 alias voidp = void*; 1037 alias voidpc = const(void)*; 1038 1039 enum Z_null = 0; 1040 alias Z_NO_FLUSH = MZ_NO_FLUSH; 1041 alias Z_PARTIAL_FLUSH = MZ_PARTIAL_FLUSH; 1042 alias Z_SYNC_FLUSH = MZ_SYNC_FLUSH; 1043 alias Z_FULL_FLUSH = MZ_FULL_FLUSH; 1044 alias Z_FINISH = MZ_FINISH; 1045 alias Z_BLOCK = MZ_BLOCK; 1046 alias Z_OK = MZ_OK; 1047 alias Z_STREAM_END = MZ_STREAM_END; 1048 alias Z_NEED_DICT = MZ_NEED_DICT; 1049 alias Z_ERRNO = MZ_ERRNO; 1050 alias Z_STREAM_ERROR = MZ_STREAM_ERROR; 1051 alias Z_DATA_ERROR = MZ_DATA_ERROR; 1052 alias Z_MEM_ERROR = MZ_MEM_ERROR; 1053 alias Z_BUF_ERROR = MZ_BUF_ERROR; 1054 alias Z_VERSION_ERROR = MZ_VERSION_ERROR; 1055 alias Z_PARAM_ERROR = MZ_PARAM_ERROR; 1056 alias Z_NO_COMPRESSION = MZ_NO_COMPRESSION; 1057 alias Z_BEST_SPEED = MZ_BEST_SPEED; 1058 alias Z_BEST_COMPRESSION = MZ_BEST_COMPRESSION; 1059 alias Z_DEFAULT_COMPRESSION = MZ_DEFAULT_COMPRESSION; 1060 alias Z_DEFAULT_STRATEGY = MZ_DEFAULT_STRATEGY; 1061 alias Z_FILTERED = MZ_FILTERED; 1062 alias Z_HUFFMAN_ONLY = MZ_HUFFMAN_ONLY; 1063 alias Z_RLE = MZ_RLE; 1064 alias Z_FIXED = MZ_FIXED; 1065 alias Z_DEFLATED = MZ_DEFLATED; 1066 alias Z_DEFAULT_WINDOW_BITS = MZ_DEFAULT_WINDOW_BITS; 1067 alias alloc_func = mz_alloc_func; 1068 alias free_func = mz_free_func; 1069 alias internal_state = mz_internal_state; 1070 alias z_stream = mz_stream; 1071 //alias deflateInit = mz_deflateInit; 1072 //alias deflateInit2 = mz_deflateInit2; 1073 //alias deflateReset = mz_deflateReset; 1074 //alias deflate = mz_deflate; 1075 //alias deflateEnd = mz_deflateEnd; 1076 //alias deflateBound = mz_deflateBound; 1077 //alias compress = mz_compress; 1078 //alias compress2 = mz_compress2; 1079 //alias compressBound = mz_compressBound; 1080 alias inflateInit = mz_inflateInit; 1081 alias inflateInit2 = mz_inflateInit2; 1082 alias inflateReset = mz_inflateReset; 1083 alias inflate = 
mz_inflate; 1084 alias inflateEnd = mz_inflateEnd; 1085 alias uncompress = mz_uncompress; 1086 alias uncompress2 = mz_uncompress2; 1087 alias crc32 = mz_crc32; 1088 alias adler32 = mz_adler32; 1089 enum MAX_WBITS = 15; 1090 enum MAX_MEM_LEVEL = 9; 1091 alias zError = mz_error; 1092 alias ZLIB_VERSION = MZ_VERSION; 1093 alias ZLIB_VERNUM = MZ_VERNUM; 1094 alias ZLIB_VER_MAJOR = MZ_VER_MAJOR; 1095 alias ZLIB_VER_MINOR = MZ_VER_MINOR; 1096 alias ZLIB_VER_REVISION = MZ_VER_REVISION; 1097 alias ZLIB_VER_SUBREVISION = MZ_VER_SUBREVISION; 1098 alias zlibVersion = mz_version; 1099 enum zlib_version = mz_version(); 1100 } 1101 1102 1103 /* 1104 This is free and unencumbered software released into the public domain. 1105 1106 Anyone is free to copy, modify, publish, use, compile, sell, or 1107 distribute this software, either in source code form or as a compiled 1108 binary, for any purpose, commercial or non-commercial, and by any 1109 means. 1110 1111 In jurisdictions that recognize copyright laws, the author or authors 1112 of this software dedicate any and all copyright interest in the 1113 software to the public domain. We make this dedication for the benefit 1114 of the public at large and to the detriment of our heirs and 1115 successors. We intend this dedication to be an overt act of 1116 relinquishment in perpetuity of all present and future rights to this 1117 software under copyright law. 1118 1119 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 1120 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 1121 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 1122 IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 1123 OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 1124 ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 1125 OTHER DEALINGS IN THE SOFTWARE. 1126 1127 For more information, please refer to <http://unlicense.org/> 1128 */ 1129 1130 // miniz_tinfl.h 1131 1132 /* Decompression flags used by tinfl_decompress(). */ 1133 /* TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the input is a raw deflate stream. */ 1134 /* TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available beyond the end of the supplied input buffer. If clear, the input buffer contains all remaining input. */ 1135 /* TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large enough to hold the entire decompressed stream. If clear, the output buffer is at least the size of the dictionary (typically 32KB). */ 1136 /* TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the decompressed bytes. */ 1137 enum 1138 { 1139 TINFL_FLAG_PARSE_ZLIB_HEADER = 1, 1140 TINFL_FLAG_HAS_MORE_INPUT = 2, 1141 TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, 1142 TINFL_FLAG_COMPUTE_ADLER32 = 8, // _forces_ adler compute 1143 1144 // _forces_ reported adler to be UB, this can be useful when the input is 1145 // trusted and you do not want to compute checksum 1146 TINFL_FLAG_DO_NOT_COMPUTE_ADLER32 = 16 1147 } 1148 1149 /* Max size of LZ dictionary. */ 1150 enum TINFL_LZ_DICT_SIZE = 32768; 1151 1152 /* Return status. */ 1153 alias tinfl_status = int; 1154 enum : tinfl_status 1155 { 1156 /* This flags indicates the inflator needs 1 or more input bytes to make forward progress, but the caller is indicating that no more are available. The compressed data */ 1157 /* is probably corrupted. 
If you call the inflator again with more bytes it'll try to continue processing the input but this is a BAD sign (either the data is corrupted or you called it incorrectly). */ 1158 /* If you call it again with no input you'll just get TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS again. */ 1159 TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS = -4, 1160 1161 /* This flag indicates that one or more of the input parameters was obviously bogus. (You can try calling it again, but if you get this error the calling code is wrong.) */ 1162 TINFL_STATUS_BAD_PARAM = -3, 1163 1164 /* This flags indicate the inflator is finished but the adler32 check of the uncompressed data didn't match. If you call it again it'll return TINFL_STATUS_DONE. */ 1165 TINFL_STATUS_ADLER32_MISMATCH = -2, 1166 1167 /* This flags indicate the inflator has somehow failed (bad code, corrupted input, etc.). If you call it again without resetting via tinfl_init() it it'll just keep on returning the same status failure code. */ 1168 TINFL_STATUS_FAILED = -1, 1169 1170 /* Any status code less than TINFL_STATUS_DONE must indicate a failure. */ 1171 1172 /* This flag indicates the inflator has returned every byte of uncompressed data that it can, has consumed every byte that it needed, has successfully reached the end of the deflate stream, and */ 1173 /* if zlib headers and adler32 checking enabled that it has successfully checked the uncompressed data's adler32. If you call it again you'll just get TINFL_STATUS_DONE over and over again. */ 1174 TINFL_STATUS_DONE = 0, 1175 1176 /* This flag indicates the inflator MUST have more input data (even 1 byte) before it can make any more forward progress, or you need to clear the TINFL_FLAG_HAS_MORE_INPUT */ 1177 /* flag on the next call if you don't have any more source data. If the source data was somehow corrupted it's also possible (but unlikely) for the inflator to keep on demanding input to */ 1178 /* proceed, so be sure to properly set the TINFL_FLAG_HAS_MORE_INPUT flag. */ 1179 TINFL_STATUS_NEEDS_MORE_INPUT = 1, 1180 1181 /* This flag indicates the inflator definitely has 1 or more bytes of uncompressed data available, but it cannot write this data into the output buffer. */ 1182 /* Note if the source compressed data was corrupted it's possible for the inflator to return a lot of uncompressed data to the caller. I've been assuming you know how much uncompressed data to expect */ 1183 /* (either exact or worst case) and will stop calling the inflator and fail after receiving too much. In pure streaming scenarios where you have no idea how many bytes to expect this may not be possible */ 1184 /* so I may need to add some code to address this. */ 1185 TINFL_STATUS_HAS_MORE_OUTPUT = 2 1186 } 1187 1188 void tinfl_init(tinfl_decompressor* r) 1189 { 1190 r.m_state = 0; 1191 } 1192 1193 uint tinfl_get_adler32(tinfl_decompressor* r) 1194 { 1195 return r.m_check_adler32; 1196 } 1197 1198 /* Internal/private bits follow. 
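   For orientation before the internals: driving tinfl_decompress() by hand into a 32KB wrapping
   dictionary looks roughly like the sketch below. This mirrors what mz_inflate2() does above; the
   input handling is left abstract, "pIn"/"avail_input" are placeholders, and real code must clear
   TINFL_FLAG_HAS_MORE_INPUT once no further input will arrive.

       tinfl_decompressor inflator;
       tinfl_init(&inflator);
       ubyte[TINFL_LZ_DICT_SIZE] dict;
       size_t dict_ofs = 0;
       for (;;)
       {
           size_t in_bytes  = avail_input;                    // bytes readable at pIn
           size_t out_bytes = TINFL_LZ_DICT_SIZE - dict_ofs;  // room left in the ring buffer
           tinfl_status st = tinfl_decompress(&inflator, pIn, &in_bytes,
                                              dict.ptr, dict.ptr + dict_ofs, &out_bytes,
                                              TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_HAS_MORE_INPUT);
           // consume out_bytes decompressed bytes at dict.ptr + dict_ofs, advance pIn by in_bytes
           dict_ofs = (dict_ofs + out_bytes) & (TINFL_LZ_DICT_SIZE - 1);
           if (st <= TINFL_STATUS_DONE)                       // done, or a negative failure code
               break;
       }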
*/ 1199 enum 1200 { 1201 TINFL_MAX_HUFF_TABLES = 3, 1202 TINFL_MAX_HUFF_SYMBOLS_0 = 288, 1203 TINFL_MAX_HUFF_SYMBOLS_1 = 32, 1204 TINFL_MAX_HUFF_SYMBOLS_2 = 19, 1205 TINFL_FAST_LOOKUP_BITS = 10, 1206 TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS 1207 } 1208 1209 enum TINFL_USE_64BIT_BITBUF = true; 1210 alias tinfl_bit_buf_t = mz_uint64 ; 1211 1212 struct tinfl_decompressor 1213 { 1214 mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra; 1215 mz_uint32[TINFL_MAX_HUFF_TABLES] m_table_sizes; 1216 tinfl_bit_buf_t m_bit_buf; 1217 size_t m_dist_from_out_buf_start; 1218 mz_int16[TINFL_FAST_LOOKUP_SIZE][TINFL_MAX_HUFF_TABLES] m_look_up; 1219 mz_int16[TINFL_MAX_HUFF_SYMBOLS_0 * 2] m_tree_0; 1220 mz_int16[TINFL_MAX_HUFF_SYMBOLS_1 * 2] m_tree_1; 1221 mz_int16[TINFL_MAX_HUFF_SYMBOLS_2 * 2] m_tree_2; 1222 mz_uint8[TINFL_MAX_HUFF_SYMBOLS_0] m_code_size_0; 1223 mz_uint8[TINFL_MAX_HUFF_SYMBOLS_1] m_code_size_1; 1224 mz_uint8[TINFL_MAX_HUFF_SYMBOLS_2] m_code_size_2; 1225 mz_uint8[4] m_raw_header; 1226 mz_uint8[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137] m_len_codes; 1227 } 1228 1229 // miniz_infl.c 1230 1231 1232 /* ------------------- Low-level Decompression (completely independent from all compression API's) */ 1233 1234 alias TINFL_MEMCPY = memcpy; 1235 alias TINFL_MEMSET = memset; 1236 1237 1238 void tinfl_clear_tree(tinfl_decompressor* r) 1239 { 1240 if (r.m_type == 0) 1241 r.m_tree_0[] = 0; 1242 else if (r.m_type == 1) 1243 r.m_tree_1[] = 0; 1244 else 1245 r.m_tree_2[] = 0; 1246 } 1247 1248 /* Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability. */ 1249 /* This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output. */ 1250 tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) 1251 { 1252 __gshared static immutable mz_uint16[31] s_length_base = [ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 ]; 1253 __gshared static immutable mz_uint8[31] s_length_extra = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 ]; 1254 __gshared static immutable mz_uint16[32] s_dist_base = [ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 ]; 1255 __gshared static immutable mz_uint8[32] s_dist_extra = [ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ]; 1256 __gshared static immutable mz_uint8[19] s_length_dezigzag = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; 1257 __gshared static immutable mz_uint16[3] s_min_table_sizes = [ 257, 1, 4 ]; 1258 1259 mz_int16*[3] pTrees; 1260 mz_uint8*[3] pCode_sizes; 1261 1262 tinfl_status status = TINFL_STATUS_FAILED; 1263 mz_uint32 num_bits, dist, counter, num_extra; 1264 tinfl_bit_buf_t bit_buf; 1265 const(mz_uint8)* pIn_buf_cur = pIn_buf_next; 1266 const(mz_uint8*) pIn_buf_end = pIn_buf_next + *pIn_buf_size; 1267 mz_uint8 *pOut_buf_cur = pOut_buf_next; 1268 mz_uint8 *pOut_buf_end = pOut_buf_next ? 
pOut_buf_next + *pOut_buf_size : null; 1269 size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? cast(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; 1270 1271 /* Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter). */ 1272 if ((!pOut_buf_start) || (!pOut_buf_next) || (!pIn_buf_size) || (!pOut_buf_size)) 1273 { 1274 return TINFL_STATUS_BAD_PARAM; 1275 } 1276 if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) 1277 { 1278 *pIn_buf_size = *pOut_buf_size = 0; 1279 return TINFL_STATUS_BAD_PARAM; 1280 } 1281 1282 pTrees[0] = r.m_tree_0.ptr; 1283 pTrees[1] = r.m_tree_1.ptr; 1284 pTrees[2] = r.m_tree_2.ptr; 1285 pCode_sizes[0] = r.m_code_size_0.ptr; 1286 pCode_sizes[1] = r.m_code_size_1.ptr; 1287 pCode_sizes[2] = r.m_code_size_2.ptr; 1288 1289 num_bits = r.m_num_bits; 1290 bit_buf = r.m_bit_buf; 1291 dist = r.m_dist; 1292 counter = r.m_counter; 1293 num_extra = r.m_num_extra; 1294 dist_from_out_buf_start = r.m_dist_from_out_buf_start; 1295 1296 uint c, c2; 1297 //int counterBits; 1298 int bits_; 1299 size_t nn; 1300 mz_uint s; 1301 1302 int tree_next, tree_cur; 1303 mz_int16 *pLookUp; 1304 mz_int16 *pTree; 1305 mz_uint8 *pCode_size; 1306 mz_uint i, j, used_syms, total, sym_index; 1307 mz_uint[17] next_code; 1308 mz_uint[16] total_syms; 1309 mz_uint32 i2, i3, s1, s2; 1310 mz_uint code_len2; 1311 mz_uint8 *pSrc; 1312 mz_uint extra_bits; 1313 int temp; 1314 1315 switch (r.m_state) 1316 { 1317 case 0: 1318 bit_buf = num_bits = dist = counter = num_extra = r.m_zhdr0 = r.m_zhdr1 = 0; 1319 r.m_z_adler32 = r.m_check_adler32 = 1; 1320 if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) 1321 { 1322 while (pIn_buf_cur >= pIn_buf_end) 1323 { 1324 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1325 r.m_state = 1; 1326 goto common_exit; 1327 case 1: 1328 } 1329 r.m_zhdr0 = *pIn_buf_cur++; 1330 1331 while (pIn_buf_cur >= pIn_buf_end) 1332 { 1333 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1334 r.m_state = 2; 1335 goto common_exit; 1336 case 2: 1337 } 1338 r.m_zhdr1 = *pIn_buf_cur++; 1339 1340 counter = (((r.m_zhdr0 * 256 + r.m_zhdr1) % 31 != 0) || (r.m_zhdr1 & 32) || ((r.m_zhdr0 & 15) != 8)); 1341 if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) 1342 counter |= (((1U << (8U + (r.m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < cast(size_t)(cast(size_t)1 << (8U + (r.m_zhdr0 >> 4))))); 1343 if (counter) 1344 { 1345 for (;;) 1346 { 1347 status = TINFL_STATUS_FAILED; 1348 r.m_state = 36; 1349 goto common_exit; 1350 case 36: 1351 } 1352 } 1353 } 1354 1355 do 1356 { 1357 if (num_bits < cast(mz_uint)(3)) 1358 { 1359 do 1360 { 1361 while (pIn_buf_cur >= pIn_buf_end) 1362 { 1363 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1364 r.m_state = 3; 1365 goto common_exit; 1366 case 3: 1367 } 1368 c = *pIn_buf_cur++; 1369 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1370 num_bits += 8; 1371 } while (num_bits < cast(mz_uint)(3)); 1372 } 1373 r.m_final = bit_buf & ((1 << (3)) - 1); 1374 bit_buf >>= (3); 1375 num_bits -= (3); 1376 1377 r.m_type = r.m_final >> 1; 1378 if (r.m_type == 0) 1379 { 1380 if (num_bits < cast(mz_uint)(num_bits & 7)) 1381 { 1382 do 1383 { 1384 while (pIn_buf_cur >= pIn_buf_end) 1385 { 1386 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1387 r.m_state = 5; 1388 goto common_exit; 1389 case 5: 1390 } 1391 c = *pIn_buf_cur++; 1392 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1393 num_bits += 8; 1394 } while (num_bits < cast(mz_uint)(num_bits & 7)); 1395 } 1396 bit_buf >>= (num_bits & 7); 1397 num_bits -= (num_bits & 7); 1398 1399 for (counter = 0; counter < 4; ++counter) 1400 { 1401 if (num_bits) 1402 { 1403 //TINFL_GET_BITS(6, r.m_raw_header[counter], 8); 1404 if (num_bits < cast(mz_uint)(8)) 1405 { 1406 do 1407 { 1408 while (pIn_buf_cur >= pIn_buf_end) 1409 { 1410 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1411 r.m_state = 6; 1412 goto common_exit; 1413 case 6: 1414 } 1415 c = *pIn_buf_cur++; 1416 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1417 num_bits += 8; 1418 } while (num_bits < cast(mz_uint)(8)); 1419 } 1420 1421 r.m_raw_header[counter] = bit_buf & ((1 << (8)) - 1); 1422 bit_buf >>= (8); 1423 num_bits -= (8); 1424 } 1425 else 1426 { 1427 while (pIn_buf_cur >= pIn_buf_end) 1428 { 1429 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1430 r.m_state = 7; 1431 goto common_exit; 1432 case 7: 1433 } 1434 r.m_raw_header[counter] = *pIn_buf_cur++; 1435 } 1436 } 1437 if ((counter = (r.m_raw_header[0] | (r.m_raw_header[1] << 8))) != cast(mz_uint)(0xFFFF ^ (r.m_raw_header[2] | (r.m_raw_header[3] << 8)))) 1438 { 1439 for (;;) 1440 { 1441 status = TINFL_STATUS_FAILED; 1442 r.m_state = 39; 1443 goto common_exit; 1444 case 39: 1445 } 1446 } 1447 while ((counter) && (num_bits)) 1448 { 1449 //TINFL_GET_BITS(51, dist, 8); 1450 if (num_bits < cast(mz_uint)(8)) 1451 { 1452 do 1453 { 1454 while (pIn_buf_cur >= pIn_buf_end) 1455 { 1456 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1457 r.m_state = 51; 1458 goto common_exit; 1459 case 51: 1460 } 1461 c = *pIn_buf_cur++; 1462 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1463 num_bits += 8; 1464 } while (num_bits < cast(mz_uint)(8)); 1465 } 1466 dist = bit_buf & ((1 << (8)) - 1); 1467 bit_buf >>= (8); 1468 num_bits -= (8); 1469 1470 while (pOut_buf_cur >= pOut_buf_end) 1471 { 1472 status = TINFL_STATUS_HAS_MORE_OUTPUT; 1473 r.m_state = 52; 1474 goto common_exit; 1475 case 52: 1476 } 1477 *pOut_buf_cur++ = cast(mz_uint8)dist; 1478 counter--; 1479 } 1480 while (counter) 1481 { 1482 while (pOut_buf_cur >= pOut_buf_end) 1483 { 1484 status = TINFL_STATUS_HAS_MORE_OUTPUT; 1485 r.m_state = 9; 1486 goto common_exit; 1487 case 9: 1488 } 1489 while (pIn_buf_cur >= pIn_buf_end) 1490 { 1491 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1492 r.m_state = 38; 1493 goto common_exit; 1494 case 38: 1495 } 1496 nn = MZ_MIN_size_t(MZ_MIN_size_t(cast(size_t)(pOut_buf_end - pOut_buf_cur), cast(size_t)(pIn_buf_end - pIn_buf_cur)), counter); 1497 TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, nn); 1498 pIn_buf_cur += nn; 1499 pOut_buf_cur += nn; 1500 counter -= cast(mz_uint)nn; 1501 } 1502 } 1503 else if (r.m_type == 3) 1504 { 1505 for (;;) 1506 { 1507 status = TINFL_STATUS_FAILED; 1508 r.m_state = 10; 1509 goto common_exit; 1510 case 10: 1511 } 1512 } 1513 else 1514 { 1515 if (r.m_type == 1) 1516 { 1517 mz_uint8 *p = r.m_code_size_0.ptr; 1518 r.m_table_sizes[0] = 288; 1519 r.m_table_sizes[1] = 32; 1520 TINFL_MEMSET(r.m_code_size_1.ptr, 5, 32); 1521 for (i2 = 0; i2 <= 143; ++i2) 1522 *p++ = 8; 1523 for (; i2 <= 255; ++i2) 1524 *p++ = 9; 1525 for (; i2 <= 279; ++i2) 1526 *p++ = 7; 1527 for (; i2 <= 287; ++i2) 1528 *p++ = 8; 1529 } 1530 else 1531 { 1532 for (counter = 0; counter < 3; counter++) 1533 { 1534 //TINFL_GET_BITS(11, r.m_table_sizes[counter], "\05\05\04"[counter]); 1535 1536 bits_ = "\05\05\04"[counter]; 1537 if (num_bits < cast(mz_uint)(bits_)) 1538 { 1539 do 1540 { 1541 while (pIn_buf_cur >= pIn_buf_end) 1542 { 1543 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1544 r.m_state = 11; 1545 goto common_exit; 1546 case 11: 1547 } 1548 c = *pIn_buf_cur++; 1549 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1550 num_bits += 8; 1551 } while (num_bits < cast(mz_uint)(bits_)); 1552 } 1553 1554 r.m_table_sizes[counter] = cast(ubyte)(bit_buf) & ((1 << (bits_)) - 1); 1555 bit_buf >>= (bits_); 1556 num_bits -= (bits_); 1557 1558 r.m_table_sizes[counter] += s_min_table_sizes[counter]; 1559 } 1560 r.m_code_size_2[] = 0; 1561 for (counter = 0; counter < r.m_table_sizes[2]; counter++) 1562 { 1563 //TINFL_GET_BITS(14, s, 3); 1564 bits_ = 3; 1565 if (num_bits < cast(mz_uint)(bits_)) 1566 { 1567 do 1568 { 1569 while (pIn_buf_cur >= pIn_buf_end) 1570 { 1571 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1572 r.m_state = 14; 1573 goto common_exit; 1574 case 14: 1575 } 1576 c = *pIn_buf_cur++; 1577 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1578 num_bits += 8; 1579 } while (num_bits < cast(mz_uint)(bits_)); 1580 } 1581 s = cast(mz_uint)(bit_buf) & ((1 << (bits_)) - 1); 1582 bit_buf >>= (bits_); 1583 num_bits -= (bits_); 1584 1585 r.m_code_size_2[s_length_dezigzag[counter]] = cast(mz_uint8)s; 1586 } 1587 r.m_table_sizes[2] = 19; 1588 } 1589 for (; cast(int)r.m_type >= 0; r.m_type--) 1590 { 1591 1592 pLookUp = r.m_look_up[r.m_type].ptr; 1593 pTree = pTrees[r.m_type]; 1594 pCode_size = pCode_sizes[r.m_type]; 1595 total_syms[] = 0; 1596 TINFL_MEMSET(pLookUp, 0, r.m_look_up[0].sizeof); 1597 tinfl_clear_tree(r); 1598 for (i = 0; i < r.m_table_sizes[r.m_type]; ++i) 1599 total_syms[pCode_size[i]]++; 1600 used_syms = 0, total = 0; 1601 next_code[0] = next_code[1] = 0; 1602 for (i = 1; i <= 15; ++i) 1603 { 1604 used_syms += total_syms[i]; 1605 next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); 1606 } 1607 if ((65536 != total) && (used_syms > 1)) 1608 { 1609 for (;;) 1610 { 1611 status = TINFL_STATUS_FAILED; 1612 r.m_state = 35; 1613 goto common_exit; 1614 case 35: 1615 } 1616 } 1617 for (tree_next = -1, sym_index = 0; sym_index < r.m_table_sizes[r.m_type]; ++sym_index) 1618 { 1619 mz_uint rev_code = 0, l, cur_code, code_size = pCode_size[sym_index]; 1620 if (!code_size) 1621 continue; 1622 cur_code = next_code[code_size]++; 1623 for (l = code_size; l > 0; l--, cur_code >>= 1) 1624 rev_code = (rev_code << 1) | (cur_code & 1); 1625 if (code_size <= TINFL_FAST_LOOKUP_BITS) 1626 { 1627 mz_int16 k = cast(mz_int16)((code_size << 9) | sym_index); 1628 while (rev_code < TINFL_FAST_LOOKUP_SIZE) 1629 { 1630 pLookUp[rev_code] = k; 1631 rev_code += (1 << code_size); 1632 } 1633 continue; 1634 } 1635 if (0 == (tree_cur = pLookUp[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) 1636 { 1637 pLookUp[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = cast(mz_int16)tree_next; 1638 tree_cur = tree_next; 1639 tree_next -= 2; 1640 } 1641 rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); 1642 for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) 1643 { 1644 tree_cur -= ((rev_code >>= 1) & 1); 1645 if (!pTree[-tree_cur - 1]) 1646 { 1647 pTree[-tree_cur - 1] = cast(mz_int16)tree_next; 1648 tree_cur = tree_next; 1649 tree_next -= 2; 1650 } 1651 else 1652 tree_cur = pTree[-tree_cur - 1]; 1653 } 1654 tree_cur -= ((rev_code >>= 1) & 1); 1655 pTree[-tree_cur - 1] = cast(mz_int16)sym_index; 1656 } 1657 if (r.m_type == 2) 1658 { 1659 for (counter = 0; counter < (r.m_table_sizes[0] + r.m_table_sizes[1]);) 1660 { 1661 //TINFL_HUFF_DECODE(16, dist, r.m_look_up[2], r.m_tree_2); 1662 1663 1664 if (num_bits < 15) 1665 { 1666 if ((pIn_buf_end - pIn_buf_cur) < 2) 1667 { 1668 //TINFL_HUFF_BITBUF_FILL(state_index, pLookUp, pTree); 1669 do 1670 { 1671 temp = r.m_look_up[2][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; 1672 if (temp >= 0) 1673 { 1674 code_len2 = temp >> 9; 1675 if ((code_len2) && (num_bits >= code_len2)) 1676 break; 1677 } 1678 else if (num_bits > TINFL_FAST_LOOKUP_BITS) 1679 { 1680 code_len2 = TINFL_FAST_LOOKUP_BITS; 1681 do 1682 { 1683 temp = r.m_tree_2[cast(size_t)(~temp + ((bit_buf >> code_len2++) & 1))]; 1684 } while ((temp < 0) && (num_bits >= (code_len2 + 1))); 1685 if (temp >= 0) 1686 break; 1687 } 1688 //TINFL_GET_BYTE(state_index, c); 1689 while (pIn_buf_cur >= pIn_buf_end) 1690 { 1691 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1692 r.m_state = 16; 1693 goto common_exit; 1694 case 16: 1695 } 1696 c = *pIn_buf_cur++; 1697 1698 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1699 num_bits += 8; 1700 } while (num_bits < 15); 1701 } 1702 else 1703 { 1704 bit_buf |= ((cast(tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | ((cast(tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); 1705 pIn_buf_cur += 2; 1706 num_bits += 16; 1707 } 1708 } 1709 if ((temp = r.m_look_up[2][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) 1710 code_len2 = temp >> 9, temp &= 511; 1711 else 1712 { 1713 code_len2 = TINFL_FAST_LOOKUP_BITS; 1714 do 1715 { 1716 temp = r.m_tree_2[cast(size_t)(~temp + ((bit_buf >> code_len2++) & 1))]; 1717 } while (temp < 0); 1718 } 1719 dist = temp; 1720 bit_buf >>= code_len2; 1721 num_bits -= code_len2; 1722 1723 if (dist < 16) 1724 { 1725 r.m_len_codes[counter++] = cast(mz_uint8)dist; 1726 continue; 1727 } 1728 if ((dist == 16) && (!counter)) 1729 { 1730 for (;;) 1731 { 1732 status = TINFL_STATUS_FAILED; 1733 r.m_state = 17; 1734 goto common_exit; 1735 case 17: 1736 } 1737 } 1738 num_extra = "\02\03\07"[dist - 16]; 1739 1740 //TINFL_GET_BITS(18, s, num_extra); 1741 if (num_bits < cast(mz_uint)(num_extra)) 1742 { 1743 do 1744 { 1745 while (pIn_buf_cur >= pIn_buf_end) 1746 { 1747 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1748 r.m_state = 18; 1749 goto common_exit; 1750 case 18: 1751 } 1752 c2 = *pIn_buf_cur++; 1753 bit_buf |= ((cast(tinfl_bit_buf_t)c2) << num_bits); 1754 num_bits += 8; 1755 } while (num_bits < cast(mz_uint)(num_extra)); 1756 } 1757 s = (cast(uint)bit_buf) & ((1 << (num_extra)) - 1); 1758 bit_buf >>= (num_extra); 1759 num_bits -= (num_extra); 1760 1761 1762 s += "\03\03\013"[dist - 16]; 1763 TINFL_MEMSET(r.m_len_codes.ptr + counter, (dist == 16) ? r.m_len_codes[counter - 1] : 0, s); 1764 counter += s; 1765 } 1766 if ((r.m_table_sizes[0] + r.m_table_sizes[1]) != counter) 1767 { 1768 for (;;) 1769 { 1770 status = TINFL_STATUS_FAILED; 1771 r.m_state = 21; 1772 goto common_exit; 1773 case 21: 1774 } 1775 } 1776 TINFL_MEMCPY(r.m_code_size_0.ptr, r.m_len_codes.ptr, r.m_table_sizes[0]); 1777 TINFL_MEMCPY(r.m_code_size_1.ptr, r.m_len_codes.ptr + r.m_table_sizes[0], r.m_table_sizes[1]); 1778 } 1779 } 1780 for (;;) 1781 { 1782 for (;;) 1783 { 1784 if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) 1785 { 1786 // TINFL_HUFF_DECODE(state_index, sym, pLookUp, pTree) 1787 // TINFL_HUFF_DECODE(23, counter, r.m_look_up[0], r.m_tree_0); 1788 1789 //int temp; 1790 //mz_uint code_len; 1791 if (num_bits < 15) 1792 { 1793 if ((pIn_buf_end - pIn_buf_cur) < 2) 1794 { 1795 //TINFL_HUFF_BITBUF_FILL(state_index, pLookUp, pTree); 1796 do 1797 { 1798 temp = r.m_look_up[0][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; 1799 if (temp >= 0) 1800 { 1801 code_len2 = temp >> 9; 1802 if ((code_len2) && (num_bits >= code_len2)) 1803 break; 1804 } 1805 else if (num_bits > TINFL_FAST_LOOKUP_BITS) 1806 { 1807 code_len2 = TINFL_FAST_LOOKUP_BITS; 1808 do 1809 { 1810 temp = r.m_tree_0[cast(size_t)(~temp + ((bit_buf >> code_len2++) & 1))]; 1811 } while ((temp < 0) && (num_bits >= (code_len2 + 1))); 1812 if (temp >= 0) 1813 break; 1814 } 1815 //TINFL_GET_BYTE(state_index, c); 1816 while (pIn_buf_cur >= pIn_buf_end) 1817 { 1818 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1819 r.m_state = 23; 1820 goto common_exit; 1821 case 23: 1822 } 1823 c = *pIn_buf_cur++; 1824 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1825 num_bits += 8; 1826 } while (num_bits < 15); 1827 } 1828 else 1829 { 1830 bit_buf |= ((cast(tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | ((cast(tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); 1831 pIn_buf_cur += 2; 1832 num_bits += 16; 1833 } 1834 } 1835 if ((temp = r.m_look_up[0][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) 1836 code_len2 = temp >> 9, temp &= 511; 1837 else 1838 { 1839 code_len2 = TINFL_FAST_LOOKUP_BITS; 1840 do 1841 { 1842 temp = r.m_tree_0[cast(size_t)(~temp + ((bit_buf >> code_len2++) & 1))]; 1843 } while (temp < 0); 1844 } 1845 counter = temp; 1846 bit_buf >>= code_len2; 1847 num_bits -= code_len2; 1848 1849 if (counter >= 256) 1850 break; 1851 while (pOut_buf_cur >= pOut_buf_end) 1852 { 1853 status = TINFL_STATUS_HAS_MORE_OUTPUT; 1854 r.m_state = 24; 1855 goto common_exit; 1856 case 24: 1857 } 1858 *pOut_buf_cur++ = cast(mz_uint8)counter; 1859 } 1860 else 1861 { 1862 int sym2; 1863 mz_uint code_len; 1864 1865 if (num_bits < 30) 1866 { 1867 bit_buf |= ((cast(tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); 1868 pIn_buf_cur += 4; 1869 num_bits += 32; 1870 } 1871 if ((sym2 = r.m_look_up[0][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) 1872 code_len = sym2 >> 9; 1873 else 1874 { 1875 code_len = TINFL_FAST_LOOKUP_BITS; 1876 do 1877 { 1878 sym2 = r.m_tree_0[cast(size_t)(~sym2 + ((bit_buf >> code_len++) & 1))]; 1879 } while (sym2 < 0); 1880 } 1881 counter = sym2; 1882 bit_buf >>= code_len; 1883 num_bits -= code_len; 1884 if (counter & 256) 1885 break; 1886 1887 if ((sym2 = r.m_look_up[0][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) 1888 code_len = sym2 >> 9; 1889 else 1890 { 1891 code_len = TINFL_FAST_LOOKUP_BITS; 1892 do 1893 { 1894 sym2 = r.m_tree_0[cast(size_t)(~sym2 + ((bit_buf >> code_len++) & 1))]; 1895 } while (sym2 < 0); 1896 } 1897 bit_buf >>= code_len; 1898 num_bits -= code_len; 1899 1900 pOut_buf_cur[0] = cast(mz_uint8)counter; 1901 if (sym2 & 256) 1902 { 1903 pOut_buf_cur++; 1904 counter = sym2; 1905 break; 1906 } 1907 pOut_buf_cur[1] = cast(mz_uint8)sym2; 1908 pOut_buf_cur += 2; 1909 } 1910 } 1911 if ((counter &= 511) == 256) 1912 break; 1913 1914 num_extra = s_length_extra[counter - 257]; 1915 counter = s_length_base[counter - 257]; 1916 if (num_extra) 1917 { 1918 //TINFL_GET_BITS(25, extra_bits, num_extra); 1919 if (num_bits < cast(mz_uint)(num_extra)) 1920 { 1921 do 1922 { 1923 while (pIn_buf_cur >= pIn_buf_end) 1924 { 1925 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1926 r.m_state = 25; 1927 goto common_exit; 1928 case 25: 1929 } 1930 c = *pIn_buf_cur++; 1931 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1932 num_bits += 8; 1933 } while (num_bits < cast(mz_uint)(num_extra)); 1934 } 1935 extra_bits = (cast(uint)bit_buf) & ((1 << (num_extra)) - 1); 1936 bit_buf >>= (num_extra); 1937 num_bits -= (num_extra); 1938 1939 1940 counter += extra_bits; 1941 } 1942 1943 //TINFL_HUFF_DECODE(state_index, sym, pLookUp, pTree) 1944 //TINFL_HUFF_DECODE(26, dist, r.m_look_up[1], r.m_tree_1); 1945 // int temp; 1946 // mz_uint code_len; 1947 if (num_bits < 15) 1948 { 1949 if ((pIn_buf_end - pIn_buf_cur) < 2) 1950 { 1951 //TINFL_HUFF_BITBUF_FILL(state_index, pLookUp, pTree); 1952 do 1953 { 1954 temp = r.m_look_up[1][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; 1955 if (temp >= 0) 1956 { 1957 code_len2 = temp >> 9; 1958 if ((code_len2) && (num_bits >= code_len2)) 1959 break; 1960 } 1961 else if (num_bits > TINFL_FAST_LOOKUP_BITS) 1962 { 1963 code_len2 = TINFL_FAST_LOOKUP_BITS; 1964 do 1965 { 1966 temp = r.m_tree_1[cast(size_t)(~temp + ((bit_buf >> code_len2++) & 1))]; 1967 } while ((temp < 0) && (num_bits >= (code_len2 + 1))); 1968 if (temp >= 0) 1969 break; 1970 } 1971 //TINFL_GET_BYTE(state_index, c); 1972 while (pIn_buf_cur >= pIn_buf_end) 1973 { 1974 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 1975 r.m_state = 26; 1976 goto common_exit; 1977 case 26: 1978 } 1979 c = *pIn_buf_cur++; 1980 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 1981 num_bits += 8; 1982 } while (num_bits < 15); 1983 } 1984 else 1985 { 1986 bit_buf |= ((cast(tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | ((cast(tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); 1987 pIn_buf_cur += 2; 1988 num_bits += 16; 1989 } 1990 } 1991 if ((temp = r.m_look_up[1][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) 1992 code_len2 = temp >> 9, temp &= 511; 1993 else 1994 { 1995 code_len2 = TINFL_FAST_LOOKUP_BITS; 1996 do 1997 { 1998 temp = r.m_tree_1[cast(size_t)(~temp + ((bit_buf >> code_len2++) & 1))]; 1999 } while (temp < 0); 2000 } 2001 dist = temp; 2002 bit_buf >>= code_len2; 2003 num_bits -= code_len2; 2004 2005 num_extra = s_dist_extra[dist]; 2006 dist = s_dist_base[dist]; 2007 if (num_extra) 2008 { 2009 if (num_bits < cast(mz_uint)(num_extra)) 2010 { 2011 do 2012 { 2013 while (pIn_buf_cur >= pIn_buf_end) 2014 { 2015 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 2016 r.m_state = 27; 2017 goto common_exit; 2018 case 27: 2019 } 2020 c2 = *pIn_buf_cur++; 2021 bit_buf |= ((cast(tinfl_bit_buf_t)c2) << num_bits); 2022 num_bits += 8; 2023 } while (num_bits < cast(mz_uint)(num_extra)); 2024 } 2025 extra_bits = (cast(uint)bit_buf) & ((1 << (num_extra)) - 1); 2026 bit_buf >>= (num_extra); 2027 num_bits -= (num_extra); 2028 2029 dist += extra_bits; 2030 } 2031 2032 dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; 2033 if ((dist == 0 || dist > dist_from_out_buf_start || dist_from_out_buf_start == 0) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) 2034 { 2035 for (;;) 2036 { 2037 status = TINFL_STATUS_FAILED; 2038 r.m_state = 37; 2039 goto common_exit; 2040 case 37: 2041 } 2042 } 2043 2044 pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); 2045 2046 if ((MZ_MAX_voidp(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) 2047 { 2048 while (counter--) 2049 { 2050 while (pOut_buf_cur >= pOut_buf_end) 2051 { 2052 status = TINFL_STATUS_HAS_MORE_OUTPUT; 2053 r.m_state = 53; 2054 goto common_exit; 2055 case 53: 2056 } 2057 *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; 2058 } 2059 continue; 2060 } 2061 else if ((counter >= 9) && (counter <= dist)) 2062 { 2063 const mz_uint8 *pSrc_end = pSrc + (counter & ~7); 2064 do 2065 { 2066 memcpy(pOut_buf_cur, pSrc, mz_uint32.sizeof*2); 2067 pOut_buf_cur += 8; 2068 } while ((pSrc += 8) < pSrc_end); 2069 if ((counter &= 7) < 3) 2070 { 2071 if (counter) 2072 { 2073 pOut_buf_cur[0] = pSrc[0]; 2074 if (counter > 1) 2075 pOut_buf_cur[1] = pSrc[1]; 2076 pOut_buf_cur += counter; 2077 } 2078 continue; 2079 } 2080 } 2081 while(counter>2) 2082 { 2083 pOut_buf_cur[0] = pSrc[0]; 2084 pOut_buf_cur[1] = pSrc[1]; 2085 pOut_buf_cur[2] = pSrc[2]; 2086 pOut_buf_cur += 3; 2087 pSrc += 3; 2088 counter -= 3; 2089 } 2090 if (counter > 0) 2091 { 2092 pOut_buf_cur[0] = pSrc[0]; 2093 if (counter > 1) 2094 pOut_buf_cur[1] = pSrc[1]; 2095 pOut_buf_cur += counter; 2096 } 2097 } 2098 } 2099 } while (!(r.m_final & 1)); 2100 2101 /* Ensure byte alignment and put back any bytes from the bitbuf if we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */ 2102 /* I'm being super conservative here. A number of simplifications can be made to the byte alignment part, and the Adler32 check shouldn't ever need to worry about reading from the bitbuf now. */ 2103 2104 //TINFL_SKIP_BITS(32, num_bits & 7); 2105 if (num_bits < cast(mz_uint)(num_bits & 7)) 2106 { 2107 do 2108 { 2109 while (pIn_buf_cur >= pIn_buf_end) 2110 { 2111 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 2112 r.m_state = 32; 2113 goto common_exit; 2114 case 32: 2115 } 2116 c = *pIn_buf_cur++; 2117 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 2118 num_bits += 8; 2119 } while (num_bits < cast(mz_uint)(num_bits & 7)); 2120 } 2121 bit_buf >>= (num_bits & 7); 2122 num_bits -= (num_bits & 7); 2123 2124 while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8)) 2125 { 2126 --pIn_buf_cur; 2127 num_bits -= 8; 2128 } 2129 bit_buf &= ~(~cast(tinfl_bit_buf_t)0 << num_bits); 2130 assert(!num_bits); /* if this assert fires then we've read beyond the end of non-deflate/zlib streams with following data (such as gzip streams). 
*/ 2131 2132 if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) 2133 { 2134 for (counter = 0; counter < 4; ++counter) 2135 { 2136 if (num_bits) 2137 { 2138 //TINFL_GET_BITS(41, s, 8); 2139 if (num_bits < cast(mz_uint)(8)) 2140 { 2141 do 2142 { 2143 while (pIn_buf_cur >= pIn_buf_end) 2144 { 2145 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 2146 r.m_state = 41; 2147 goto common_exit; 2148 case 41: 2149 } 2150 c = *pIn_buf_cur++; 2151 bit_buf |= ((cast(tinfl_bit_buf_t)c) << num_bits); 2152 num_bits += 8; 2153 } while (num_bits < cast(mz_uint)(8)); 2154 } 2155 s = bit_buf & ((1 << (8)) - 1); 2156 bit_buf >>= (8); 2157 num_bits -= (8); 2158 } 2159 else 2160 { 2161 //TINFL_GET_BYTE(42, s); 2162 while (pIn_buf_cur >= pIn_buf_end) 2163 { 2164 status = (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS; 2165 r.m_state = 42; 2166 goto common_exit; 2167 case 42: 2168 } 2169 s = *pIn_buf_cur++; 2170 } 2171 r.m_z_adler32 = (r.m_z_adler32 << 8) | s; 2172 } 2173 } 2174 2175 for (;;) 2176 { 2177 status = TINFL_STATUS_DONE; 2178 r.m_state = 34; 2179 goto common_exit; 2180 case 34: 2181 } 2182 break; 2183 2184 default: 2185 assert(false); 2186 } 2187 2188 2189 common_exit: 2190 2191 /* As long as we aren't telling the caller that we NEED more input to make forward progress: */ 2192 /* Put back any bytes from the bitbuf in case we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */ 2193 /* We need to be very careful here to NOT push back any bytes we definitely know we need to make forward progress, though, or we'll lock the caller up into an inf loop. */ 2194 if ((status != TINFL_STATUS_NEEDS_MORE_INPUT) && (status != TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS)) 2195 { 2196 while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8)) 2197 { 2198 --pIn_buf_cur; 2199 num_bits -= 8; 2200 } 2201 } 2202 r.m_num_bits = num_bits; 2203 r.m_bit_buf = bit_buf & ~(~cast(tinfl_bit_buf_t)0 << num_bits); 2204 r.m_dist = dist; 2205 r.m_counter = counter; 2206 r.m_num_extra = num_extra; 2207 r.m_dist_from_out_buf_start = dist_from_out_buf_start; 2208 *pIn_buf_size = pIn_buf_cur - pIn_buf_next; 2209 *pOut_buf_size = pOut_buf_cur - pOut_buf_next; 2210 2211 // invalid flag combination 2212 assert( !((decomp_flags & TINFL_FLAG_COMPUTE_ADLER32) && (decomp_flags & TINFL_FLAG_DO_NOT_COMPUTE_ADLER32))); 2213 2214 bool checkAdler32 = (decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0); 2215 if (decomp_flags & TINFL_FLAG_DO_NOT_COMPUTE_ADLER32) 2216 checkAdler32 = false; 2217 2218 if (checkAdler32) 2219 { 2220 const(mz_uint8)*ptr = pOut_buf_next; 2221 size_t buf_len = *pOut_buf_size; 2222 2223 s1 = r.m_check_adler32 & 0xffff; 2224 s2 = r.m_check_adler32 >> 16; 2225 size_t block_len = buf_len % 5552; 2226 while (buf_len) 2227 { 2228 for (i3 = 0; i3 + 7 < block_len; i3 += 8, ptr += 8) 2229 { 2230 s1 += ptr[0], s2 += s1; 2231 s1 += ptr[1], s2 += s1; 2232 s1 += ptr[2], s2 += s1; 2233 s1 += ptr[3], s2 += s1; 2234 s1 += ptr[4], s2 += s1; 2235 s1 += ptr[5], s2 += s1; 2236 s1 += ptr[6], s2 += s1; 2237 s1 += ptr[7], s2 += s1; 2238 } 2239 for (; i3 < block_len; ++i3) 2240 s1 += *ptr++, s2 += s1; 2241 s1 %= 65521U, s2 %= 65521U; 2242 buf_len -= block_len; 2243 block_len = 5552; 2244 } 2245 r.m_check_adler32 = (s2 << 16) + s1; 2246 if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && 
(r.m_check_adler32 != r.m_z_adler32)) 2247 status = TINFL_STATUS_ADLER32_MISMATCH; 2248 } 2249 return status; 2250 } 2251 2252 /* Higher level helper functions. */ 2253 /* High level decompression functions: */ 2254 /* tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc(). */ 2255 /* On entry: */ 2256 /* pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress. */ 2257 /* On return: */ 2258 /* Function returns a pointer to the decompressed data, or null on failure. */ 2259 /* *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on uncompressible data. */ 2260 /* The caller must call mz_free() on the returned block when it's no longer needed. */ 2261 void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) 2262 { 2263 tinfl_decompressor decomp; 2264 void *pBuf = null, pNew_buf; 2265 size_t src_buf_ofs = 0, out_buf_capacity = 0; 2266 *pOut_len = 0; 2267 tinfl_init(&decomp); 2268 for (;;) 2269 { 2270 size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; 2271 tinfl_status status = tinfl_decompress(&decomp, cast(const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, cast(mz_uint8 *)pBuf, pBuf ? cast(mz_uint8 *)pBuf + *pOut_len : null, &dst_buf_size, 2272 (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); 2273 if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) 2274 { 2275 MZ_FREE(pBuf); 2276 *pOut_len = 0; 2277 return null; 2278 } 2279 src_buf_ofs += src_buf_size; 2280 *pOut_len += dst_buf_size; 2281 if (status == TINFL_STATUS_DONE) 2282 break; 2283 new_out_buf_capacity = out_buf_capacity * 2; 2284 if (new_out_buf_capacity < 128) 2285 new_out_buf_capacity = 128; 2286 pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); 2287 if (!pNew_buf) 2288 { 2289 MZ_FREE(pBuf); 2290 *pOut_len = 0; 2291 return null; 2292 } 2293 pBuf = pNew_buf; 2294 out_buf_capacity = new_out_buf_capacity; 2295 } 2296 return pBuf; 2297 } 2298 2299 /* tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory. */ 2300 /* Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success. */ 2301 enum TINFL_DECOMPRESS_MEM_TO_MEM_FAILED = cast(size_t)(-1); 2302 size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) 2303 { 2304 tinfl_decompressor decomp; 2305 tinfl_status status; 2306 tinfl_init(&decomp); 2307 status = tinfl_decompress(&decomp, cast(const mz_uint8 *)pSrc_buf, &src_buf_len, cast(mz_uint8 *)pOut_buf, cast(mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); 2308 return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; 2309 } 2310 2311 2312 /* tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer. */ 2313 /* Returns 1 on success or 0 on failure. 
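   As an illustration only (this sketch is not part of the original miniz.c sources; zlibData and
   onChunk are made-up names), these high-level helpers might be driven like this:

       // One-shot decompression of a whole zlib stream into a heap block:
       size_t outLen = 0;
       void* decoded = tinfl_decompress_mem_to_heap(zlibData.ptr, zlibData.length,
                                                    &outLen, TINFL_FLAG_PARSE_ZLIB_HEADER);
       if (decoded !is null)
       {
           // ... use decoded[0 .. outLen] ...
           mz_free(decoded);   // per the tinfl_decompress_mem_to_heap() comment above
       }

       // A sink callback matching tinfl_put_buf_func_ptr (return 0 to abort, non-zero to continue):
       int onChunk(const(void)* pBuf, int len, void* pUser)
       {
           // write pBuf[0 .. len] somewhere
           return 1;
       }

       // Streaming decompression through that callback, flushed from the internal 32KB dictionary:
       size_t inLen = zlibData.length;
       int ok = tinfl_decompress_mem_to_callback(zlibData.ptr, &inLen,
                                                 &onChunk, null, TINFL_FLAG_PARSE_ZLIB_HEADER);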
*/ 2314 alias tinfl_put_buf_func_ptr = int function(const(void)* pBuf, int len, void *pUser); 2315 int tinfl_decompress_mem_to_callback(const(void)*pIn_buf, 2316 size_t *pIn_buf_size, 2317 tinfl_put_buf_func_ptr pPut_buf_func, 2318 void *pPut_buf_user, int flags) 2319 { 2320 int result = 0; 2321 tinfl_decompressor decomp; 2322 mz_uint8 *pDict = cast(mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); 2323 size_t in_buf_ofs = 0, dict_ofs = 0; 2324 if (!pDict) 2325 return TINFL_STATUS_FAILED; 2326 memset(pDict,0,TINFL_LZ_DICT_SIZE); 2327 tinfl_init(&decomp); 2328 for (;;) 2329 { 2330 size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; 2331 tinfl_status status = tinfl_decompress(&decomp, cast(const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, 2332 (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); 2333 in_buf_ofs += in_buf_size; 2334 if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, cast(int)dst_buf_size, pPut_buf_user))) 2335 break; 2336 if (status != TINFL_STATUS_HAS_MORE_OUTPUT) 2337 { 2338 result = (status == TINFL_STATUS_DONE); 2339 break; 2340 } 2341 dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); 2342 } 2343 MZ_FREE(pDict); 2344 *pIn_buf_size = in_buf_ofs; 2345 return result; 2346 } 2347 2348 /* Allocate the tinfl_decompressor structure in C so that */ 2349 /* non-C language bindings to tinfl_ API don't need to worry about */ 2350 /* structure size and allocation mechanism. */ 2351 tinfl_decompressor *tinfl_decompressor_alloc() 2352 { 2353 tinfl_decompressor *pDecomp = cast(tinfl_decompressor*) MZ_MALLOC(tinfl_decompressor.sizeof); 2354 if (pDecomp) 2355 tinfl_init(pDecomp); 2356 return pDecomp; 2357 } 2358 2359 void tinfl_decompressor_free(tinfl_decompressor *pDecomp) 2360 { 2361 MZ_FREE(pDecomp); 2362 } 2363 2364 2365 // Note: The miniz compressor wasn't translated yet, since it will only benefit PNG encoding which is already not that bad. 2366 // However, PNG decoding being faster is way more intersting. 2367 2368 2369 2370 // miniz_def.h 2371 2372 /* ------------------- Low-level Compression API Definitions */ 2373 2374 /* Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently). */ 2375 enum TDEFL_LESS_MEMORY = 0; 2376 2377 /* tdefl_init() compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search): */ 2378 /* TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression). */ 2379 enum 2380 { 2381 TDEFL_HUFFMAN_ONLY = 0, 2382 TDEFL_DEFAULT_MAX_PROBES = 128, 2383 TDEFL_MAX_PROBES_MASK = 0xFFF 2384 } 2385 2386 /* TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data. */ 2387 /* TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers). */ 2388 /* TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing. */ 2389 /* TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory). 
*/ 2390 /* TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) */ 2391 /* TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. */ 2392 /* TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. */ 2393 /* TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. */ 2394 /* The low 12 bits are reserved to control the max # of hash probes per dictionary lookup (see TDEFL_MAX_PROBES_MASK). */ 2395 enum 2396 { 2397 TDEFL_WRITE_ZLIB_HEADER = 0x01000, 2398 TDEFL_COMPUTE_ADLER32 = 0x02000, 2399 TDEFL_GREEDY_PARSING_FLAG = 0x04000, 2400 TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, 2401 TDEFL_RLE_MATCHES = 0x10000, 2402 TDEFL_FILTER_MATCHES = 0x20000, 2403 TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, 2404 TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 2405 } 2406 2407 2408 /+ 2409 2410 /* Compresses an image to a compressed PNG file in memory. */ 2411 /* On entry: */ 2412 /* pImage, w, h, and num_chans describe the image to compress. num_chans may be 1, 2, 3, or 4. */ 2413 /* The image pitch in bytes per scanline will be w*num_chans. The leftmost pixel on the top scanline is stored first in memory. */ 2414 /* level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL */ 2415 /* If flip is true, the image will be flipped on the Y axis (useful for OpenGL apps). */ 2416 /* On return: */ 2417 /* Function returns a pointer to the compressed data, or null on failure. */ 2418 /* *pLen_out will be set to the size of the PNG image file. */ 2419 /* The caller must mz_free() the returned heap block (which will typically be larger than *pLen_out) when it's no longer needed. */ 2420 MINIZ_EXPORT void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); 2421 MINIZ_EXPORT void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); 2422 2423 +/ 2424 2425 /* Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. */ 2426 alias tdefl_put_buf_func_ptr = mz_bool function(const(void)* pBuf, int len, void *pUser); 2427 2428 enum 2429 { 2430 TDEFL_MAX_HUFF_TABLES = 3, 2431 TDEFL_MAX_HUFF_SYMBOLS_0 = 288, 2432 TDEFL_MAX_HUFF_SYMBOLS_1 = 32, 2433 TDEFL_MAX_HUFF_SYMBOLS_2 = 19, 2434 TDEFL_LZ_DICT_SIZE = 32768, 2435 TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, 2436 TDEFL_MIN_MATCH_LEN = 3, 2437 TDEFL_MAX_MATCH_LEN = 258 2438 } 2439 2440 /* TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes). 
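   In both configurations below it is defined as (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, i.e. the LZ code
   buffer size plus roughly 30% of headroom (31948 bytes when TDEFL_LESS_MEMORY is set, 85196 bytes otherwise).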
*/ 2441 static if(TDEFL_LESS_MEMORY) 2442 { 2443 enum 2444 { 2445 TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, 2446 TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, 2447 TDEFL_MAX_HUFF_SYMBOLS = 288, 2448 TDEFL_LZ_HASH_BITS = 12, 2449 TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, 2450 TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, 2451 TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS 2452 } 2453 } 2454 else 2455 { 2456 enum 2457 { 2458 TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, 2459 TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, 2460 TDEFL_MAX_HUFF_SYMBOLS = 288, 2461 TDEFL_LZ_HASH_BITS = 15, 2462 TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, 2463 TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, 2464 TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS 2465 } 2466 } 2467 2468 /* The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. The low-level functions don't make any heap allocations, unlike the above helper functions. */ 2469 alias tdefl_status = int; 2470 enum : tdefl_status 2471 { 2472 TDEFL_STATUS_BAD_PARAM = -2, 2473 TDEFL_STATUS_PUT_BUF_FAILED = -1, 2474 TDEFL_STATUS_OKAY = 0, 2475 TDEFL_STATUS_DONE = 1 2476 } 2477 2478 /* Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums */ 2479 alias tdefl_flush = int; 2480 enum : tdefl_flush 2481 { 2482 TDEFL_NO_FLUSH = 0, 2483 TDEFL_SYNC_FLUSH = 2, 2484 TDEFL_FULL_FLUSH = 3, 2485 TDEFL_FINISH = 4 2486 } 2487 2488 /* tdefl's compression state structure. */ 2489 struct tdefl_compressor 2490 { 2491 tdefl_put_buf_func_ptr m_pPut_buf_func; 2492 void *m_pPut_buf_user; 2493 mz_uint m_flags; 2494 mz_uint[2] m_max_probes; 2495 int m_greedy_parsing; 2496 mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; 2497 mz_uint8 *m_pLZ_code_buf, m_pLZ_flags, m_pOutput_buf, m_pOutput_buf_end; 2498 mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer; 2499 mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish; 2500 tdefl_status m_prev_return_status; 2501 const(void)* m_pIn_buf; 2502 void* m_pOut_buf; 2503 size_t* m_pIn_buf_size, m_pOut_buf_size; 2504 tdefl_flush m_flush; 2505 const(mz_uint8)* m_pSrc; 2506 size_t m_src_buf_left, m_out_buf_ofs; 2507 mz_uint8[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1] m_dict; 2508 mz_uint16[TDEFL_MAX_HUFF_SYMBOLS][TDEFL_MAX_HUFF_TABLES] m_huff_count; 2509 mz_uint16[TDEFL_MAX_HUFF_SYMBOLS][TDEFL_MAX_HUFF_TABLES] m_huff_codes; 2510 mz_uint8[TDEFL_MAX_HUFF_SYMBOLS][TDEFL_MAX_HUFF_TABLES] m_huff_code_sizes; 2511 mz_uint8[TDEFL_LZ_CODE_BUF_SIZE] m_lz_code_buf; 2512 mz_uint16[TDEFL_LZ_DICT_SIZE] m_next; 2513 mz_uint16[TDEFL_LZ_HASH_SIZE] m_hash; 2514 mz_uint8[TDEFL_OUT_BUF_SIZE] m_output_buf; 2515 } 2516 2517 2518 // miniz_def.c 2519 2520 /* ------------------- Low-level Compression (independent from all decompression API's) */ 2521 2522 /* Purposely making these tables static for faster init and thread safety. 
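   The tables that follow translate recorded LZ codes into DEFLATE symbols: s_tdefl_len_sym and
   s_tdefl_len_extra are indexed by the match-length byte stored in the LZ code buffer and give the
   DEFLATE length symbol (257..285) and its extra-bit count; s_tdefl_small_dist_sym / s_tdefl_small_dist_extra
   cover stored match distances below 512, and s_tdefl_large_dist_sym / s_tdefl_large_dist_extra are indexed
   by the stored distance shifted right by 8 (see tdefl_compress_lz_codes() below).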
*/ 2523 static immutable mz_uint16[256] s_tdefl_len_sym = 2524 [ 2525 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 2526 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 2527 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 2528 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 2529 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 2530 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 2531 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 2532 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285 2533 ]; 2534 2535 static immutable mz_uint8[256] s_tdefl_len_extra = 2536 [ 2537 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2538 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2539 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2540 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 2541 ]; 2542 2543 static immutable mz_uint8[512] s_tdefl_small_dist_sym = 2544 [ 2545 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 2546 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 2547 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 2548 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 2549 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 2550 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 2551 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 
16, 16, 16, 16, 16, 16, 2552 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 2553 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 2554 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 2555 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 2556 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 2557 ]; 2558 2559 static immutable mz_uint8[512] s_tdefl_small_dist_extra = 2560 [ 2561 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 2562 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 2563 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 2564 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 2565 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 2566 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 2567 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 2568 7, 7, 7, 7, 7, 7, 7, 7 2569 ]; 2570 2571 static immutable mz_uint8[128] s_tdefl_large_dist_sym = 2572 [ 2573 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 2574 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 2575 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 2576 ]; 2577 2578 static immutable mz_uint8[128] s_tdefl_large_dist_extra = 2579 [ 2580 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 2581 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 
13, 13, 13, 13, 13, 13, 13, 2582 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 2583 ]; 2584 2585 /* Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values. */ 2586 struct tdefl_sym_freq 2587 { 2588 mz_uint16 m_key, m_sym_index; 2589 } 2590 2591 static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) 2592 { 2593 mz_uint32 total_passes = 2, pass_shift, pass, i; 2594 mz_uint32[256 * 2] hist = void; 2595 tdefl_sym_freq* pCur_syms = pSyms0, 2596 pNew_syms = pSyms1; 2597 hist[] = 0; 2598 for (i = 0; i < num_syms; i++) 2599 { 2600 mz_uint freq = pSyms0[i].m_key; 2601 hist[freq & 0xFF]++; 2602 hist[256 + ((freq >> 8) & 0xFF)]++; 2603 } 2604 while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) 2605 total_passes--; 2606 for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) 2607 { 2608 const mz_uint32 *pHist = &hist[pass << 8]; 2609 mz_uint[256] offsets = void; 2610 mz_uint cur_ofs = 0; 2611 for (i = 0; i < 256; i++) 2612 { 2613 offsets[i] = cur_ofs; 2614 cur_ofs += pHist[i]; 2615 } 2616 for (i = 0; i < num_syms; i++) 2617 pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; 2618 { 2619 tdefl_sym_freq *t = pCur_syms; 2620 pCur_syms = pNew_syms; 2621 pNew_syms = t; 2622 } 2623 } 2624 return pCur_syms; 2625 } 2626 2627 /* tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. */ 2628 static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) 2629 { 2630 int root, leaf, next, avbl, used, dpth; 2631 if (n == 0) 2632 return; 2633 else if (n == 1) 2634 { 2635 A[0].m_key = 1; 2636 return; 2637 } 2638 A[0].m_key += A[1].m_key; 2639 root = 0; 2640 leaf = 2; 2641 for (next = 1; next < n - 1; next++) 2642 { 2643 if (leaf >= n || A[root].m_key < A[leaf].m_key) 2644 { 2645 A[next].m_key = A[root].m_key; 2646 A[root++].m_key = cast(mz_uint16)next; 2647 } 2648 else 2649 A[next].m_key = A[leaf++].m_key; 2650 if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) 2651 { 2652 A[next].m_key = cast(mz_uint16)(A[next].m_key + A[root].m_key); 2653 A[root++].m_key = cast(mz_uint16)next; 2654 } 2655 else 2656 A[next].m_key = cast(mz_uint16)(A[next].m_key + A[leaf++].m_key); 2657 } 2658 A[n - 2].m_key = 0; 2659 for (next = n - 3; next >= 0; next--) 2660 A[next].m_key = cast(ushort)( A[A[next].m_key].m_key + 1 ); 2661 avbl = 1; 2662 used = dpth = 0; 2663 root = n - 2; 2664 next = n - 1; 2665 while (avbl > 0) 2666 { 2667 while (root >= 0 && cast(int)A[root].m_key == dpth) 2668 { 2669 used++; 2670 root--; 2671 } 2672 while (avbl > used) 2673 { 2674 A[next--].m_key = cast(mz_uint16)(dpth); 2675 avbl--; 2676 } 2677 avbl = 2 * used; 2678 dpth++; 2679 used = 0; 2680 } 2681 } 2682 2683 /* Limits canonical Huffman code table's max code size. 
*/ 2684 enum 2685 { 2686 TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 2687 }; 2688 static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) 2689 { 2690 int i; 2691 mz_uint32 total = 0; 2692 if (code_list_len <= 1) 2693 return; 2694 for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) 2695 pNum_codes[max_code_size] += pNum_codes[i]; 2696 for (i = max_code_size; i > 0; i--) 2697 total += ((cast(mz_uint32)pNum_codes[i]) << (max_code_size - i)); 2698 while (total != (1UL << max_code_size)) 2699 { 2700 pNum_codes[max_code_size]--; 2701 for (i = max_code_size - 1; i > 0; i--) 2702 if (pNum_codes[i]) 2703 { 2704 pNum_codes[i]--; 2705 pNum_codes[i + 1] += 2; 2706 break; 2707 } 2708 total--; 2709 } 2710 } 2711 2712 static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) 2713 { 2714 int i, j, l; 2715 int[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE] num_codes = void; 2716 mz_uint[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1] next_code; 2717 num_codes[] = 0; 2718 if (static_table) 2719 { 2720 for (i = 0; i < table_len; i++) 2721 num_codes[d.m_huff_code_sizes[table_num][i]]++; 2722 } 2723 else 2724 { 2725 tdefl_sym_freq[TDEFL_MAX_HUFF_SYMBOLS] syms0; 2726 tdefl_sym_freq[TDEFL_MAX_HUFF_SYMBOLS] syms1; 2727 tdefl_sym_freq* pSyms; 2728 int num_used_syms = 0; 2729 const mz_uint16 *pSym_count = &d.m_huff_count[table_num][0]; 2730 for (i = 0; i < table_len; i++) 2731 if (pSym_count[i]) 2732 { 2733 syms0[num_used_syms].m_key = cast(mz_uint16)pSym_count[i]; 2734 syms0[num_used_syms++].m_sym_index = cast(mz_uint16)i; 2735 } 2736 2737 pSyms = tdefl_radix_sort_syms(num_used_syms, syms0.ptr, syms1.ptr); 2738 tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); 2739 2740 for (i = 0; i < num_used_syms; i++) 2741 num_codes[pSyms[i].m_key]++; 2742 2743 tdefl_huffman_enforce_max_code_size(num_codes.ptr, num_used_syms, code_size_limit); 2744 2745 d.m_huff_code_sizes[table_num][] = 0; 2746 d.m_huff_codes[table_num][] = 0; 2747 for (i = 1, j = num_used_syms; i <= code_size_limit; i++) 2748 for (l = num_codes[i]; l > 0; l--) 2749 d.m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = cast(mz_uint8)(i); 2750 } 2751 2752 next_code[1] = 0; 2753 for (j = 0, i = 2; i <= code_size_limit; i++) 2754 next_code[i] = j = ((j + num_codes[i - 1]) << 1); 2755 2756 for (i = 0; i < table_len; i++) 2757 { 2758 mz_uint rev_code = 0, code, code_size; 2759 if ((code_size = d.m_huff_code_sizes[table_num][i]) == 0) 2760 continue; 2761 code = next_code[code_size]++; 2762 for (l = code_size; l > 0; l--, code >>= 1) 2763 rev_code = (rev_code << 1) | (code & 1); 2764 d.m_huff_codes[table_num][i] = cast(mz_uint16)rev_code; 2765 } 2766 } 2767 2768 /* 2769 #define TDEFL_PUT_BITS(b, l) 2770 do 2771 { 2772 mz_uint bits = b; 2773 mz_uint len = l; 2774 assert(bits <= ((1U << len) - 1U)); 2775 d.m_bit_buffer |= (bits << d.m_bits_in); 2776 d.m_bits_in += len; 2777 while (d.m_bits_in >= 8) 2778 { 2779 if (d.m_pOutput_buf < d.m_pOutput_buf_end) 2780 *d.m_pOutput_buf++ = (mz_uint8)(d.m_bit_buffer); 2781 d.m_bit_buffer >>= 8; 2782 d.m_bits_in -= 8; 2783 } 2784 } 2785 */ 2786 2787 /* 2788 2789 */ 2790 /* 2791 2792 */ 2793 2794 __gshared static immutable mz_uint8[19] s_tdefl_packed_code_size_syms_swizzle = 2795 [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; 2796 2797 static void tdefl_start_dynamic_block(tdefl_compressor *d) 2798 { 2799 int num_lit_codes, num_dist_codes, num_bit_lengths; 2800 mz_uint i, 
total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; 2801 mz_uint8[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1] code_sizes_to_pack; 2802 mz_uint8[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1] packed_code_sizes; 2803 mz_uint8 prev_code_size = 0xFF; 2804 2805 d.m_huff_count[0][256] = 1; 2806 2807 tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); 2808 tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); 2809 2810 for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) 2811 if (d.m_huff_code_sizes[0][num_lit_codes - 1]) 2812 break; 2813 for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) 2814 if (d.m_huff_code_sizes[1][num_dist_codes - 1]) 2815 break; 2816 2817 memcpy(code_sizes_to_pack.ptr, &d.m_huff_code_sizes[0][0], num_lit_codes); 2818 memcpy(code_sizes_to_pack.ptr + num_lit_codes, &d.m_huff_code_sizes[1][0], num_dist_codes); 2819 total_code_sizes_to_pack = num_lit_codes + num_dist_codes; 2820 num_packed_code_sizes = 0; 2821 rle_z_count = 0; 2822 rle_repeat_count = 0; 2823 2824 memset(&d.m_huff_count[2][0], 0, (d.m_huff_count[2][0]).sizeof * TDEFL_MAX_HUFF_SYMBOLS_2); 2825 2826 void TDEFL_RLE_PREV_CODE_SIZE() pure nothrow @nogc 2827 { 2828 if (rle_repeat_count) 2829 { 2830 if (rle_repeat_count < 3) 2831 { 2832 d.m_huff_count[2][prev_code_size] = cast(mz_uint16)(d.m_huff_count[2][prev_code_size] + rle_repeat_count); 2833 while (rle_repeat_count--) 2834 packed_code_sizes[num_packed_code_sizes++] = prev_code_size; 2835 } 2836 else 2837 { 2838 d.m_huff_count[2][16] = cast(mz_uint16)(d.m_huff_count[2][16] + 1); 2839 packed_code_sizes[num_packed_code_sizes++] = 16; 2840 packed_code_sizes[num_packed_code_sizes++] = cast(mz_uint8)(rle_repeat_count - 3); 2841 } 2842 rle_repeat_count = 0; 2843 } 2844 } 2845 2846 void TDEFL_RLE_ZERO_CODE_SIZE() 2847 { 2848 if (rle_z_count) 2849 { 2850 if (rle_z_count < 3) 2851 { 2852 d.m_huff_count[2][0] = cast(mz_uint16)(d.m_huff_count[2][0] + rle_z_count); 2853 while (rle_z_count--) 2854 packed_code_sizes[num_packed_code_sizes++] = 0; 2855 } 2856 else if (rle_z_count <= 10) 2857 { 2858 d.m_huff_count[2][17] = cast(mz_uint16)(d.m_huff_count[2][17] + 1); 2859 packed_code_sizes[num_packed_code_sizes++] = 17; 2860 packed_code_sizes[num_packed_code_sizes++] = cast(mz_uint8)(rle_z_count - 3); 2861 } 2862 else 2863 { 2864 d.m_huff_count[2][18] = cast(mz_uint16)(d.m_huff_count[2][18] + 1); 2865 packed_code_sizes[num_packed_code_sizes++] = 18; 2866 packed_code_sizes[num_packed_code_sizes++] = cast(mz_uint8)(rle_z_count - 11); 2867 } 2868 rle_z_count = 0; 2869 } 2870 } 2871 2872 void TDEFL_PUT_BITS(mz_uint b, mz_uint l) 2873 { 2874 mz_uint bits = b; 2875 mz_uint len = l; 2876 assert(bits <= ((1U << len) - 1U)); 2877 d.m_bit_buffer |= (bits << d.m_bits_in); 2878 d.m_bits_in += len; 2879 while (d.m_bits_in >= 8) 2880 { 2881 if (d.m_pOutput_buf < d.m_pOutput_buf_end) 2882 *d.m_pOutput_buf++ = cast(mz_uint8)(d.m_bit_buffer); 2883 d.m_bit_buffer >>= 8; 2884 d.m_bits_in -= 8; 2885 } 2886 } 2887 2888 for (i = 0; i < total_code_sizes_to_pack; i++) 2889 { 2890 mz_uint8 code_size = code_sizes_to_pack[i]; 2891 if (!code_size) 2892 { 2893 TDEFL_RLE_PREV_CODE_SIZE(); 2894 if (++rle_z_count == 138) 2895 { 2896 TDEFL_RLE_ZERO_CODE_SIZE(); 2897 } 2898 } 2899 else 2900 { 2901 TDEFL_RLE_ZERO_CODE_SIZE(); 2902 if (code_size != prev_code_size) 2903 { 2904 TDEFL_RLE_PREV_CODE_SIZE(); 2905 d.m_huff_count[2][code_size] = 
cast(mz_uint16)(d.m_huff_count[2][code_size] + 1); 2906 packed_code_sizes[num_packed_code_sizes++] = code_size; 2907 } 2908 else if (++rle_repeat_count == 6) 2909 { 2910 TDEFL_RLE_PREV_CODE_SIZE(); 2911 } 2912 } 2913 prev_code_size = code_size; 2914 } 2915 if (rle_repeat_count) 2916 { 2917 TDEFL_RLE_PREV_CODE_SIZE(); 2918 } 2919 else 2920 { 2921 TDEFL_RLE_ZERO_CODE_SIZE(); 2922 } 2923 2924 tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); 2925 2926 TDEFL_PUT_BITS(2, 2); 2927 2928 TDEFL_PUT_BITS(num_lit_codes - 257, 5); 2929 TDEFL_PUT_BITS(num_dist_codes - 1, 5); 2930 2931 for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) 2932 if (d.m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) 2933 break; 2934 num_bit_lengths = MZ_MAX_int(4, (num_bit_lengths + 1)); 2935 TDEFL_PUT_BITS(num_bit_lengths - 4, 4); 2936 for (i = 0; cast(int)i < num_bit_lengths; i++) 2937 TDEFL_PUT_BITS(d.m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); 2938 2939 for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) 2940 { 2941 mz_uint code = packed_code_sizes[packed_code_sizes_index++]; 2942 assert(code < TDEFL_MAX_HUFF_SYMBOLS_2); 2943 TDEFL_PUT_BITS(d.m_huff_codes[2][code], d.m_huff_code_sizes[2][code]); 2944 if (code >= 16) 2945 TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); 2946 } 2947 } 2948 2949 static void tdefl_start_static_block(tdefl_compressor *d) 2950 { 2951 mz_uint i; 2952 mz_uint8 *p = &d.m_huff_code_sizes[0][0]; 2953 2954 for (i = 0; i <= 143; ++i) 2955 *p++ = 8; 2956 for (; i <= 255; ++i) 2957 *p++ = 9; 2958 for (; i <= 279; ++i) 2959 *p++ = 7; 2960 for (; i <= 287; ++i) 2961 *p++ = 8; 2962 2963 memset(d.m_huff_code_sizes[1].ptr, 5, 32); 2964 2965 tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); 2966 tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); 2967 2968 void TDEFL_PUT_BITS(mz_uint b, mz_uint l) 2969 { 2970 mz_uint bits = b; 2971 mz_uint len = l; 2972 assert(bits <= ((1U << len) - 1U)); 2973 d.m_bit_buffer |= (bits << d.m_bits_in); 2974 d.m_bits_in += len; 2975 while (d.m_bits_in >= 8) 2976 { 2977 if (d.m_pOutput_buf < d.m_pOutput_buf_end) 2978 *d.m_pOutput_buf++ = cast(mz_uint8)(d.m_bit_buffer); 2979 d.m_bit_buffer >>= 8; 2980 d.m_bits_in -= 8; 2981 } 2982 } 2983 2984 TDEFL_PUT_BITS(1, 2); 2985 } 2986 2987 __gshared static immutable mz_uint[17] mz_bitmasks 2988 = [ 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF ]; 2989 2990 2991 static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) 2992 { 2993 mz_uint flags; 2994 mz_uint8 *pLZ_codes; 2995 2996 void TDEFL_PUT_BITS(mz_uint b, mz_uint l) 2997 { 2998 mz_uint bits = b; 2999 mz_uint len = l; 3000 assert(bits <= ((1U << len) - 1U)); 3001 d.m_bit_buffer |= (bits << d.m_bits_in); 3002 d.m_bits_in += len; 3003 while (d.m_bits_in >= 8) 3004 { 3005 if (d.m_pOutput_buf < d.m_pOutput_buf_end) 3006 *d.m_pOutput_buf++ = cast(mz_uint8)(d.m_bit_buffer); 3007 d.m_bit_buffer >>= 8; 3008 d.m_bits_in -= 8; 3009 } 3010 } 3011 3012 flags = 1; 3013 for (pLZ_codes = d.m_lz_code_buf.ptr; pLZ_codes < d.m_pLZ_code_buf; flags >>= 1) 3014 { 3015 if (flags == 1) 3016 flags = *pLZ_codes++ | 0x100; 3017 if (flags & 1) 3018 { 3019 mz_uint sym, num_extra_bits; 3020 mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); 3021 pLZ_codes += 3; 3022 3023 
assert(d.m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); 3024 TDEFL_PUT_BITS(d.m_huff_codes[0][s_tdefl_len_sym[match_len]], d.m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); 3025 TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); 3026 3027 if (match_dist < 512) 3028 { 3029 sym = s_tdefl_small_dist_sym[match_dist]; 3030 num_extra_bits = s_tdefl_small_dist_extra[match_dist]; 3031 } 3032 else 3033 { 3034 sym = s_tdefl_large_dist_sym[match_dist >> 8]; 3035 num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; 3036 } 3037 assert(d.m_huff_code_sizes[1][sym]); 3038 TDEFL_PUT_BITS(d.m_huff_codes[1][sym], d.m_huff_code_sizes[1][sym]); 3039 TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); 3040 } 3041 else 3042 { 3043 mz_uint lit = *pLZ_codes++; 3044 assert(d.m_huff_code_sizes[0][lit]); 3045 TDEFL_PUT_BITS(d.m_huff_codes[0][lit], d.m_huff_code_sizes[0][lit]); 3046 } 3047 } 3048 3049 TDEFL_PUT_BITS(d.m_huff_codes[0][256], d.m_huff_code_sizes[0][256]); 3050 3051 return (d.m_pOutput_buf < d.m_pOutput_buf_end); 3052 } 3053 3054 static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) 3055 { 3056 if (static_block) 3057 tdefl_start_static_block(d); 3058 else 3059 tdefl_start_dynamic_block(d); 3060 return tdefl_compress_lz_codes(d); 3061 } 3062 3063 static int tdefl_flush_block(tdefl_compressor *d, int flush) 3064 { 3065 mz_uint saved_bit_buf, saved_bits_in; 3066 mz_uint8 *pSaved_output_buf; 3067 mz_bool comp_block_succeeded = MZ_FALSE; 3068 int n, use_raw_block = ((d.m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d.m_lookahead_pos - d.m_lz_code_buf_dict_pos) <= d.m_dict_size; 3069 mz_uint8 *pOutput_buf_start = ((d.m_pPut_buf_func == null) && ((*d.m_pOut_buf_size - d.m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) 3070 ? 
(cast(mz_uint8 *)d.m_pOut_buf + d.m_out_buf_ofs) 3071 : d.m_output_buf.ptr; 3072 3073 d.m_pOutput_buf = pOutput_buf_start; 3074 d.m_pOutput_buf_end = d.m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; 3075 3076 assert(!d.m_output_flush_remaining); 3077 d.m_output_flush_ofs = 0; 3078 d.m_output_flush_remaining = 0; 3079 3080 *d.m_pLZ_flags = cast(mz_uint8)(*d.m_pLZ_flags >> d.m_num_flags_left); 3081 d.m_pLZ_code_buf -= (d.m_num_flags_left == 8); 3082 3083 void TDEFL_PUT_BITS(mz_uint b, mz_uint l) 3084 { 3085 mz_uint bits = b; 3086 mz_uint len = l; 3087 assert(bits <= ((1U << len) - 1U)); 3088 d.m_bit_buffer |= (bits << d.m_bits_in); 3089 d.m_bits_in += len; 3090 while (d.m_bits_in >= 8) 3091 { 3092 if (d.m_pOutput_buf < d.m_pOutput_buf_end) 3093 *d.m_pOutput_buf++ = cast(mz_uint8)(d.m_bit_buffer); 3094 d.m_bit_buffer >>= 8; 3095 d.m_bits_in -= 8; 3096 } 3097 } 3098 3099 if ((d.m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d.m_block_index)) 3100 { 3101 const mz_uint8 cmf = 0x78; 3102 mz_uint8 flg, flevel = 3; 3103 mz_uint header, i, mz_un = (s_tdefl_num_probes).sizeof / mz_uint.sizeof; 3104 3105 /* Determine compression level by reversing the process in tdefl_create_comp_flags_from_zip_params() */ 3106 for (i = 0; i < mz_un; i++) 3107 if (s_tdefl_num_probes[i] == (d.m_flags & 0xFFF)) break; 3108 3109 if (i < 2) 3110 flevel = 0; 3111 else if (i < 6) 3112 flevel = 1; 3113 else if (i == 6) 3114 flevel = 2; 3115 3116 header = cmf << 8 | (flevel << 6); 3117 header += 31 - (header % 31); 3118 flg = header & 0xFF; 3119 3120 TDEFL_PUT_BITS(cmf, 8); 3121 TDEFL_PUT_BITS(flg, 8); 3122 } 3123 3124 TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); 3125 3126 pSaved_output_buf = d.m_pOutput_buf; 3127 saved_bit_buf = d.m_bit_buffer; 3128 saved_bits_in = d.m_bits_in; 3129 3130 if (!use_raw_block) 3131 comp_block_succeeded = tdefl_compress_block(d, (d.m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d.m_total_lz_bytes < 48)); 3132 3133 /* If the block gets expanded, forget the current contents of the output buffer and send a raw block instead. */ 3134 if (((use_raw_block) || ((d.m_total_lz_bytes) && ((d.m_pOutput_buf - pSaved_output_buf + 1U) >= d.m_total_lz_bytes))) && 3135 ((d.m_lookahead_pos - d.m_lz_code_buf_dict_pos) <= d.m_dict_size)) 3136 { 3137 mz_uint i; 3138 d.m_pOutput_buf = pSaved_output_buf; 3139 d.m_bit_buffer = saved_bit_buf, d.m_bits_in = saved_bits_in; 3140 TDEFL_PUT_BITS(0, 2); 3141 if (d.m_bits_in) 3142 { 3143 TDEFL_PUT_BITS(0, 8 - d.m_bits_in); 3144 } 3145 for (i = 2; i; --i, d.m_total_lz_bytes ^= 0xFFFF) 3146 { 3147 TDEFL_PUT_BITS(d.m_total_lz_bytes & 0xFFFF, 16); 3148 } 3149 for (i = 0; i < d.m_total_lz_bytes; ++i) 3150 { 3151 TDEFL_PUT_BITS(d.m_dict[(d.m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); 3152 } 3153 } 3154 /* Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes. 
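   If that ever happens, the output position and bit-buffer state saved above are restored (discarding the
   partial dynamic block) and the block is re-emitted with static Huffman codes instead.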
*/ 3155 else if (!comp_block_succeeded) 3156 { 3157 d.m_pOutput_buf = pSaved_output_buf; 3158 d.m_bit_buffer = saved_bit_buf, d.m_bits_in = saved_bits_in; 3159 tdefl_compress_block(d, MZ_TRUE); 3160 } 3161 3162 if (flush) 3163 { 3164 if (flush == TDEFL_FINISH) 3165 { 3166 if (d.m_bits_in) 3167 { 3168 TDEFL_PUT_BITS(0, 8 - d.m_bits_in); 3169 } 3170 if (d.m_flags & TDEFL_WRITE_ZLIB_HEADER) 3171 { 3172 mz_uint i, a = d.m_adler32; 3173 for (i = 0; i < 4; i++) 3174 { 3175 TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); 3176 a <<= 8; 3177 } 3178 } 3179 } 3180 else 3181 { 3182 mz_uint i, z = 0; 3183 TDEFL_PUT_BITS(0, 3); 3184 if (d.m_bits_in) 3185 { 3186 TDEFL_PUT_BITS(0, 8 - d.m_bits_in); 3187 } 3188 for (i = 2; i; --i, z ^= 0xFFFF) 3189 { 3190 TDEFL_PUT_BITS(z & 0xFFFF, 16); 3191 } 3192 } 3193 } 3194 3195 assert(d.m_pOutput_buf < d.m_pOutput_buf_end); 3196 3197 memset(&d.m_huff_count[0][0], 0, (d.m_huff_count[0][0]).sizeof * TDEFL_MAX_HUFF_SYMBOLS_0); 3198 memset(&d.m_huff_count[1][0], 0, (d.m_huff_count[1][0]).sizeof * TDEFL_MAX_HUFF_SYMBOLS_1); 3199 3200 d.m_pLZ_code_buf = d.m_lz_code_buf.ptr + 1; 3201 d.m_pLZ_flags = d.m_lz_code_buf.ptr; 3202 d.m_num_flags_left = 8; 3203 d.m_lz_code_buf_dict_pos += d.m_total_lz_bytes; 3204 d.m_total_lz_bytes = 0; 3205 d.m_block_index++; 3206 3207 if ((n = cast(int)(d.m_pOutput_buf - pOutput_buf_start)) != 0) 3208 { 3209 if (d.m_pPut_buf_func) 3210 { 3211 *d.m_pIn_buf_size = d.m_pSrc - cast(const mz_uint8 *)d.m_pIn_buf; 3212 if (!(*d.m_pPut_buf_func)(d.m_output_buf.ptr, n, d.m_pPut_buf_user)) 3213 return (d.m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); 3214 } 3215 else if (pOutput_buf_start == d.m_output_buf.ptr) 3216 { 3217 int bytes_to_copy = cast(int)MZ_MIN_size_t(cast(size_t)n, cast(size_t)(*d.m_pOut_buf_size - d.m_out_buf_ofs)); 3218 memcpy(cast(mz_uint8 *)d.m_pOut_buf + d.m_out_buf_ofs, d.m_output_buf.ptr, bytes_to_copy); 3219 d.m_out_buf_ofs += bytes_to_copy; 3220 if ((n -= bytes_to_copy) != 0) 3221 { 3222 d.m_output_flush_ofs = bytes_to_copy; 3223 d.m_output_flush_remaining = n; 3224 } 3225 } 3226 else 3227 { 3228 d.m_out_buf_ofs += n; 3229 } 3230 } 3231 3232 return d.m_output_flush_remaining; 3233 } 3234 3235 ushort TDEFL_READ_UNALIGNED_WORD(const(void)* p) 3236 { 3237 return *cast(const(mz_uint16)*)(p); 3238 } 3239 alias TDEFL_READ_UNALIGNED_WORD2 = TDEFL_READ_UNALIGNED_WORD; 3240 3241 version(LittleEndian) 3242 { 3243 mz_uint32 TDEFL_READ_UNALIGNED_WORD32(const(mz_uint8)* p) 3244 { 3245 return *cast(const(mz_uint32)*)(p); 3246 } 3247 } 3248 3249 void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) 3250 { 3251 mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; 3252 mz_uint num_probes_left = d.m_max_probes[match_len >= 32]; 3253 const(mz_uint16)*s = cast(const(mz_uint16)*)(d.m_dict.ptr + pos); 3254 const(mz_uint16)*p, q; 3255 mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d.m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD2(s); 3256 assert(max_match_len <= TDEFL_MAX_MATCH_LEN); 3257 if (max_match_len <= match_len) 3258 return; 3259 for (;;) 3260 { 3261 for (;;) 3262 { 3263 if (--num_probes_left == 0) 3264 return; 3265 3266 next_probe_pos = d.m_next[probe_pos]; 3267 if ((!next_probe_pos) || ((dist = cast(mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) 3268 return; 3269 probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; 3270 if 
(TDEFL_READ_UNALIGNED_WORD(&d.m_dict[probe_pos + match_len - 1]) == c01) 3271 break; 3272 3273 next_probe_pos = d.m_next[probe_pos]; 3274 if ((!next_probe_pos) || ((dist = cast(mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) 3275 return; 3276 probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; 3277 if (TDEFL_READ_UNALIGNED_WORD(&d.m_dict[probe_pos + match_len - 1]) == c01) 3278 break; 3279 3280 next_probe_pos = d.m_next[probe_pos]; 3281 if ((!next_probe_pos) || ((dist = cast(mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) 3282 return; 3283 probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; 3284 if (TDEFL_READ_UNALIGNED_WORD(&d.m_dict[probe_pos + match_len - 1]) == c01) 3285 break; 3286 } 3287 if (!dist) 3288 break; 3289 q = cast(const(mz_uint16)*)(d.m_dict.ptr + probe_pos); 3290 if (TDEFL_READ_UNALIGNED_WORD2(q) != s01) 3291 continue; 3292 p = s; 3293 probe_len = 32; 3294 do 3295 { 3296 } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && 3297 (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0)); 3298 if (!probe_len) 3299 { 3300 *pMatch_dist = dist; 3301 *pMatch_len = MZ_MIN_uint(max_match_len, cast(mz_uint)TDEFL_MAX_MATCH_LEN); 3302 break; 3303 } 3304 else if ((probe_len = (cast(mz_uint)(p - s) * 2) + cast(mz_uint)(*cast(const(mz_uint8)*)p == *cast(const(mz_uint8)*)q)) > match_len) 3305 { 3306 *pMatch_dist = dist; 3307 if ((*pMatch_len = match_len = MZ_MIN_uint(max_match_len, probe_len)) == max_match_len) 3308 break; 3309 c01 = TDEFL_READ_UNALIGNED_WORD(&d.m_dict[pos + match_len - 1]); 3310 } 3311 } 3312 } 3313 3314 version(LittleEndian) 3315 { 3316 mz_bool tdefl_compress_fast(tdefl_compressor *d) 3317 { 3318 /* Faster, minimally featured LZRW1-style match+parse loop with better register utilization. Intended for applications where raw throughput is valued more highly than ratio. 
*/ 3319 mz_uint lookahead_pos = d.m_lookahead_pos, 3320 lookahead_size = d.m_lookahead_size, 3321 dict_size = d.m_dict_size, 3322 total_lz_bytes = d.m_total_lz_bytes, 3323 num_flags_left = d.m_num_flags_left; 3324 mz_uint8 *pLZ_code_buf = d.m_pLZ_code_buf, 3325 pLZ_flags = d.m_pLZ_flags; 3326 mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; 3327 3328 while ((d.m_src_buf_left) || ((d.m_flush) && (lookahead_size))) 3329 { 3330 const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; 3331 mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; 3332 mz_uint num_bytes_to_process = cast(mz_uint)MZ_MIN_size_t(d.m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); 3333 d.m_src_buf_left -= num_bytes_to_process; 3334 lookahead_size += num_bytes_to_process; 3335 3336 while (num_bytes_to_process) 3337 { 3338 mz_uint32 n = MZ_MIN_uint(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); 3339 memcpy(d.m_dict.ptr + dst_pos, d.m_pSrc, n); 3340 if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) 3341 memcpy(d.m_dict.ptr + TDEFL_LZ_DICT_SIZE + dst_pos, d.m_pSrc, MZ_MIN_uint(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); 3342 d.m_pSrc += n; 3343 dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; 3344 num_bytes_to_process -= n; 3345 } 3346 3347 dict_size = MZ_MIN_uint(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); 3348 if ((!d.m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) 3349 break; 3350 3351 while (lookahead_size >= 4) 3352 { 3353 mz_uint cur_match_dist, cur_match_len = 1; 3354 mz_uint8 *pCur_dict = d.m_dict.ptr + cur_pos; 3355 mz_uint first_trigram = TDEFL_READ_UNALIGNED_WORD32(pCur_dict) & 0xFFFFFF; 3356 mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; 3357 mz_uint probe_pos = d.m_hash[hash]; 3358 d.m_hash[hash] = cast(mz_uint16)lookahead_pos; 3359 3360 if ( ((cur_match_dist = cast(mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) 3361 && ((TDEFL_READ_UNALIGNED_WORD32(d.m_dict.ptr + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) 3362 { 3363 const(mz_uint16) *p = cast(const(mz_uint16)*)pCur_dict; 3364 const(mz_uint16) *q = cast(const(mz_uint16)*)(d.m_dict.ptr + probe_pos); 3365 mz_uint32 probe_len = 32; 3366 do 3367 { 3368 } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && 3369 (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0)); 3370 cur_match_len = (cast(mz_uint)(p - cast(const mz_uint16 *)pCur_dict) * 2) + cast(mz_uint)(*cast(const(mz_uint8)*)p == *cast(const(mz_uint8)*)q); 3371 if (!probe_len) 3372 cur_match_len = cur_match_dist ? 
TDEFL_MAX_MATCH_LEN : 0; 3373 3374 if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) 3375 { 3376 cur_match_len = 1; 3377 *pLZ_code_buf++ = cast(mz_uint8)first_trigram; 3378 *pLZ_flags = cast(mz_uint8)(*pLZ_flags >> 1); 3379 d.m_huff_count[0][cast(mz_uint8)first_trigram]++; 3380 } 3381 else 3382 { 3383 mz_uint32 s0, s1; 3384 cur_match_len = MZ_MIN_uint(cur_match_len, lookahead_size); 3385 3386 assert((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); 3387 3388 cur_match_dist--; 3389 3390 pLZ_code_buf[0] = cast(mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); 3391 *cast(mz_uint16 *)(&pLZ_code_buf[1]) = cast(mz_uint16)cur_match_dist; 3392 pLZ_code_buf += 3; 3393 *pLZ_flags = cast(mz_uint8)((*pLZ_flags >> 1) | 0x80); 3394 3395 s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; 3396 s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; 3397 d.m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++; 3398 3399 d.m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; 3400 } 3401 } 3402 else 3403 { 3404 *pLZ_code_buf++ = cast(mz_uint8)first_trigram; 3405 *pLZ_flags = cast(mz_uint8)(*pLZ_flags >> 1); 3406 d.m_huff_count[0][cast(mz_uint8)first_trigram]++; 3407 } 3408 3409 if (--num_flags_left == 0) 3410 { 3411 num_flags_left = 8; 3412 pLZ_flags = pLZ_code_buf++; 3413 } 3414 3415 total_lz_bytes += cur_match_len; 3416 lookahead_pos += cur_match_len; 3417 dict_size = MZ_MIN_uint(dict_size + cur_match_len, cast(mz_uint)TDEFL_LZ_DICT_SIZE); 3418 cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; 3419 assert(lookahead_size >= cur_match_len); 3420 lookahead_size -= cur_match_len; 3421 3422 if (pLZ_code_buf > &d.m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) 3423 { 3424 int n; 3425 d.m_lookahead_pos = lookahead_pos; 3426 d.m_lookahead_size = lookahead_size; 3427 d.m_dict_size = dict_size; 3428 d.m_total_lz_bytes = total_lz_bytes; 3429 d.m_pLZ_code_buf = pLZ_code_buf; 3430 d.m_pLZ_flags = pLZ_flags; 3431 d.m_num_flags_left = num_flags_left; 3432 if ((n = tdefl_flush_block(d, 0)) != 0) 3433 return (n < 0) ? MZ_FALSE : MZ_TRUE; 3434 total_lz_bytes = d.m_total_lz_bytes; 3435 pLZ_code_buf = d.m_pLZ_code_buf; 3436 pLZ_flags = d.m_pLZ_flags; 3437 num_flags_left = d.m_num_flags_left; 3438 } 3439 } 3440 3441 while (lookahead_size) 3442 { 3443 mz_uint8 lit = d.m_dict[cur_pos]; 3444 3445 total_lz_bytes++; 3446 *pLZ_code_buf++ = lit; 3447 *pLZ_flags = cast(mz_uint8)(*pLZ_flags >> 1); 3448 if (--num_flags_left == 0) 3449 { 3450 num_flags_left = 8; 3451 pLZ_flags = pLZ_code_buf++; 3452 } 3453 3454 d.m_huff_count[0][lit]++; 3455 3456 lookahead_pos++; 3457 dict_size = MZ_MIN_uint(dict_size + 1, cast(mz_uint)TDEFL_LZ_DICT_SIZE); 3458 cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; 3459 lookahead_size--; 3460 3461 if (pLZ_code_buf > &d.m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) 3462 { 3463 int n; 3464 d.m_lookahead_pos = lookahead_pos; 3465 d.m_lookahead_size = lookahead_size; 3466 d.m_dict_size = dict_size; 3467 d.m_total_lz_bytes = total_lz_bytes; 3468 d.m_pLZ_code_buf = pLZ_code_buf; 3469 d.m_pLZ_flags = pLZ_flags; 3470 d.m_num_flags_left = num_flags_left; 3471 if ((n = tdefl_flush_block(d, 0)) != 0) 3472 return (n < 0) ? 
MZ_FALSE : MZ_TRUE; 3473 total_lz_bytes = d.m_total_lz_bytes; 3474 pLZ_code_buf = d.m_pLZ_code_buf; 3475 pLZ_flags = d.m_pLZ_flags; 3476 num_flags_left = d.m_num_flags_left; 3477 } 3478 } 3479 } 3480 3481 d.m_lookahead_pos = lookahead_pos; 3482 d.m_lookahead_size = lookahead_size; 3483 d.m_dict_size = dict_size; 3484 d.m_total_lz_bytes = total_lz_bytes; 3485 d.m_pLZ_code_buf = pLZ_code_buf; 3486 d.m_pLZ_flags = pLZ_flags; 3487 d.m_num_flags_left = num_flags_left; 3488 return MZ_TRUE; 3489 } 3490 } 3491 3492 void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) 3493 { 3494 d.m_total_lz_bytes++; 3495 *d.m_pLZ_code_buf++ = lit; 3496 *d.m_pLZ_flags = cast(mz_uint8)(*d.m_pLZ_flags >> 1); 3497 if (--d.m_num_flags_left == 0) 3498 { 3499 d.m_num_flags_left = 8; 3500 d.m_pLZ_flags = d.m_pLZ_code_buf++; 3501 } 3502 d.m_huff_count[0][lit]++; 3503 } 3504 3505 void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) 3506 { 3507 mz_uint32 s0, s1; 3508 3509 assert((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); 3510 3511 d.m_total_lz_bytes += match_len; 3512 3513 d.m_pLZ_code_buf[0] = cast(mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); 3514 3515 match_dist -= 1; 3516 d.m_pLZ_code_buf[1] = cast(mz_uint8)(match_dist & 0xFF); 3517 d.m_pLZ_code_buf[2] = cast(mz_uint8)(match_dist >> 8); 3518 d.m_pLZ_code_buf += 3; 3519 3520 *d.m_pLZ_flags = cast(mz_uint8)((*d.m_pLZ_flags >> 1) | 0x80); 3521 if (--d.m_num_flags_left == 0) 3522 { 3523 d.m_num_flags_left = 8; 3524 d.m_pLZ_flags = d.m_pLZ_code_buf++; 3525 } 3526 3527 s0 = s_tdefl_small_dist_sym[match_dist & 511]; 3528 s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; 3529 d.m_huff_count[1][(match_dist < 512) ? s0 : s1]++; 3530 d.m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; 3531 } 3532 3533 mz_bool tdefl_compress_normal(tdefl_compressor *d) 3534 { 3535 const(mz_uint8)* pSrc = d.m_pSrc; 3536 size_t src_buf_left = d.m_src_buf_left; 3537 tdefl_flush flush = d.m_flush; 3538 3539 while ((src_buf_left) || ((flush) && (d.m_lookahead_size))) 3540 { 3541 mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; 3542 /* Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN. */ 3543 if ((d.m_lookahead_size + d.m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) 3544 { 3545 mz_uint dst_pos = (d.m_lookahead_pos + d.m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d.m_lookahead_pos + d.m_lookahead_size - 2; 3546 mz_uint hash = (d.m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d.m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; 3547 mz_uint num_bytes_to_process = cast(mz_uint)MZ_MIN_size_t(src_buf_left, TDEFL_MAX_MATCH_LEN - d.m_lookahead_size); 3548 const mz_uint8 *pSrc_end = pSrc ? 
pSrc + num_bytes_to_process : null; 3549 src_buf_left -= num_bytes_to_process; 3550 d.m_lookahead_size += num_bytes_to_process; 3551 while (pSrc != pSrc_end) 3552 { 3553 mz_uint8 c = *pSrc++; 3554 d.m_dict[dst_pos] = c; 3555 if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) 3556 d.m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; 3557 hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); 3558 d.m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d.m_hash[hash]; 3559 d.m_hash[hash] = cast(mz_uint16)(ins_pos); 3560 dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; 3561 ins_pos++; 3562 } 3563 } 3564 else 3565 { 3566 while ((src_buf_left) && (d.m_lookahead_size < TDEFL_MAX_MATCH_LEN)) 3567 { 3568 mz_uint8 c = *pSrc++; 3569 mz_uint dst_pos = (d.m_lookahead_pos + d.m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; 3570 src_buf_left--; 3571 d.m_dict[dst_pos] = c; 3572 if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) 3573 d.m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; 3574 if ((++d.m_lookahead_size + d.m_dict_size) >= TDEFL_MIN_MATCH_LEN) 3575 { 3576 mz_uint ins_pos = d.m_lookahead_pos + (d.m_lookahead_size - 1) - 2; 3577 mz_uint hash = ((d.m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d.m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); 3578 d.m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d.m_hash[hash]; 3579 d.m_hash[hash] = cast(mz_uint16)(ins_pos); 3580 } 3581 } 3582 } 3583 d.m_dict_size = MZ_MIN_uint(TDEFL_LZ_DICT_SIZE - d.m_lookahead_size, d.m_dict_size); 3584 if ((!flush) && (d.m_lookahead_size < TDEFL_MAX_MATCH_LEN)) 3585 break; 3586 3587 /* Simple lazy/greedy parsing state machine. */ 3588 len_to_move = 1; 3589 cur_match_dist = 0; 3590 cur_match_len = d.m_saved_match_len ? d.m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); 3591 cur_pos = d.m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; 3592 if (d.m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) 3593 { 3594 if ((d.m_dict_size) && (!(d.m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) 3595 { 3596 mz_uint8 c = d.m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; 3597 cur_match_len = 0; 3598 while (cur_match_len < d.m_lookahead_size) 3599 { 3600 if (d.m_dict[cur_pos + cur_match_len] != c) 3601 break; 3602 cur_match_len++; 3603 } 3604 if (cur_match_len < TDEFL_MIN_MATCH_LEN) 3605 cur_match_len = 0; 3606 else 3607 cur_match_dist = 1; 3608 } 3609 } 3610 else 3611 { 3612 tdefl_find_match(d, d.m_lookahead_pos, d.m_dict_size, d.m_lookahead_size, &cur_match_dist, &cur_match_len); 3613 } 3614 if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d.m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) 3615 { 3616 cur_match_dist = cur_match_len = 0; 3617 } 3618 if (d.m_saved_match_len) 3619 { 3620 if (cur_match_len > d.m_saved_match_len) 3621 { 3622 tdefl_record_literal(d, cast(mz_uint8)d.m_saved_lit); 3623 if (cur_match_len >= 128) 3624 { 3625 tdefl_record_match(d, cur_match_len, cur_match_dist); 3626 d.m_saved_match_len = 0; 3627 len_to_move = cur_match_len; 3628 } 3629 else 3630 { 3631 d.m_saved_lit = d.m_dict[cur_pos]; 3632 d.m_saved_match_dist = cur_match_dist; 3633 d.m_saved_match_len = cur_match_len; 3634 } 3635 } 3636 else 3637 { 3638 tdefl_record_match(d, d.m_saved_match_len, d.m_saved_match_dist); 3639 len_to_move = d.m_saved_match_len - 1; 3640 d.m_saved_match_len = 0; 3641 } 3642 } 3643 else if (!cur_match_dist) 3644 tdefl_record_literal(d, d.m_dict[MZ_MIN_size_t(cur_pos, (d.m_dict).sizeof - 1)]); 3645 else if 
((d.m_greedy_parsing) || (d.m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) 3646 { 3647 tdefl_record_match(d, cur_match_len, cur_match_dist); 3648 len_to_move = cur_match_len; 3649 } 3650 else 3651 { 3652 d.m_saved_lit = d.m_dict[MZ_MIN_size_t(cur_pos, (d.m_dict).sizeof - 1)]; 3653 d.m_saved_match_dist = cur_match_dist; 3654 d.m_saved_match_len = cur_match_len; 3655 } 3656 /* Move the lookahead forward by len_to_move bytes. */ 3657 d.m_lookahead_pos += len_to_move; 3658 assert(d.m_lookahead_size >= len_to_move); 3659 d.m_lookahead_size -= len_to_move; 3660 d.m_dict_size = MZ_MIN_uint(d.m_dict_size + len_to_move, cast(mz_uint)TDEFL_LZ_DICT_SIZE); 3661 /* Check if it's time to flush the current LZ codes to the internal output buffer. */ 3662 if ((d.m_pLZ_code_buf > &d.m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || 3663 ((d.m_total_lz_bytes > 31 * 1024) && ((((cast(mz_uint)(d.m_pLZ_code_buf - d.m_lz_code_buf.ptr) * 115) >> 7) >= d.m_total_lz_bytes) || (d.m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) 3664 { 3665 int n; 3666 d.m_pSrc = pSrc; 3667 d.m_src_buf_left = src_buf_left; 3668 if ((n = tdefl_flush_block(d, 0)) != 0) 3669 return (n < 0) ? MZ_FALSE : MZ_TRUE; 3670 } 3671 } 3672 3673 d.m_pSrc = pSrc; 3674 d.m_src_buf_left = src_buf_left; 3675 return MZ_TRUE; 3676 } 3677 3678 tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) 3679 { 3680 if (d.m_pIn_buf_size) 3681 { 3682 *d.m_pIn_buf_size = d.m_pSrc - cast(const mz_uint8 *)d.m_pIn_buf; 3683 } 3684 3685 if (d.m_pOut_buf_size) 3686 { 3687 size_t n = MZ_MIN_size_t(*d.m_pOut_buf_size - d.m_out_buf_ofs, d.m_output_flush_remaining); 3688 memcpy(cast(mz_uint8 *)d.m_pOut_buf + d.m_out_buf_ofs, d.m_output_buf.ptr + d.m_output_flush_ofs, n); 3689 d.m_output_flush_ofs += cast(mz_uint)n; 3690 d.m_output_flush_remaining -= cast(mz_uint)n; 3691 d.m_out_buf_ofs += n; 3692 3693 *d.m_pOut_buf_size = d.m_out_buf_ofs; 3694 } 3695 3696 return (d.m_finished && !d.m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; 3697 } 3698 3699 /* Compresses a block of data, consuming as much of the specified input buffer as possible, and writing as much compressed data to the specified output buffer as possible. */ 3700 tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) 3701 { 3702 if (!d) 3703 { 3704 if (pIn_buf_size) 3705 *pIn_buf_size = 0; 3706 if (pOut_buf_size) 3707 *pOut_buf_size = 0; 3708 return TDEFL_STATUS_BAD_PARAM; 3709 } 3710 3711 d.m_pIn_buf = pIn_buf; 3712 d.m_pIn_buf_size = pIn_buf_size; 3713 d.m_pOut_buf = pOut_buf; 3714 d.m_pOut_buf_size = pOut_buf_size; 3715 d.m_pSrc = cast(const mz_uint8 *)(pIn_buf); 3716 d.m_src_buf_left = pIn_buf_size ? 
*pIn_buf_size : 0;
    d.m_out_buf_ofs = 0;
    d.m_flush = flush;

    if (((d.m_pPut_buf_func != null) == ((pOut_buf != null) || (pOut_buf_size != null))) || (d.m_prev_return_status != TDEFL_STATUS_OKAY) ||
        (d.m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf))
    {
        if (pIn_buf_size)
            *pIn_buf_size = 0;
        if (pOut_buf_size)
            *pOut_buf_size = 0;
        return (d.m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
    }
    d.m_wants_to_finish |= (flush == TDEFL_FINISH);

    if ((d.m_output_flush_remaining) || (d.m_finished))
        return (d.m_prev_return_status = tdefl_flush_output_buffer(d));

    version(LittleEndian)
    {
        if (((d.m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
            ((d.m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
            ((d.m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0))
        {
            if (!tdefl_compress_fast(d))
                return d.m_prev_return_status;
        }
        else
        {
            if (!tdefl_compress_normal(d))
                return d.m_prev_return_status;
        }
    }
    else
    {
        if (!tdefl_compress_normal(d))
            return d.m_prev_return_status;
    }

    if ((d.m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
        d.m_adler32 = cast(mz_uint32)mz_adler32(d.m_adler32, cast(const mz_uint8 *)pIn_buf, d.m_pSrc - cast(const mz_uint8 *)pIn_buf);

    if ((flush) && (!d.m_lookahead_size) && (!d.m_src_buf_left) && (!d.m_output_flush_remaining))
    {
        if (tdefl_flush_block(d, flush) < 0)
            return d.m_prev_return_status;
        d.m_finished = (flush == TDEFL_FINISH);
        if (flush == TDEFL_FULL_FLUSH)
        {
            d.m_hash[] = 0;
            d.m_next[] = 0;
            d.m_dict_size = 0;
        }
    }

    return (d.m_prev_return_status = tdefl_flush_output_buffer(d));
}

/* tdefl_compress_buffer() is only usable when tdefl_init() was called with a non-null tdefl_put_buf_func_ptr. */
/* tdefl_compress_buffer() always consumes the entire input buffer. */
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
{
    assert(d.m_pPut_buf_func);
    return tdefl_compress(d, pIn_buf, &in_buf_size, null, null, flush);
}
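/* Example (illustrative sketch, not part of the original miniz sources): driving the
   low-level streaming API above. Everything used here is declared in this module; only
   the buffers and the chosen flags value (128 probes plus a zlib header) are hypothetical.
   A real caller would keep calling tdefl_compress() while it returns TDEFL_STATUS_OKAY,
   draining/refilling the in/out buffers between calls.

       immutable(ubyte)[] srcData = cast(immutable(ubyte)[]) "hello hello hello hello";
       mz_uint8[65536] dst;                                   // hypothetical output buffer
       tdefl_compressor* comp = tdefl_compressor_alloc();
       if (comp !is null && tdefl_init(comp, null, null, TDEFL_WRITE_ZLIB_HEADER | 128) == TDEFL_STATUS_OKAY)
       {
           size_t inSize  = srcData.length;
           size_t outSize = dst.length;
           tdefl_status st = tdefl_compress(comp, srcData.ptr, &inSize, dst.ptr, &outSize, TDEFL_FINISH);
           // On TDEFL_STATUS_DONE, dst[0 .. outSize] holds the complete zlib stream.
       }
       tdefl_compressor_free(comp);
*/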
/* Initializes the compressor. */
/* There is no corresponding deinit() function because the tdefl API's do not dynamically allocate memory. */
/* pPut_buf_func: If non-null, output data will be supplied to this callback. In this case, the user should call the tdefl_compress_buffer() API for compression. */
/* If pPut_buf_func is null, the user should always call the tdefl_compress() API. */
/* flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.) */
tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    d.m_pPut_buf_func = pPut_buf_func;
    d.m_pPut_buf_user = pPut_buf_user;
    d.m_flags = cast(mz_uint)(flags);
    d.m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
    d.m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
    d.m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
    {
        d.m_hash[] = 0;
    }
    d.m_lookahead_pos = d.m_lookahead_size = d.m_dict_size = d.m_total_lz_bytes = d.m_lz_code_buf_dict_pos = d.m_bits_in = 0;
    d.m_output_flush_ofs = d.m_output_flush_remaining = d.m_finished = d.m_block_index = d.m_bit_buffer = d.m_wants_to_finish = 0;
    d.m_pLZ_code_buf = d.m_lz_code_buf.ptr + 1;
    d.m_pLZ_flags = d.m_lz_code_buf.ptr;
    *d.m_pLZ_flags = 0;
    d.m_num_flags_left = 8;
    d.m_pOutput_buf = d.m_output_buf.ptr;
    d.m_pOutput_buf_end = d.m_output_buf.ptr;
    d.m_prev_return_status = TDEFL_STATUS_OKAY;
    d.m_saved_match_dist = d.m_saved_match_len = d.m_saved_lit = 0;
    d.m_adler32 = 1;
    d.m_pIn_buf = null;
    d.m_pOut_buf = null;
    d.m_pIn_buf_size = null;
    d.m_pOut_buf_size = null;
    d.m_flush = TDEFL_NO_FLUSH;
    d.m_pSrc = null;
    d.m_src_buf_left = 0;
    d.m_out_buf_ofs = 0;
    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
    {
        d.m_dict[] = 0;
    }
    memset(&d.m_huff_count[0][0], 0, (d.m_huff_count[0][0]).sizeof * TDEFL_MAX_HUFF_SYMBOLS_0);
    memset(&d.m_huff_count[1][0], 0, (d.m_huff_count[1][0]).sizeof * TDEFL_MAX_HUFF_SYMBOLS_1);
    return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
{
    return d.m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
{
    return d.m_adler32;
}
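/* Example (illustrative sketch, not part of the original miniz sources): the callback-driven
   mode documented above, i.e. tdefl_init() with a non-null put-buf callback followed by
   tdefl_compress_buffer(). This is essentially what tdefl_compress_mem_to_output() below does.
   The callback and buffer here are hypothetical; the callback must match
   tdefl_put_buf_func_ptr (see tdefl_output_buffer_putter below for the expected shape).

       mz_bool myPutter(const(void)* pBuf, int len, void* pUser)
       {
           // Hypothetical sink: forward `len` compressed bytes starting at pBuf.
           return MZ_TRUE;
       }

       tdefl_compressor* comp = tdefl_compressor_alloc();
       if (comp !is null && tdefl_init(comp, &myPutter, null, 128) == TDEFL_STATUS_OKAY)
       {
           immutable(ubyte)[] src = cast(immutable(ubyte)[]) "some data to deflate";
           tdefl_status st = tdefl_compress_buffer(comp, src.ptr, src.length, TDEFL_FINISH);
           // st == TDEFL_STATUS_DONE on success; all output went through myPutter.
       }
       tdefl_compressor_free(comp);
*/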
/* tdefl_compress_mem_to_output() compresses a block to an output stream. The high-level helpers below use this function internally. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    tdefl_compressor *pComp;
    mz_bool succeeded;
    if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
        return MZ_FALSE;
    pComp = cast(tdefl_compressor *) MZ_MALLOC(tdefl_compressor.sizeof);
    if (!pComp)
        return MZ_FALSE;
    succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
    succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
    MZ_FREE(pComp);
    return succeeded;
}

struct tdefl_output_buffer
{
    size_t m_size = 0,
           m_capacity = 0;
    mz_uint8 *m_pBuf = null;
    mz_bool m_expandable = 0;
}

mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
{
    tdefl_output_buffer *p = cast(tdefl_output_buffer *)pUser;
    size_t new_size = p.m_size + len;
    if (new_size > p.m_capacity)
    {
        size_t new_capacity = p.m_capacity;
        mz_uint8 *pNew_buf;
        if (!p.m_expandable)
            return MZ_FALSE;
        do
        {
            new_capacity = MZ_MAX_size_t(128U, new_capacity << 1U);
        } while (new_size > new_capacity);
        pNew_buf = cast(mz_uint8 *)MZ_REALLOC(p.m_pBuf, new_capacity);
        if (!pNew_buf)
            return MZ_FALSE;
        p.m_pBuf = pNew_buf;
        p.m_capacity = new_capacity;
    }
    memcpy(cast(mz_uint8 *)p.m_pBuf + p.m_size, pBuf, len);
    p.m_size = new_size;
    return MZ_TRUE;
}

/* High level compression functions: */
/* tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc(). */
/* On entry: */
/* pSrc_buf, src_buf_len: Pointer and size of source block to compress. */
/* flags: The maximum number of match finder probes (default is 128), logically OR'd with the TDEFL_* flags above. More probes are slower but improve compression. */
/* On return: */
/* Function returns a pointer to the compressed data, or null on failure. */
/* *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on uncompressible data. */
/* The caller must free() the returned block when it's no longer needed. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
{
    tdefl_output_buffer out_buf;
    if (!pOut_len)
        return null;
    else
        *pOut_len = 0;
    out_buf.m_expandable = MZ_TRUE;
    if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, &tdefl_output_buffer_putter, &out_buf, flags))
        return null;
    *pOut_len = out_buf.m_size;
    return out_buf.m_pBuf;
}

/* tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory. */
/* Returns 0 on failure. */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
{
    tdefl_output_buffer out_buf;
    if (!pOut_buf)
        return 0;
    out_buf.m_pBuf = cast(mz_uint8 *)pOut_buf;
    out_buf.m_capacity = out_buf_len;
    if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, &tdefl_output_buffer_putter, &out_buf, flags))
        return 0;
    return out_buf.m_size;
}

static immutable mz_uint[11] s_tdefl_num_probes = [ 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 ];
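/* Example (illustrative sketch, not part of the original miniz sources): the one-shot helpers
   defined above. Buffer names and the flags value are hypothetical; the heap block returned by
   tdefl_compress_mem_to_heap() comes from MZ_MALLOC/MZ_REALLOC, so release it with MZ_FREE().

       immutable(ubyte)[] src = cast(immutable(ubyte)[]) "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";

       // Compress to a freshly allocated heap block.
       size_t compLen;
       void* pComp = tdefl_compress_mem_to_heap(src.ptr, src.length, &compLen, TDEFL_WRITE_ZLIB_HEADER | 128);
       if (pComp !is null)
           MZ_FREE(pComp);

       // Or compress into a caller-provided buffer; a return of 0 means failure (or it did not fit).
       ubyte[256] outBuf;
       size_t written = tdefl_compress_mem_to_mem(outBuf.ptr, outBuf.length, src.ptr, src.length, TDEFL_WRITE_ZLIB_HEADER | 128);
*/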
/* Create tdefl_compress() flags given zlib-style compression parameters. */
/* level may range from [0,10]; 10 is a "hidden" absolute max level that squeezes out a bit more compression, but throughput may fall off a cliff on some files. */
/* window_bits may be -15 (raw deflate) or 15 (zlib) */
/* strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, MZ_RLE, or MZ_FIXED */
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy)
{
    mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN_int(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
    if (window_bits > 0)
        comp_flags |= TDEFL_WRITE_ZLIB_HEADER;

    if (!level)
        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
    else if (strategy == MZ_FILTERED)
        comp_flags |= TDEFL_FILTER_MATCHES;
    else if (strategy == MZ_HUFFMAN_ONLY)
        comp_flags &= ~TDEFL_MAX_PROBES_MASK;
    else if (strategy == MZ_FIXED)
        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
    else if (strategy == MZ_RLE)
        comp_flags |= TDEFL_RLE_MATCHES;

    return comp_flags;
}


tdefl_compressor *tdefl_compressor_alloc()
{
    return cast(tdefl_compressor *) MZ_MALLOC(tdefl_compressor.sizeof);
}

void tdefl_compressor_free(tdefl_compressor *pComp)
{
    MZ_FREE(pComp);
}
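/* Example (illustrative sketch, not part of the original miniz sources): mapping zlib-style
   parameters onto tdefl flags and using them with one of the helpers above. The level and
   window_bits values here are hypothetical; everything referenced is defined in this module.

       int level = 6;             // zlib-style compression level, 0..10
       int window_bits = 15;      // 15 = write a zlib header + adler-32, -15 = raw deflate
       mz_uint flags = tdefl_create_comp_flags_from_zip_params(level, window_bits, MZ_DEFAULT_STRATEGY);

       immutable(ubyte)[] srcData = cast(immutable(ubyte)[]) "example payload, example payload";
       size_t outLen;
       void* p = tdefl_compress_mem_to_heap(srcData.ptr, srcData.length, &outLen, cast(int)flags);
       if (p !is null)
           MZ_FREE(p);
*/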