The OpenD Programming Language

1 /*
2    Original C comment:
3    
4    LZ4 - Fast LZ compression algorithm
5    Copyright (C) 2011-2015, Yann Collet.
6    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 
8    Redistribution and use in source and binary forms, with or without
9    modification, are permitted provided that the following conditions are
10    met:
11 
12 	   * Redistributions of source code must retain the above copyright
13    notice, this list of conditions and the following disclaimer.
14 	   * Redistributions in binary form must reproduce the above
15    copyright notice, this list of conditions and the following disclaimer
16    in the documentation and/or other materials provided with the
17    distribution.
18 
19    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31    You can contact the author at :
32    - LZ4 source repository : http://code.google.com/p/lz4
33    - LZ4 source mirror : https://github.com/Cyan4973/lz4
34    - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
35 */
36 module gamut.codecs.lz4;
37 
38 
39 version(decodeQOIX)
40     version = hasLZ4;
41 version(encodeQOIX)
42     version = hasLZ4;
43 
44 version(hasLZ4):
45 
46 nothrow @nogc:
47 
48 private import core.stdc.stdlib;
49 private import core.stdc.string;
50 private import std.system;
51 private import std.bitmanip;
52 private import gamut.codecs.ctypes;
53 
/// Version constants
enum int LZ4_VERSION_MAJOR   =   1;    /* for breaking interface changes  */
/// ditto
enum int LZ4_VERSION_MINOR   =   5;    /* for new (non-breaking) interface capabilities */
/// ditto
enum int LZ4_VERSION_RELEASE =   0;    /* for tweaks, bug-fixes, or development */
/// ditto
enum int LZ4_VERSION_NUMBER  = (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE);

/// Tuning constant: hash-table memory budget (value supplied by gamut.codecs.ctypes).
enum int LZ4_MEMORY_USAGE    =  cLZ4_MEMORY_USAGE;
/// Largest input size the compressor accepts.
enum int LZ4_MAX_INPUT_SIZE  =  0x7E000000;   /* 2 113 929 216 bytes */
/// Worst-case compressed size for an `isize`-byte input;
/// returns 0 when the input exceeds LZ4_MAX_INPUT_SIZE.
uint LZ4_COMPRESSBOUND(uint isize)
{
	return (isize > LZ4_MAX_INPUT_SIZE) ? 0 : ((isize) + ((isize)/255) + 16);
}
/// Streaming constants (state sizes expressed in 64-bit words)
enum int LZ4_STREAMSIZE_U64 =  ((1 << (LZ4_MEMORY_USAGE-3)) + 4);
/// ditto
enum int LZ4_STREAMSIZE     =  (LZ4_STREAMSIZE_U64 * 8);
/// ditto
enum int LZ4_STREAMDECODESIZE_U64 =  4;
/// ditto
enum int LZ4_STREAMDECODESIZE     =  (LZ4_STREAMDECODESIZE_U64 * 8);
/// Opaque compression stream state; real layout is LZ4_stream_t_internal.
struct LZ4_stream_t
{
	long[LZ4_STREAMSIZE_U64] table;
}
/// Opaque streaming-decompression state; real layout is LZ4_streamDecode_t_internal.
struct LZ4_streamDecode_t
{
	long[LZ4_STREAMDECODESIZE_U64] table;
}
90 
91 //**********************************************************
92 
version(LDC)
{
    // GP: When measured, did not make a difference tbh.
    // Branch-prediction hints mapped onto llvm.expect under LDC;
    // plain identity functions on other compilers.
    import ldc.intrinsics;
    bool likely(bool b) { return llvm_expect!bool(b, true); }
    bool unlikely(bool b) { return llvm_expect!bool(b, false); }
}
else
{
    bool likely(bool b) { return b; }
    bool unlikely(bool b) { return b; }
}
105 
106 /* *************************************
107    Reading and writing into memory
108 **************************************/
109 
/// True when compiled for a 64-bit address space.
private bool LZ4_64bits()
{
    return (size_t.sizeof == 8);
}

/// True when the target stores multi-byte integers least-significant byte first.
private bool LZ4_isLittleEndian()
{
    version(BigEndian)
        return false;
    else
        return true;
}
122 
123 
124 // FUTURE: use gamut.utils functions
125 
/// Reads a 16-bit little-endian value at `memPtr`, regardless of host endianness.
private ushort LZ4_readLE16(const(void)* memPtr)
{
	version(LittleEndian)
	{
		// Host order matches the stored order: plain unaligned load.
		return (cast(const(ushort)*)(memPtr))[0];
	}
	else
	{
		// Assemble from individual bytes. The previous port read a whole
		// ushort for the low byte ((cast(ushort*)p)[0]) — double-counting
		// the high byte — and assigned void* to ubyte* without a cast,
		// which does not compile in D.
		const(ubyte)* p = cast(const(ubyte)*) memPtr;
		return cast(ushort)(p[0] + (p[1]<<8));
	}
}
138 
/// Writes `value` at `memPtr` in little-endian byte order, regardless of host endianness.
private void LZ4_writeLE16(void* memPtr, ushort value)
{
	version(LittleEndian)
	{
		(cast(ushort*)(memPtr))[0] = value;
	}
	else
	{
		// The previous port assigned void* to ubyte* without a cast, which
		// is a compile error in D whenever version(BigEndian) is active.
		ubyte* p = cast(ubyte*) memPtr;
		p[0] = cast(ubyte) value;
		p[1] = cast(ubyte)(value>>8);
	}
}
152 
153 
/// Unaligned native-endian 16-bit load.
private ushort LZ4_read16(const(void)* memPtr)
{
	return *cast(const(ushort)*) memPtr;
}

/// Unaligned native-endian 32-bit load.
private uint LZ4_read32(const(void)* memPtr)
{
	return *cast(const(uint)*) memPtr;
}

/// Unaligned native-endian 64-bit load.
private ulong LZ4_read64(const(void)* memPtr)
{
	return *cast(const(ulong)*) memPtr;
}

/// Loads one machine word: 8 bytes on 64-bit targets, 4 bytes otherwise.
private size_t LZ4_read_ARCH(const(void)* p)
{
	static if (size_t.sizeof == 8)
		return cast(size_t) LZ4_read64(p);
	else
		return cast(size_t) LZ4_read32(p);
}
180 
181 
/// Copies exactly 4 bytes between non-overlapping regions.
private void LZ4_copy4(void* dstPtr, const(void)* srcPtr)
{
	(cast(ubyte*) dstPtr)[0..4] = (cast(const(ubyte)*) srcPtr)[0..4];
}

/// Copies exactly 8 bytes between non-overlapping regions.
private void LZ4_copy8(void* dstPtr, const(void)* srcPtr)
{
	(cast(ubyte*) dstPtr)[0..8] = (cast(const(ubyte)*) srcPtr)[0..8];
}

/// memcpy variant that advances in 8-byte strides and may therefore write
/// up to 7 bytes past dstEnd; callers guarantee that slack exists.
private void LZ4_wildCopy(void* dstPtr, const(void)* srcPtr, void* dstEnd)
{
	ubyte* d = cast(ubyte*) dstPtr;
	const(ubyte)* s = cast(const(ubyte)*) srcPtr;
	ubyte* e = cast(ubyte*) dstEnd;
	do
	{
		LZ4_copy8(d, s);
		d += 8;
		s += 8;
	}
	while (d < e);
}
200 
201 /**************************************/
202 
/// Given `val` = XOR of two machine words (non-zero), returns how many
/// low-order bytes of the two words are equal: the index of the lowest
/// set bit, divided by 8.
public uint LZ4_NbCommonBytes (size_t val)
{
    import core.bitop: bsf;
    assert(val != 0);
    return bsf(val) / 8;
}
unittest
{
    assert(LZ4_NbCommonBytes(1) == 0);
    assert(LZ4_NbCommonBytes(4) == 0);
    assert(LZ4_NbCommonBytes(256) == 1);
    assert(LZ4_NbCommonBytes(65534) == 0);
    assert(LZ4_NbCommonBytes(0xffffff) == 0);
    assert(LZ4_NbCommonBytes(0x1000000) == 3);
}
218 
219 
220 /********************************
221    Common functions
222 ********************************/
223 
/// Counts how many bytes at `pIn` match the bytes at `pMatch`, never
/// reading at or past `pInLimit`. Compares one machine word at a time,
/// then narrows the tail with 4/2/1-byte steps.
private uint LZ4_count(const(ubyte)* pIn, const(ubyte)* pMatch, const(ubyte)* pInLimit)
{
	const(ubyte)* pStart = pIn;

	// Word-at-a-time scan: XOR of equal words is 0; otherwise the lowest
	// differing byte is located via LZ4_NbCommonBytes.
	while (likely(pIn<pInLimit-(STEPSIZE-1)))
	{
		size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
		if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
		pIn += LZ4_NbCommonBytes(diff);
		return cast(uint)(pIn - pStart);
	}

	// Tail: on 64-bit targets a 4-byte step may still fit before the limit.
	static if (size_t.sizeof == 8) 
	{
		if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) 
		{ 
			pIn+=4; 
			pMatch+=4; 
		}
	}
	if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
	if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
	return cast(uint)(pIn - pStart);
}
248 
/* *************************************
   Local Utils
**************************************/
/// Returns the library version encoded as major*10000 + minor*100 + release.
int LZ4_versionNumber () { return LZ4_VERSION_NUMBER; }
/// Worst-case compressed size for `isize` input bytes (0 if input too large).
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
254 
255 
/* *************************************
   Local Structures and types
**************************************/
private
{
// Actual layout behind the opaque LZ4_stream_t.
struct LZ4_stream_t_internal {
	uint[HASH_SIZE_U32] hashTable;
	uint currentOffset;           // bytes indexed since reset (offset base)
	uint initCheck;               // non-zero => structure not initialized yet
	const(ubyte)* dictionary;
	const(ubyte)* bufferStart;
	uint dictSize;
}

// The C source uses plain-int pseudo-enums; the port keeps that shape.
enum : int { notLimited = 0, limitedOutput = 1 }
alias int limitedOutput_directive;
enum : int { byPtr, byU32, byU16 }   // hash-table cell representation
alias int tableType_t;

enum : int { noDict = 0, withPrefix64k, usingExtDict }
alias int dict_directive;
enum : int { noDictIssue = 0, dictSmall }
alias int dictIssue_directive;

enum : int { endOnOutputSize = 0, endOnInputSize = 1 }
alias int endCondition_directive;
enum : int { full = 0, partial = 1 }
alias int earlyEnd_directive;

}
286 
287 /* *******************************
288    Compression functions
289 ********************************/
290 
/// Hash of a 4-byte sequence. byU16 tables have twice as many cells,
/// so they keep one extra hash bit.
private uint LZ4_hashSequence(uint sequence, tableType_t tableType)
{
	enum uint prime = 2654435761U;
	if (tableType == byU16)
		return (sequence * prime) >> ((MINMATCH*8) - (LZ4_HASHLOG+1));
	else
		return (sequence * prime) >> ((MINMATCH*8) - LZ4_HASHLOG);
}

/// Hash of the 4 bytes at `p`.
private uint LZ4_hashPosition(const(ubyte)* p, tableType_t tableType)
{
	return LZ4_hashSequence(LZ4_read32(p), tableType);
}

/// Stores position `p` into hash cell `h`, encoded per `tableType`
/// (raw pointer, 32-bit offset from srcBase, or 16-bit offset).
private void LZ4_putPositionOnHash(const(ubyte)* p, uint h, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	if (tableType == byPtr)
	{
		(cast(const(ubyte)**) tableBase)[h] = p;
	}
	else if (tableType == byU32)
	{
		(cast(uint*) tableBase)[h] = cast(uint)(p - srcBase);
	}
	else if (tableType == byU16)
	{
		(cast(ushort*) tableBase)[h] = cast(ushort)(p - srcBase);
	}
	else
		assert(0);
}

/// Hashes the 4 bytes at `p` and records `p` in the table.
private void LZ4_putPosition(const(ubyte)* p, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	LZ4_putPositionOnHash(p, LZ4_hashPosition(p, tableType), tableBase, tableType, srcBase);
}

/// Reads back the position stored in hash cell `h`.
private const(ubyte)* LZ4_getPositionOnHash(uint h, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	if (tableType == byPtr)
		return (cast(const(ubyte)**) tableBase)[h];
	if (tableType == byU32)
		return (cast(uint*) tableBase)[h] + srcBase;
	return (cast(ushort*) tableBase)[h] + srcBase;   /* byU16; ensures a return */
}

/// Candidate match position for the 4 bytes at `p`.
private const(ubyte)* LZ4_getPosition(const(ubyte)* p, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	return LZ4_getPositionOnHash(LZ4_hashPosition(p, tableType), tableBase, tableType, srcBase);
}
330 
/**
 * Generic LZ4 block compressor. The directive parameters (outputLimited,
 * tableType, dict, dictIssue) select a specialization; the C original
 * relies on inlining to strip the dead branches.
 * Returns the number of bytes written to dest, or 0 on failure
 * (input too large, byU16 input over 64K, or output limit exceeded).
 */
private int LZ4_compress_generic(
				 void* ctx,
				 const(char)* source,
				 char* dest,
				 int inputSize,
				 int maxOutputSize,
				 limitedOutput_directive outputLimited,
				 tableType_t tableType,
				 dict_directive dict,
				 dictIssue_directive dictIssue)
{
	LZ4_stream_t_internal* dictPtr = cast(LZ4_stream_t_internal*)ctx;

	const(ubyte)* ip = cast(const(ubyte)*) source;
	const(ubyte)* base;
	const(ubyte)* lowLimit;
	const(ubyte)* lowRefLimit = ip - dictPtr.dictSize;
	const(ubyte)* dictionary = dictPtr.dictionary;
	const(ubyte)* dictEnd = dictionary + dictPtr.dictSize;
	const(size_t) dictDelta = dictEnd - cast(const(ubyte)*)source;
	const(ubyte)* anchor = cast(const(ubyte)*) source;   // start of pending literals
	const(ubyte)* iend = ip + inputSize;
	const(ubyte)* mflimit = iend - MFLIMIT;
	const(ubyte)* matchlimit = iend - LASTLITERALS;

	ubyte* op = cast(ubyte*) dest;
	ubyte* olimit = op + maxOutputSize;

	uint forwardH;
	size_t refDelta=0;

	/* Init conditions */
	if (cast(uint)inputSize > cast(uint)LZ4_MAX_INPUT_SIZE) return 0;          /* Unsupported input size, too large (or negative) */
	switch(dict)
	{
	case noDict:
		base = cast(const(ubyte)*)source;
		lowLimit = cast(const(ubyte)*)source;
		break;
	case withPrefix64k:
		base = cast(const(ubyte)*)source - dictPtr.currentOffset;
		lowLimit = cast(const(ubyte)*)source - dictPtr.dictSize;
		break;
	case usingExtDict:
		base = cast(const(ubyte)*)source - dictPtr.currentOffset;
		lowLimit = cast(const(ubyte)*)source;
		break;
	default:
		base = cast(const(ubyte)*)source;
		lowLimit = cast(const(ubyte)*)source;
		break;
	}
	if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
	if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

	/* First ubyte */
	LZ4_putPosition(ip, ctx, tableType, base);
	ip++; forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; )
	{
		const(ubyte)* match;
		ubyte* token;
		{
			const(ubyte)* forwardIp = ip;
			uint step=1;
			uint searchMatchNb = (1U << LZ4_skipTrigger);

			/* Find a match. The skip counter makes the step grow on
			   incompressible data, so the search gives up faster. */
			do {
				uint h = forwardH;
				ip = forwardIp;
				forwardIp += step;
				step = searchMatchNb++ >> LZ4_skipTrigger;

				if (unlikely(forwardIp > mflimit)) goto _last_literals;

				match = LZ4_getPositionOnHash(h, ctx, tableType, base);
				if (dict==usingExtDict)
				{
					// Candidate may live in the external dictionary;
					// refDelta translates it into dictionary space.
					if (match<cast(const(ubyte)*)source)
					{
						refDelta = dictDelta;
						lowLimit = dictionary;
					}
					else
					{
						refDelta = 0;
						lowLimit = cast(const(ubyte)*)source;
					}
				}
				forwardH = LZ4_hashPosition(forwardIp, tableType);
				LZ4_putPositionOnHash(ip, h, ctx, tableType, base);

			} while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
				|| ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
		}

		/* Catch up: extend the match backwards over equal bytes. */
		while ((ip>anchor) && (match+refDelta > lowLimit) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }

		{
			/* Encode Literal length */
			uint litLength = cast(uint)(ip - anchor);
			token = op++;
			if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
				return 0;   /* Check output limit */
			if (litLength>=RUN_MASK)
			{
				int len = cast(int)litLength-RUN_MASK;
				*token=(RUN_MASK<<ML_BITS);
				for(; len >= 255 ; len-=255) *op++ = 255;
				*op++ = cast(ubyte)len;
			}
			else *token = cast(ubyte)(litLength<<ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op+litLength);
			op+=litLength;
		}

_next_match:
		/* Encode Offset */
		LZ4_writeLE16(op, cast(ushort)(ip-match)); op+=2;

		/* Encode MatchLength */
		{
			uint matchLength;

			if ((dict==usingExtDict) && (lowLimit==dictionary))
			{
				const(ubyte)* limit;
				match += refDelta;
				limit = ip + (dictEnd-match);
				if (limit > matchlimit) limit = matchlimit;
				matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
				ip += MINMATCH + matchLength;
				if (ip==limit)
				{
					/* Match ran off the dictionary end; continue it
					   inside the current prefix. */
					uint more = LZ4_count(ip, cast(const(ubyte)*)source, matchlimit);
					matchLength += more;
					ip += more;
				}
			}
			else
			{
				matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
				ip += MINMATCH + matchLength;
			}

			if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
				return 0;    /* Check output limit */
			if (matchLength>=ML_MASK)
			{
				*token += ML_MASK;
				matchLength -= ML_MASK;
				for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
				if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
				*op++ = cast(ubyte)matchLength;
			}
			else *token += cast(ubyte)(matchLength);
		}

		anchor = ip;

		/* Test end of chunk */
		if (ip > mflimit) break;

		/* Fill table */
		LZ4_putPosition(ip-2, ctx, tableType, base);

		/* Test next position: an immediate match produces a zero-literal
		   token and jumps straight back to offset encoding. */
		match = LZ4_getPosition(ip, ctx, tableType, base);
		if (dict==usingExtDict)
		{
			if (match<cast(const(ubyte)*)source)
			{
				refDelta = dictDelta;
				lowLimit = dictionary;
			}
			else
			{
				refDelta = 0;
				lowLimit = cast(const(ubyte)*)source;
			}
		}
		LZ4_putPosition(ip, ctx, tableType, base);
		if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
			&& (match+MAX_DISTANCE>=ip)
			&& (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
		{ token=op++; *token=0; goto _next_match; }

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals */
	{
		int lastRun = cast(int)(iend - anchor);
		if ((outputLimited) && ((cast(char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > cast(uint)maxOutputSize))
			return 0;   /* Check output limit */
		if (lastRun>=cast(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = cast(ubyte) lastRun; }
		else *op++ = cast(ubyte)(lastRun<<ML_BITS);
		memcpy(op, anchor, iend - anchor);
		op += iend-anchor;
	}

	/* End */
	return cast(int) ((cast(char*)op)-dest);
}
544 
545 /// -
/// Compresses `inputSize` bytes from `source` into `dest`.
/// `dest` must hold at least LZ4_compressBound(inputSize) bytes.
/// Returns the compressed size, or 0 on failure.
int LZ4_compress(const(char)* source, char* dest, int inputSize)
{
	ulong[LZ4_STREAMSIZE_U64] ctx;
	// Inputs under 64K fit 16-bit table cells; larger inputs need wider cells.
	tableType_t table = (inputSize < LZ4_64Klimit) ? byU16
	                                               : (LZ4_64bits() ? byU32 : byPtr);
	return LZ4_compress_generic(cast(void*)ctx, source, dest, inputSize, 0,
	                            notLimited, table, noDict, noDictIssue);
}
/// Like LZ4_compress, but fails (returns 0) rather than writing more than
/// `maxOutputSize` bytes to `dest`.
int LZ4_compress_limitedOutput(const(char)* source, char* dest, int inputSize, int maxOutputSize)
{
	ulong[LZ4_STREAMSIZE_U64] ctx;
	tableType_t table = (inputSize < LZ4_64Klimit) ? byU16
	                                               : (LZ4_64bits() ? byU32 : byPtr);
	return LZ4_compress_generic(cast(void*)ctx, source, dest, inputSize, maxOutputSize,
	                            limitedOutput, table, noDict, noDictIssue);
}
569 
570 
571 /* ****************************************
572    Experimental : Streaming functions
573 *****************************************/
574 
/**
 * LZ4_resetStream
 * Use this once to (re)initialize an allocated LZ4_stream_t before
 * streaming compression: it simply zero-fills the whole structure.
 * (The C comment above it described LZ4_initStream returning 1/0; this
 * port's reset returns nothing and cannot fail.)
 */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
	MEM_INIT(LZ4_stream, 0, LZ4_stream_t.sizeof);
}
/// Allocates (via ALLOCATOR from gamut.codecs.ctypes) and zero-initializes
/// an LZ4_stream_t.
/// NOTE(review): the ALLOCATOR result is not checked for null before the
/// reset — confirm ALLOCATOR cannot fail in this codebase.
LZ4_stream_t* LZ4_createStream()
{
	LZ4_stream_t* lz4s = cast(LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
	static assert(LZ4_STREAMSIZE >= LZ4_stream_t_internal.sizeof);    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
	LZ4_resetStream(lz4s);
	return lz4s;
}
/// Frees a stream created with LZ4_createStream. Always returns 0.
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
	FREEMEM(LZ4_stream);
	return (0);
}
598 
/// Loads a dictionary (at most its trailing 64 KB) into the stream's hash
/// table so subsequent *_continue compression can reference it.
/// Returns the number of dictionary bytes actually retained.
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const(char)* dictionary, int dictSize)
{
	LZ4_stream_t_internal* dict = cast(LZ4_stream_t_internal*) LZ4_dict;
	const(ubyte)* p = cast(const(ubyte)*)dictionary;
	const(ubyte)* dictEnd = p + dictSize;
	const(ubyte)* base;

	if (dict.initCheck) LZ4_resetStream(LZ4_dict);                         /* Uninitialized structure detected */

	if (dictSize < MINMATCH)
	{
		// Too short to ever produce a match: record an empty dictionary.
		dict.dictionary = null;
		dict.dictSize = 0;
		return 0;
	}

	// Only the last 64 KB can be referenced, offsets being 16-bit.
	if (p <= dictEnd - 64*KB) p = dictEnd - 64*KB;
	base = p - dict.currentOffset;
	dict.dictionary = p;
	dict.dictSize = cast(uint)(dictEnd - p);
	dict.currentOffset += dict.dictSize;

	// Index one position every 3 bytes.
	while (p <= dictEnd-MINMATCH)
	{
		LZ4_putPosition(p, dict, byU32, base);
		p+=3;
	}

	return dict.dictSize;
}
630 
631 
/// Rescales the stream's hash table when currentOffset approaches 2^31 or
/// would exceed the source address, keeping all stored offsets valid while
/// resetting currentOffset to 64 KB.
private void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const(ubyte)* src)
{
	if ((LZ4_dict.currentOffset > 0x80000000) ||
		(cast(size_t)LZ4_dict.currentOffset > cast(size_t)src))   /* address space overflow */
	{
		/* rescale hash table */
		uint delta = LZ4_dict.currentOffset - 64*KB;
		const(ubyte)* dictEnd = LZ4_dict.dictionary + LZ4_dict.dictSize;
		int i;
		for (i=0; i<HASH_SIZE_U32; i++)
		{
			// Entries older than the kept 64 KB window are dropped to 0.
			if (LZ4_dict.hashTable[i] < delta) LZ4_dict.hashTable[i]=0;
			else LZ4_dict.hashTable[i] -= delta;
		}
		LZ4_dict.currentOffset = 64*KB;
		if (LZ4_dict.dictSize > 64*KB) LZ4_dict.dictSize = 64*KB;
		LZ4_dict.dictionary = dictEnd - LZ4_dict.dictSize;
	}
}
651 
/// Streaming compression core shared by LZ4_compress_continue and
/// LZ4_compress_limitedOutput_continue. Chooses prefix or external
/// dictionary mode depending on whether `source` directly follows the
/// previous block, then updates the stream's dictionary bookkeeping.
int LZ4_compress_continue_generic (void* LZ4_stream, const(char)* source, char* dest, int inputSize,
												int maxOutputSize, limitedOutput_directive limit)
{
	LZ4_stream_t_internal* streamPtr = cast(LZ4_stream_t_internal*)LZ4_stream;
	const(ubyte)* dictEnd = streamPtr.dictionary + streamPtr.dictSize;

	const(ubyte)* smallest = cast(const(ubyte)*) source;
	if (streamPtr.initCheck) return 0;   /* Uninitialized structure detected */
	if ((streamPtr.dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
	LZ4_renormDictT(streamPtr, smallest);

	/* Check overlapping input/dictionary space */
	{
		const(ubyte)* sourceEnd = cast(const(ubyte)*) source + inputSize;
		if ((sourceEnd > streamPtr.dictionary) && (sourceEnd < dictEnd))
		{
			// Input overwrites part of the dictionary: shrink it to the
			// surviving tail (dropped entirely below 4 bytes).
			streamPtr.dictSize = cast(uint)(dictEnd - sourceEnd);
			if (streamPtr.dictSize > 64*KB) streamPtr.dictSize = 64*KB;
			if (streamPtr.dictSize < 4) streamPtr.dictSize = 0;
			streamPtr.dictionary = dictEnd - streamPtr.dictSize;
		}
	}

	/* prefix mode : source data follows dictionary */
	if (dictEnd == cast(const(ubyte)*)source)
	{
		int result;
		if ((streamPtr.dictSize < 64*KB) && (streamPtr.dictSize < streamPtr.currentOffset))
			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, dictSmall);
		else
			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, noDictIssue);
		streamPtr.dictSize += cast(uint)inputSize;
		streamPtr.currentOffset += cast(uint)inputSize;
		return result;
	}

	/* external dictionary mode */
	{
		int result;
		if ((streamPtr.dictSize < 64*KB) && (streamPtr.dictSize < streamPtr.currentOffset))
			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, dictSmall);
		else
			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, noDictIssue);
		// The block just compressed becomes the next call's dictionary.
		streamPtr.dictionary = cast(const(ubyte)*)source;
		streamPtr.dictSize = cast(uint)inputSize;
		streamPtr.currentOffset += cast(uint)inputSize;
		return result;
	}
}
/// Streaming compression without an output-size bound.
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const(char)* source, char* dest, int inputSize)
{
	return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, 0, notLimited);
}
/// Streaming compression that returns 0 rather than exceeding maxOutputSize.
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const(char)* source, char* dest, int inputSize, int maxOutputSize)
{
	return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput);
}
712 
713 
/** Hidden debug function, to force separate (external) dictionary mode
 *  even when prefix mode would normally apply. */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const(char)* source, char* dest, int inputSize)
{
	LZ4_stream_t_internal* streamPtr = cast(LZ4_stream_t_internal*)LZ4_dict;
	int result;
	const(ubyte)* dictEnd = streamPtr.dictionary + streamPtr.dictSize;

	// Renormalize against whichever address is lowest.
	const(ubyte)* smallest = dictEnd;
	if (smallest > cast(const(ubyte)*) source) smallest = cast(const(ubyte)*) source;
	LZ4_renormDictT(cast(LZ4_stream_t_internal*)LZ4_dict, smallest);

	result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue);

	// The input just consumed becomes the next call's dictionary.
	streamPtr.dictionary = cast(const(ubyte)*)source;
	streamPtr.dictSize = cast(uint)inputSize;
	streamPtr.currentOffset += cast(uint)inputSize;

	return result;
}
733 
/// Copies the stream's current dictionary (up to 64 KB) into `safeBuffer`
/// so that the stream keeps working after its source buffer is reused.
/// Returns the number of bytes saved.
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
	LZ4_stream_t_internal* dict = cast(LZ4_stream_t_internal*) LZ4_dict;
	const(ubyte)* previousDictEnd = dict.dictionary + dict.dictSize;

	if (cast(uint)dictSize > 64*KB) dictSize = 64*KB;   /* useless to define a dictionary > 64*KB */
	if (cast(uint)dictSize > dict.dictSize) dictSize = dict.dictSize;

	// memmove: source and destination regions may overlap.
	memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

	dict.dictionary = cast(const(ubyte)*)safeBuffer;
	dict.dictSize = cast(uint)dictSize;

	return dictSize;
}
750 
751 
752 
753 /* ***************************
754    Decompression functions
755 ****************************/
/**
 * This generic decompression function covers all use cases.
 * It shall be instantiated several times, using different sets of directives.
 * Note that it is essential this generic function is really inlined,
 * in order to remove useless branches during compilation optimization.
 *
 * Returns the number of output bytes decoded (endOnInputSize mode) or the
 * number of input bytes read (endOnOutputSize mode); negative on error.
 */
int LZ4_decompress_generic(
				 const(char)* source,
				 char* dest,
				 int inputSize,
				 int outputSize,         /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */

				 int endOnInput,         /* endOnOutputSize, endOnInputSize */
				 int partialDecoding,    /* full, partial */
				 int targetOutputSize,   /* only used if partialDecoding==partial */
				 int dict,               /* noDict, withPrefix64k, usingExtDict */
				 const(ubyte)* lowPrefix,  /* == dest if dict == noDict */
				 const(ubyte)* dictStart,  /* only if dict==usingExtDict */
				 const size_t dictSize         /* note : = 0 if noDict */
				 )
{
	/* Local Variables */
	const(ubyte)*  ip = cast(const(ubyte)*) source;
	const(ubyte)* iend = ip + inputSize;

	ubyte* op = cast(ubyte*) dest;
	ubyte* oend = op + outputSize;
	ubyte* cpy;
	ubyte* oexit = op + targetOutputSize;
	const(ubyte)* lowLimit = lowPrefix - dictSize;

	const(ubyte)* dictEnd = cast(const(ubyte)*)dictStart + dictSize;
	/* Step tables for the overlap-safe copy used when (op - match) < 8. */
	const size_t[8] dec32table = [4, 1, 2, 1, 4, 4, 4, 4];
	const size_t[8] dec64table = [0, 0, 0, cast(size_t)-1, 0, 1, 2, 3];

	const int safeDecode = (endOnInput==endOnInputSize);
	const int checkOffset = ((safeDecode) && (dictSize < cast(int)(64*KB)));


	/* Special cases */
	if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                         /* targetOutputSize too high => decode everything */
	if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
	if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);


	/* Main Loop : decode one token (literals + match) per iteration */
	while (true)
	{
		uint token;
		size_t length;
		const(ubyte)* match;

		/* get literal length */
		token = *ip++;
		if ((length=(token>>ML_BITS)) == RUN_MASK)
		{
			// Length >= 15: extended with 255-bytes until a byte < 255.
			uint s;
			do
			{
				s = *ip++;
				length += s;
			}
			while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
			if ((safeDecode) && unlikely(cast(size_t)(op+length)<cast(size_t)(op)))
            {
                goto _output_error;   /* overflow detection */
            }
			if ((safeDecode) && unlikely(cast(size_t)(ip+length)<cast(size_t)(ip))) 
            {
                goto _output_error;   /* overflow detection */
            }
		}

		/* copy literals */
		cpy = op+length;
		if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
			|| ((!endOnInput) && (cpy>oend-COPYLENGTH)))
		{
			// Near the end of either buffer: copy exactly, then stop.
			if (partialDecoding)
			{
				if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
				if ((endOnInput) && (ip+length > iend)) 
                {
                    goto _output_error;   /* Error : read attempt beyond end of input buffer */
                }
			}
			else
			{
				if ((!endOnInput) && (cpy != oend))
                {
                    goto _output_error;       /* Error : block decoding must stop exactly there */
                }
				if ((endOnInput) && ((ip+length != iend) || (cpy > oend)))
                {
                    goto _output_error;   /* Error : input must be consumed */
                }
			}
			memcpy(op, ip, length);
			ip += length;
			op += length;
			break;     /* Necessarily EOF, due to parsing restrictions */
		}
		LZ4_wildCopy(op, ip, cpy);
		ip += length; op = cpy;

		/* get offset */
		match = cpy - LZ4_readLE16(ip); ip+=2;
		if ((checkOffset) && (unlikely(match < lowLimit)))
        {
            goto _output_error;   /* Error : offset outside destination buffer */
        }

		/* get matchlength */
		length = token & ML_MASK;
		if (length == ML_MASK)
		{
			uint s;
			do
			{
				if ((endOnInput) && (ip > iend-LASTLITERALS))
                {
                    goto _output_error;
                }
				s = *ip++;
				length += s;
			} while (s==255);
			if ((safeDecode) && unlikely(cast(size_t)(op+length)<cast(size_t)op)) goto _output_error;   /* overflow detection */
		}
		length += MINMATCH;

		/* check external dictionary */
		if ((dict==usingExtDict) && (match < lowPrefix))
		{
			if (unlikely(op+length > oend-LASTLITERALS))
            {
                goto _output_error;   /* doesn't respect parsing restriction */
            }

			if (length <= cast(size_t)(lowPrefix-match))
			{
				/* match can be copied as a single segment from external dictionary */
				match = dictEnd - (lowPrefix-match);
				memcpy(op, match, length);
				op += length;
			}
			else
			{
				/* match encompass external dictionary and current segment */
				size_t copySize = cast(size_t)(lowPrefix-match);
				memcpy(op, dictEnd - copySize, copySize);
				op += copySize;
				copySize = length - copySize;
				if (copySize > cast(size_t)(op-lowPrefix))   /* overlap within current segment */
				{
					ubyte* endOfMatch = op + copySize;
					const(ubyte)* copyFrom = lowPrefix;
					while (op < endOfMatch) *op++ = *copyFrom++;
				}
				else
				{
					memcpy(op, lowPrefix, copySize);
					op += copySize;
				}
			}
			continue;
		}

		/* copy repeated sequence */
		cpy = op + length;
		if (unlikely((op-match)<8))
		{
			/* Offset < 8: the copy overlaps itself; the dec tables widen
			   the repeating pattern to a full 8-byte stride. */
			const size_t dec64 = dec64table[op-match];
			op[0] = match[0];
			op[1] = match[1];
			op[2] = match[2];
			op[3] = match[3];
			match += dec32table[op-match];
			LZ4_copy4(op+4, match);
			op += 8; match -= dec64;
		} else { LZ4_copy8(op, match); op+=8; match+=8; }

		if (unlikely(cpy>oend-12))
		{
			// wildCopy would overshoot: finish carefully near the end.
			if (cpy > oend-LASTLITERALS)
            {
                goto _output_error;    /* Error : last LASTLITERALS bytes must be literals */
            }
			if (op < oend-8)
			{
				LZ4_wildCopy(op, match, oend-8);
				match += (oend-8) - op;
				op = oend-8;
			}
			while (op<cpy) *op++ = *match++;
		}
		else
			LZ4_wildCopy(op, match, cpy);
		op=cpy;   /* correction */
	}

	/* end of decoding */
	if (endOnInput)
	   return cast(int) ((cast(char*)op)-dest);     /* Nb of output bytes decoded */
	else
	   return cast(int) ((cast(char*)ip)-source);   /* Nb of input bytes read */

	/* Overflow error detected */
_output_error:
	return cast(int) (-((cast(char*)ip)-source))-1;
}
966 
/// Decompresses an LZ4 block, validating the input as it goes.
/// Params:
///    source = compressed input, exactly `compressedSize` bytes.
///    dest   = output buffer with capacity `maxDecompressedSize`.
/// Returns: number of bytes written to `dest`, or a negative value if the
/// input is malformed or does not fit.
int LZ4_decompress_safe(const(char)* source, char* dest, int compressedSize, int maxDecompressedSize)
{
	return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
								  endOnInputSize, full, 0,
								  noDict, cast(ubyte*)dest, null, 0);
}
/// Like `LZ4_decompress_safe`, but may stop once `targetOutputSize` bytes
/// have been produced (the decoder is allowed to emit slightly more, up to
/// `maxDecompressedSize`).
/// Returns: number of bytes written, or a negative value on error.
int LZ4_decompress_safe_partial(const(char)* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
{
	return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
								  endOnInputSize, partial, targetOutputSize,
								  noDict, cast(ubyte*)dest, null, 0);
}
/// Decompresses a block whose decoded size (`originalSize`) is known exactly.
/// Faster than `LZ4_decompress_safe` but trusts its input: do NOT use on
/// untrusted data.
/// Returns: number of input bytes read, or a negative value on error.
int LZ4_decompress_fast(const(char)* source, char* dest, int originalSize)
{
	return LZ4_decompress_generic(source, dest, 0, originalSize,
								  endOnOutputSize, full, 0,
								  withPrefix64k, cast(ubyte*)(dest - 64*KB), null, 64*KB);
}
/* streaming decompression functions */

/// Internal state backing the public `LZ4_streamDecode_t` handle.
/// NOTE(review): the public handle is reinterpret-cast to this struct
/// (see `LZ4_setStreamDecode`), so field order and sizes must not change.
private struct LZ4_streamDecode_t_internal
{
	ubyte* externalDict;  // start of external dictionary (null when unused)
	size_t extDictSize;   // byte size of the external dictionary
	ubyte* prefixEnd;     // one past the end of already-decoded prefix data
	size_t prefixSize;    // byte size of that decoded prefix
}
990 
/**
 * Heap-allocates an `LZ4_streamDecode_t` for use with the `_continue()`
 * decoding functions.
 * Returns: pointer to the new structure, or `null` on allocation failure.
 * Release with `LZ4_freeStreamDecode`.
 */
LZ4_streamDecode_t* LZ4_createStreamDecode()
{
	return cast(LZ4_streamDecode_t*) ALLOCATOR(ulong.sizeof, LZ4_STREAMDECODESIZE_U64);
}
///ditto
/// Frees a structure obtained from `LZ4_createStreamDecode`.
/// Returns: always 0 (kept for C API compatibility).
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
	FREEMEM(LZ4_stream);
	return 0;
}
1007 
/**
 * Registers where previously decoded data (the dictionary) can be found, for
 * use by the `_continue()` functions.
 * Unnecessary when the previous data is still located where it was decoded.
 * A `dictSize` of 0 is allowed and means "no dictionary".
 * Returns: 1 if OK, 0 on error (this implementation always succeeds).
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const(char)* dictionary, int dictSize)
{
	LZ4_streamDecode_t_internal* ctx = cast(LZ4_streamDecode_t_internal*) LZ4_streamDecode;

	/* The supplied dictionary becomes the current prefix; no external dict yet. */
	ctx.externalDict = null;
	ctx.extDictSize  = 0;
	ctx.prefixEnd    = cast(ubyte*) dictionary + dictSize;
	ctx.prefixSize   = cast(size_t) dictSize;
	return 1;
}
1024 
/**
 * Decompresses the next block of a stream, safely.
 * Previously decoded blocks must still be readable at the position where they
 * were decoded; otherwise save them and register the copy with
 * `LZ4_setStreamDecode()`.
 * Returns: number of bytes written to `dest`, or a negative value on error.
 */
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const(char)* source, char* dest, int compressedSize, int maxOutputSize)
{
	LZ4_streamDecode_t_internal* ctx = cast(LZ4_streamDecode_t_internal*) LZ4_streamDecode;
	const bool contiguous = (ctx.prefixEnd == cast(ubyte*)dest);
	int decoded;

	if (contiguous)
	{
		/* Output directly follows the previous block: the prefix keeps growing. */
		decoded = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
										 endOnInputSize, full, 0,
										 usingExtDict, ctx.prefixEnd - ctx.prefixSize, ctx.externalDict, ctx.extDictSize);
		if (decoded <= 0)
			return decoded;
		ctx.prefixSize += decoded;
		ctx.prefixEnd  += decoded;
	}
	else
	{
		/* Output moved elsewhere: the old prefix becomes the external dictionary. */
		ctx.extDictSize  = ctx.prefixSize;
		ctx.externalDict = ctx.prefixEnd - ctx.extDictSize;
		decoded = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
										 endOnInputSize, full, 0,
										 usingExtDict, cast(ubyte*)dest, ctx.externalDict, ctx.extDictSize);
		if (decoded <= 0)
			return decoded;
		ctx.prefixSize = decoded;
		ctx.prefixEnd  = cast(ubyte*)dest + decoded;
	}

	return decoded;
}
///ditto
/// Streaming variant of `LZ4_decompress_fast`: produces exactly
/// `originalSize` bytes into `dest`. Trusts its input — do NOT use on
/// untrusted data.
/// Returns: number of input bytes read, or a negative value on error.
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const(char)* source, char* dest, int originalSize)
{
	LZ4_streamDecode_t_internal* lz4sd = cast(LZ4_streamDecode_t_internal*) LZ4_streamDecode;
	int result;

	if (lz4sd.prefixEnd == cast(ubyte*)dest)
	{
		/* Contiguous with previous output: the prefix keeps growing. */
		result = LZ4_decompress_generic(source, dest, 0, originalSize,
										endOnOutputSize, full, 0,
										usingExtDict, lz4sd.prefixEnd - lz4sd.prefixSize, lz4sd.externalDict, lz4sd.extDictSize);
		if (result <= 0) return result;
		/* `result` counts input bytes here; the output grew by `originalSize`. */
		lz4sd.prefixSize += originalSize;
		lz4sd.prefixEnd  += originalSize;
	}
	else
	{
		/* Output moved elsewhere: the previous prefix becomes the external
		   dictionary. BUGFIX: that prefix ends at `prefixEnd`, not at `dest`
		   (which is the new, unrelated output buffer). This now matches
		   LZ4_decompress_safe_continue. */
		lz4sd.extDictSize = lz4sd.prefixSize;
		lz4sd.externalDict = lz4sd.prefixEnd - lz4sd.extDictSize;
		result = LZ4_decompress_generic(source, dest, 0, originalSize,
										endOnOutputSize, full, 0,
										usingExtDict, cast(ubyte*)dest, lz4sd.externalDict, lz4sd.extDictSize);
		if (result <= 0) return result;
		lz4sd.prefixSize = originalSize;
		lz4sd.prefixEnd  = cast(ubyte*)dest + originalSize;
	}

	return result;
}
1089 
1090 
/**
Advanced decoding functions :
*_usingDict() :
	These decoding functions work the same as "_continue" ones,
	the dictionary must be explicitly provided within parameters
*/

int LZ4_decompress_usingDict_generic(const(char)* source, char* dest, int compressedSize, int maxOutputSize, int safe, const(char)* dictStart, int dictSize)
{
	/* Case 1: no dictionary at all — plain decode. */
	if (dictSize == 0)
		return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, cast(ubyte*)dest, null, 0);

	/* Case 2: dictionary sits immediately before `dest` — treat it as a prefix. */
	if (dictStart + dictSize == dest)
	{
		if (dictSize >= cast(int)(64*KB - 1))
		{
			/* Prefix covers the whole 64KB match window. */
			return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, cast(ubyte*)dest - 64*KB, null, 0);
		}
		return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, cast(ubyte*)dest - dictSize, null, 0);
	}

	/* Case 3: dictionary lives elsewhere — external-dictionary mode. */
	return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, cast(ubyte*)dest, cast(ubyte*)dictStart, dictSize);
}
///ditto
/// Safe decode with an explicitly supplied dictionary.
int LZ4_decompress_safe_usingDict(const(char)* source, char* dest, int compressedSize, int maxOutputSize, const(char)* dictStart, int dictSize)
{
	return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize,
											1 /* safe */, dictStart, dictSize);
}
///ditto
/// Fast (trusting) decode with an explicitly supplied dictionary.
int LZ4_decompress_fast_usingDict(const(char)* source, char* dest, int originalSize, const(char)* dictStart, int dictSize)
{
	return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize,
											0 /* not safe */, dictStart, dictSize);
}
1120 
/** Debug helper: forces external-dictionary mode even when the dictionary
 *  could have been used as a prefix. */
int LZ4_decompress_safe_forceExtDict(const(char)* source, char* dest, int compressedSize, int maxOutputSize, const(char)* dictStart, int dictSize)
{
	return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
								  endOnInputSize, full, 0,
								  usingExtDict, cast(ubyte*)dest, cast(ubyte*)dictStart, dictSize);
}