Integrated r91 LZ4.
--- old/usr/src/uts/common/fs/zfs/lz4.c
+++ new/usr/src/uts/common/fs/zfs/lz4.c
1 1 /*
2 2 * LZ4 - Fast LZ compression algorithm
3 3 * Header File
4 4 * Copyright (C) 2011-2013, Yann Collet.
5 5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 6 *
7 7 * Redistribution and use in source and binary forms, with or without
8 8 * modification, are permitted provided that the following conditions are
9 9 * met:
10 10 *
11 11 * * Redistributions of source code must retain the above copyright
12 12 * notice, this list of conditions and the following disclaimer.
13 13 * * Redistributions in binary form must reproduce the above
14 14 * copyright notice, this list of conditions and the following disclaimer
15 15 * in the documentation and/or other materials provided with the
16 16 * distribution.
17 17 *
18 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 29 *
30 30 * You can contact the author at :
31 31 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 32 * - LZ4 source repository : http://code.google.com/p/lz4/
33 + * Upstream release : r91
33 34 */
34 35
35 36 #include <sys/zfs_context.h>
36 37
37 38 static int real_LZ4_compress(const char *source, char *dest, int isize,
38 39 int osize);
39 -static int real_LZ4_uncompress(const char *source, char *dest, int osize);
40 40 static int LZ4_compressBound(int isize);
41 41 static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
42 42 int isize, int maxOutputSize);
43 43 static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
44 44 int isize, int osize);
45 45 static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
46 46 int isize, int osize);
47 47
48 48 /*ARGSUSED*/
49 49 size_t
50 50 lz4_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
51 51 {
52 52 uint32_t bufsiz;
53 53 char *dest = d_start;
54 54
55 55 ASSERT(d_len >= sizeof (bufsiz));
56 56
57 57 bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
58 58 d_len - sizeof (bufsiz));
59 59
60 60 /* Signal an error if the compression routine returned zero. */
61 61 if (bufsiz == 0)
62 62 return (s_len);
63 63
64 64 /*
65 65 * Encode the compressed buffer size at the start. We'll need this in
66 66 * decompression to counter the effects of padding which might be
67 67 * added to the compressed buffer and which, if unhandled, would
68 68 * confuse the hell out of our decompression function.
69 69 */
70 70 *(uint32_t *)dest = BE_32(bufsiz);
71 71
72 72 return (bufsiz + sizeof (bufsiz));
73 73 }
74 74
75 75 /*ARGSUSED*/
76 76 int
77 77 lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
78 78 {
79 79 const char *src = s_start;
80 80 uint32_t bufsiz = BE_IN32(src);
81 81
82 82 /* invalid compressed buffer size encoded at start */
83 83 if (bufsiz + sizeof (bufsiz) > s_len)
84 84 return (1);
85 85
86 86 /*
87 87 * Returns 0 on success (decompression function returned non-negative)
88 88 * and non-zero on failure (decompression function returned negative).
89 89 */
90 90 return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
91 91 d_start, bufsiz, d_len) < 0);
92 92 }
93 93
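For orientation, a minimal sketch of how a caller might drive the two wrappers above; the helper name and its return convention are hypothetical, and in practice the consumer is zio_compress_data():

	/*
	 * Hypothetical caller (illustration only).  lz4_compress() returns
	 * s_len when the compressed form does not fit in d_len;
	 * lz4_decompress() returns 0 on success, non-zero on failure.
	 */
	static int
	lz4_roundtrip_example(void *src, void *scratch, void *out, size_t len)
	{
		size_t clen = lz4_compress(src, scratch, len, len, 0);

		if (clen >= len)
			return (-1);	/* did not shrink; caller keeps src */
		return (lz4_decompress(scratch, out, clen, len, 0));
	}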
94 94 /*
95 95 * LZ4 API Description:
96 96 *
97 97 * Simple Functions:
98 98 * real_LZ4_compress() :
99 99 * isize : is the input size. Max supported value is ~1.9GB
100 100 * return : the number of bytes written in buffer dest
101 101 * or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
102 102 * note : destination buffer must be already allocated.
103 103 * destination buffer must be sized to handle worst-case
104 104 * situations (input data not compressible); worst-case size
105 105 * evaluation is provided by function LZ4_compressBound().
106 106 *
107 - * real_LZ4_uncompress() :
108 - * osize : is the output size, therefore the original size
109 - * return : the number of bytes read in the source buffer.
110 - * If the source stream is malformed, the function will stop
111 - * decoding and return a negative result, indicating the byte
112 - * position of the faulty instruction. This function never
113 - * writes beyond dest + osize, and is therefore protected
114 - * against malicious data packets.
115 - * note : destination buffer must be already allocated
116 - *
117 107 * Advanced Functions
118 108 *
119 109 * LZ4_compressBound() :
120 110 * Provides the maximum size that LZ4 may output in a "worst case"
121 111 * scenario (input data not compressible); primarily useful for memory
122 112 * allocation of the output buffer.
123 113 *
124 114 * isize : is the input size. Max supported value is ~1.9GB
125 115 * return : maximum output size in a "worst case" scenario
126 116 * note : this function is limited by "int" range (2^31-1)
127 117 *
128 118 * LZ4_uncompress_unknownOutputSize() :
129 119 * isize : is the input size, therefore the compressed size
130 120 * maxOutputSize : is the size of the destination buffer (which must be
131 121 * already allocated)
132 122 * return : the number of bytes decoded in the destination buffer
133 123 * (necessarily <= maxOutputSize). If the source stream is
134 124 * malformed, the function will stop decoding and return a
135 125 * negative result, indicating the byte position of the faulty
136 126 * instruction. This function never writes beyond dest +
137 127 * maxOutputSize, and is therefore protected against malicious
138 128 * data packets.
139 129 * note : Destination buffer must be already allocated.
140 - * This version is slightly slower than real_LZ4_uncompress()
141 130 *
142 131 * LZ4_compressCtx() :
143 132 * This function explicitly handles the CTX memory structure.
144 133 *
145 134 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
146 135 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
147 136 * isn't valid.
148 137 *
149 138 * LZ4_compress64kCtx() :
150 139 * Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
151 140 * isize *Must* be <64KB, otherwise the output will be corrupted.
152 141 *
153 142 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
154 143 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
155 144 * isn't valid.
156 145 */
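A minimal sketch of what the ILLUMOS CHANGES notes above ask of a caller: the ctx is allocated explicitly and the destination is assumed to hold at least LZ4_compressBound(isize) bytes so the compressor cannot run out of room. The function name and KM_SLEEP policy are illustrative; the in-tree caller real_LZ4_compress() further down uses KM_NOSLEEP:

	static int
	example_compress_explicit_ctx(const char *src, char *dst, int isize)
	{
		/* dst is assumed to hold at least LZ4_compressBound(isize) bytes */
		int osize = LZ4_compressBound(isize);
		struct refTables *ctx;
		int clen;

		ctx = kmem_zalloc(sizeof (struct refTables), KM_SLEEP);
		clen = LZ4_compressCtx(ctx, src, dst, isize, osize);
		kmem_free(ctx, sizeof (struct refTables));
		return (clen);
	}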
157 146
158 147 /*
159 148 * Tuning parameters
160 149 */
161 150
162 151 /*
163 152 * COMPRESSIONLEVEL: Increasing this value improves compression ratio.
164 153 * Lowering this value reduces memory usage. Reduced memory usage
165 154 * typically improves speed, due to cache effect (ex: L1 32KB for Intel,
166 155 * L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
167 156 * (examples : 12 -> 16KB ; 17 -> 512KB)
168 157 */
169 158 #define COMPRESSIONLEVEL 12
170 159
171 160 /*
172 161 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
173 162 * algorithm skip faster data segments considered "incompressible".
174 163 * This may decrease compression ratio dramatically, but will be
175 164 * faster on incompressible data. Increasing this value will make
176 165 * the algorithm search more before declaring a segment "incompressible".
177 166 * This could improve compression a bit, but will be slower on
178 167 * incompressible data. The default value (6) is recommended.
179 168 */
180 169 #define NOTCOMPRESSIBLE_CONFIRMATION 6
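As an illustration of the default: with the value 6, SKIPSTRENGTH (defined further down) is 6, so the match finder starts at findMatchAttempts = (1 << 6) + 3 and its forward step, findMatchAttempts++ >> skipStrength, stays at one byte for roughly the first 64 failed probes and then grows by one byte for every further 64 failures, which is what lets the scan accelerate through incompressible data.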
181 170
182 171 /*
183 172 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
184 173 * performance for big-endian CPUs, but the resulting compressed stream
185 174 * will be incompatible with little-endian CPUs. You can set this option
186 175 * to 1 in situations where data will stay within a closed environment.
187 176 * This option is useless on little-endian CPUs (such as x86).
188 177 */
189 178 /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
190 179
191 180 /*
192 181 * CPU Feature Detection
193 182 */
194 183
195 184 /* 32 or 64 bits ? */
196 185 #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
197 186 defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
198 187 defined(__LP64__) || defined(_LP64))
199 188 #define LZ4_ARCH64 1
200 189 #else
201 190 #define LZ4_ARCH64 0
202 191 #endif
203 192
204 193 /*
205 194 * Limits the amount of stack space that the algorithm may consume to hold
206 195 * the compression lookup table. The value `9' here means we'll never use
207 196 * more than 2k of stack (see above for a description of COMPRESSIONLEVEL).
208 197 * If more memory is needed, it is allocated from the heap.
209 198 */
210 199 #define STACKLIMIT 9
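Worked out with the defaults above: COMPRESSIONLEVEL 12 gives a hash table of 2^(12+2) = 16 KB, well over the 2^(9+2) = 2 KB stack budget implied by STACKLIMIT, so HEAPMODE (defined below) evaluates to 1 and real_LZ4_compress() obtains the table from kmem_zalloc() instead of the kernel stack.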
211 200
212 201 /*
213 202 * Little Endian or Big Endian?
214 203 * Note: overwrite the below #define if you know your architecture endianness.
215 204 */
216 205 #if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || \
217 206 defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || \
218 207 defined(__PPC) || defined(PPC) || defined(__powerpc__) || \
219 208 defined(__powerpc) || defined(powerpc) || \
220 209 ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))))
221 210 #define LZ4_BIG_ENDIAN 1
222 211 #else
223 212 /*
224 213 * Little Endian assumed. PDP Endian and other very rare endian formats
225 214 * are unsupported.
226 215 */
227 216 #endif
228 217
229 218 /*
230 219 * Unaligned memory access is automatically enabled for "common" CPUs,
231 220 * such as x86. For other CPUs, the compiler will be more cautious, and
232 221 * insert extra code to ensure aligned access is respected. If you know
233 222 * your target CPU supports unaligned memory access, you may want to
234 223 * force this option manually to improve performance.
235 224 */
236 225 #if defined(__ARM_FEATURE_UNALIGNED)
237 226 #define LZ4_FORCE_UNALIGNED_ACCESS 1
238 227 #endif
239 228
240 229 /* #define LZ4_FORCE_SW_BITCOUNT */
241 230
242 231 /*
243 232 * Compiler Options
244 233 */
245 234 #if __STDC_VERSION__ >= 199901L /* C99 */
246 235 /* "restrict" is a known keyword */
247 236 #else
248 237 /* Disable restrict */
249 238 #define restrict
250 239 #endif
251 240
252 241 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
253 242
254 243 #ifdef _MSC_VER
255 244 /* Visual Studio */
256 245 /* Visual is not C99, but supports some kind of inline */
257 246 #define inline __forceinline
258 247 #if LZ4_ARCH64
259 248 /* For Visual 2005 */
260 249 #pragma intrinsic(_BitScanForward64)
261 250 #pragma intrinsic(_BitScanReverse64)
262 251 #else /* !LZ4_ARCH64 */
263 252 /* For Visual 2005 */
264 253 #pragma intrinsic(_BitScanForward)
265 254 #pragma intrinsic(_BitScanReverse)
266 255 #endif /* !LZ4_ARCH64 */
267 256 #endif /* _MSC_VER */
268 257
269 258 #ifdef _MSC_VER
270 259 #define lz4_bswap16(x) _byteswap_ushort(x)
271 260 #else /* !_MSC_VER */
272 261 #define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
273 262 (((x) & 0xffu) << 8)))
274 263 #endif /* !_MSC_VER */
275 264
276 265 #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
277 266 #define expect(expr, value) (__builtin_expect((expr), (value)))
278 267 #else
279 268 #define expect(expr, value) (expr)
280 269 #endif
281 270
282 271 #define likely(expr) expect((expr) != 0, 1)
283 272 #define unlikely(expr) expect((expr) != 0, 0)
284 273
285 274 /* Basic types */
286 275 #if defined(_MSC_VER)
287 276 /* Visual Studio does not support 'stdint' natively */
288 277 #define BYTE unsigned __int8
289 278 #define U16 unsigned __int16
290 279 #define U32 unsigned __int32
291 280 #define S32 __int32
292 281 #define U64 unsigned __int64
293 282 #else /* !defined(_MSC_VER) */
294 283 #define BYTE uint8_t
295 284 #define U16 uint16_t
296 285 #define U32 uint32_t
297 286 #define S32 int32_t
298 287 #define U64 uint64_t
299 288 #endif /* !defined(_MSC_VER) */
300 289
301 290 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
302 291 #pragma pack(1)
303 292 #endif
304 293
305 294 typedef struct _U16_S {
306 295 U16 v;
307 296 } U16_S;
308 297 typedef struct _U32_S {
309 298 U32 v;
310 299 } U32_S;
311 300 typedef struct _U64_S {
312 301 U64 v;
313 302 } U64_S;
314 303
315 304 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
316 305 #pragma pack()
317 306 #endif
318 307
319 308 #define A64(x) (((U64_S *)(x))->v)
320 309 #define A32(x) (((U32_S *)(x))->v)
321 310 #define A16(x) (((U16_S *)(x))->v)
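The A64()/A32()/A16() accessors read or write a machine word through the packed wrapper structs above, which lets the compiler generate code that is safe for unaligned addresses; defining LZ4_FORCE_UNALIGNED_ACCESS drops the pack(1) pragmas so plain word loads are emitted, which is only appropriate on CPUs that tolerate unaligned access.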
322 311
323 312 /*
324 313 * Constants
325 314 */
326 315 #define MINMATCH 4
327 316
328 317 #define HASH_LOG COMPRESSIONLEVEL
329 318 #define HASHTABLESIZE (1 << HASH_LOG)
330 319 #define HASH_MASK (HASHTABLESIZE - 1)
331 320
332 321 #define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
333 322 NOTCOMPRESSIBLE_CONFIRMATION : 2)
334 323
335 324 /*
336 325 * Defines whether memory is allocated on the stack (local variable)
337 326 * or on the heap (kmem_alloc()).
338 327 */
339 328 #define HEAPMODE (HASH_LOG > STACKLIMIT)
340 329 #define COPYLENGTH 8
341 330 #define LASTLITERALS 5
342 331 #define MFLIMIT (COPYLENGTH + MINMATCH)
343 332 #define MINLENGTH (MFLIMIT + 1)
344 333
345 334 #define MAXD_LOG 16
346 335 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
347 336
348 337 #define ML_BITS 4
349 338 #define ML_MASK ((1U<<ML_BITS)-1)
350 339 #define RUN_BITS (8-ML_BITS)
351 340 #define RUN_MASK ((1U<<RUN_BITS)-1)
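A worked example of the fields above: each encoded sequence begins with a token byte whose high RUN_BITS carry the literal run length and whose low ML_BITS carry the match length minus MINMATCH, with the value RUN_MASK or ML_MASK signalling that extra 255-valued length bytes follow. Three literals followed by a 7-byte match therefore encode as token 0x33, while a run of 300 literals encodes as nibble 15 plus the bytes 255 and 30 (300 - 15 = 255 + 30).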
352 341
353 342
354 343 /*
355 344 * Architecture-specific macros
356 345 */
357 346 #if LZ4_ARCH64
358 347 #define STEPSIZE 8
359 348 #define UARCH U64
360 349 #define AARCH A64
361 350 #define LZ4_COPYSTEP(s, d) A64(d) = A64(s); d += 8; s += 8;
362 351 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
363 352 #define LZ4_SECURECOPY(s, d, e) if (d < e) LZ4_WILDCOPY(s, d, e)
364 353 #define HTYPE U32
365 354 #define INITBASE(base) const BYTE* const base = ip
366 355 #else /* !LZ4_ARCH64 */
367 356 #define STEPSIZE 4
368 357 #define UARCH U32
369 358 #define AARCH A32
370 359 #define LZ4_COPYSTEP(s, d) A32(d) = A32(s); d += 4; s += 4;
371 360 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
372 361 #define LZ4_SECURECOPY LZ4_WILDCOPY
373 362 #define HTYPE const BYTE *
374 363 #define INITBASE(base) const int base = 0
375 364 #endif /* !LZ4_ARCH64 */
376 365
377 366 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
378 367 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
379 368 { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
380 369 #define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
381 370 { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
382 371 #else
383 372 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
384 373 #define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
385 374 #endif
386 375
387 376
388 377 /* Local structures */
389 378 struct refTables {
390 379 HTYPE hashTable[HASHTABLESIZE];
391 380 };
392 381
393 382
394 383 /* Macros */
395 384 #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
396 385 HASH_LOG))
397 386 #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
398 387 #define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
399 388 #define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
400 389 d = e; }
401 390
402 391
403 392 /* Private functions */
404 393 #if LZ4_ARCH64
405 394
406 395 static inline int
407 396 LZ4_NbCommonBytes(register U64 val)
408 397 {
409 398 #if defined(LZ4_BIG_ENDIAN)
410 399 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
411 400 unsigned long r = 0;
412 401 _BitScanReverse64(&r, val);
413 402 return (int)(r >> 3);
414 403 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
415 404 !defined(LZ4_FORCE_SW_BITCOUNT)
416 405 return (__builtin_clzll(val) >> 3);
417 406 #else
418 407 int r;
419 408 if (!(val >> 32)) {
420 409 r = 4;
421 410 } else {
422 411 r = 0;
423 412 val >>= 32;
424 413 }
425 414 if (!(val >> 16)) {
426 415 r += 2;
427 416 val >>= 8;
428 417 } else {
429 418 val >>= 24;
430 419 }
431 420 r += (!val);
432 421 return (r);
433 422 #endif
434 423 #else
435 424 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
436 425 unsigned long r = 0;
437 426 _BitScanForward64(&r, val);
438 427 return (int)(r >> 3);
439 428 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
440 429 !defined(LZ4_FORCE_SW_BITCOUNT)
441 430 return (__builtin_ctzll(val) >> 3);
442 431 #else
443 432 static const int DeBruijnBytePos[64] =
444 433 { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
445 434 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
446 435 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
447 436 4, 5, 7, 2, 6, 5, 7, 6, 7, 7
448 437 };
449 438 return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
450 439 58];
451 440 #endif
452 441 #endif
453 442 }
454 443
455 444 #else
456 445
457 446 static inline int
458 447 LZ4_NbCommonBytes(register U32 val)
459 448 {
460 449 #if defined(LZ4_BIG_ENDIAN)
461 450 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
462 451 unsigned long r = 0;
463 452 _BitScanReverse(&r, val);
464 453 return (int)(r >> 3);
465 454 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
466 455 !defined(LZ4_FORCE_SW_BITCOUNT)
467 456 return (__builtin_clz(val) >> 3);
468 457 #else
469 458 int r;
470 459 if (!(val >> 16)) {
471 460 r = 2;
472 461 val >>= 8;
473 462 } else {
474 463 r = 0;
475 464 val >>= 24;
476 465 }
477 466 r += (!val);
478 467 return (r);
479 468 #endif
480 469 #else
481 470 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
482 471 unsigned long r = 0;
483 472 _BitScanForward(&r, val);
484 473 return (int)(r >> 3);
485 474 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && \
486 475 !defined(LZ4_FORCE_SW_BITCOUNT)
487 476 return (__builtin_ctz(val) >> 3);
488 477 #else
489 478 static const int DeBruijnBytePos[32] = {
490 479 0, 0, 3, 0, 3, 1, 3, 0,
491 480 3, 2, 2, 1, 3, 2, 0, 1,
492 481 3, 3, 1, 2, 2, 2, 2, 0,
493 482 3, 1, 2, 0, 1, 0, 1, 1
494 483 };
495 484 return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
496 485 27];
497 486 #endif
498 487 #endif
499 488 }
500 489
501 490 #endif
502 491
503 492 /* Public functions */
504 493
505 494 static int
506 495 LZ4_compressBound(int isize)
507 496 {
508 497 return (isize + (isize / 255) + 16);
509 498 }
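For example, a 4096-byte input gives a bound of 4096 + 16 + 16 = 4128 bytes; the isize/255 + 16 slack covers the token and length-extension bytes emitted when the data is wholly incompressible.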
510 499
511 500 /* Compression functions */
512 501
513 502 /*ARGSUSED*/
514 503 static int
515 504 LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
516 505 int osize)
517 506 {
518 507 #if HEAPMODE
519 508 struct refTables *srt = (struct refTables *)ctx;
520 509 HTYPE *HashTable = (HTYPE *) (srt->hashTable);
521 510 #else
522 511 HTYPE HashTable[HASHTABLESIZE] = { 0 };
523 512 #endif
524 513
525 514 const BYTE *ip = (BYTE *) source;
526 515 INITBASE(base);
527 516 const BYTE *anchor = ip;
528 517 const BYTE *const iend = ip + isize;
529 518 const BYTE *const oend = (BYTE *) dest + osize;
530 519 const BYTE *const mflimit = iend - MFLIMIT;
531 520 #define matchlimit (iend - LASTLITERALS)
532 521
533 522 BYTE *op = (BYTE *) dest;
534 523
535 - int len, length;
524 + int length;
536 525 const int skipStrength = SKIPSTRENGTH;
537 526 U32 forwardH;
538 527
539 528
540 529 /* Init */
541 530 if (isize < MINLENGTH)
542 531 goto _last_literals;
543 532
544 533 /* First Byte */
545 534 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
546 535 ip++;
547 536 forwardH = LZ4_HASH_VALUE(ip);
548 537
549 538 /* Main Loop */
550 539 for (;;) {
551 540 int findMatchAttempts = (1U << skipStrength) + 3;
552 541 const BYTE *forwardIp = ip;
553 542 const BYTE *ref;
554 543 BYTE *token;
555 544
556 545 /* Find a match */
557 546 do {
558 547 U32 h = forwardH;
559 548 int step = findMatchAttempts++ >> skipStrength;
560 549 ip = forwardIp;
561 550 forwardIp = ip + step;
562 551
563 552 if unlikely(forwardIp > mflimit) {
564 553 goto _last_literals;
565 554 }
566 555
567 556 forwardH = LZ4_HASH_VALUE(forwardIp);
568 557 ref = base + HashTable[h];
569 558 HashTable[h] = ip - base;
570 559
571 560 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
572 561
573 562 /* Catch up */
574 563 while ((ip > anchor) && (ref > (BYTE *) source) &&
575 564 unlikely(ip[-1] == ref[-1])) {
576 565 ip--;
577 566 ref--;
578 567 }
579 568
580 569 /* Encode Literal length */
581 570 length = ip - anchor;
582 571 token = op++;
583 572
584 573 /* Check output limit */
585 574 if unlikely(op + length + (2 + 1 + LASTLITERALS) +
586 575 (length >> 8) > oend)
587 576 return (0);
588 577
589 578 if (length >= (int)RUN_MASK) {
579 + int len;
590 580 *token = (RUN_MASK << ML_BITS);
591 581 len = length - RUN_MASK;
592 582 for (; len > 254; len -= 255)
593 583 *op++ = 255;
594 584 *op++ = (BYTE)len;
595 585 } else
596 586 *token = (length << ML_BITS);
597 587
598 588 /* Copy Literals */
599 589 LZ4_BLINDCOPY(anchor, op, length);
600 590
601 591 _next_match:
602 592 /* Encode Offset */
603 593 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
604 594
605 595 /* Start Counting */
606 596 ip += MINMATCH;
607 - ref += MINMATCH; /* MinMatch verified */
597 + ref += MINMATCH; /* MinMatch already verified */
608 598 anchor = ip;
609 599 while likely(ip < matchlimit - (STEPSIZE - 1)) {
610 600 UARCH diff = AARCH(ref) ^ AARCH(ip);
611 601 if (!diff) {
612 602 ip += STEPSIZE;
613 603 ref += STEPSIZE;
614 604 continue;
615 605 }
616 606 ip += LZ4_NbCommonBytes(diff);
617 607 goto _endCount;
618 608 }
619 609 #if LZ4_ARCH64
620 610 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
621 611 ip += 4;
622 612 ref += 4;
623 613 }
624 614 #endif
625 615 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
626 616 ip += 2;
627 617 ref += 2;
628 618 }
629 619 if ((ip < matchlimit) && (*ref == *ip))
630 620 ip++;
631 621 _endCount:
632 622
633 623 /* Encode MatchLength */
634 - len = (ip - anchor);
624 + length = (int)(ip - anchor);
635 625 /* Check output limit */
636 - if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
626 + if unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend)
637 627 return (0);
638 - if (len >= (int)ML_MASK) {
628 + if (length >= (int)ML_MASK) {
639 629 *token += ML_MASK;
640 - len -= ML_MASK;
641 - for (; len > 509; len -= 510) {
630 + length -= ML_MASK;
631 + for (; length > 509; length -= 510) {
642 632 *op++ = 255;
643 633 *op++ = 255;
644 634 }
645 - if (len > 254) {
646 - len -= 255;
635 + if (length > 254) {
636 + length -= 255;
647 637 *op++ = 255;
648 638 }
649 - *op++ = (BYTE)len;
639 + *op++ = (BYTE)length;
650 640 } else
651 - *token += len;
641 + *token += length;
652 642
653 643 /* Test end of chunk */
654 644 if (ip > mflimit) {
655 645 anchor = ip;
656 646 break;
657 647 }
658 648 /* Fill table */
659 649 HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;
660 650
661 651 /* Test next position */
662 652 ref = base + HashTable[LZ4_HASH_VALUE(ip)];
663 653 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
664 654 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
665 655 token = op++;
666 656 *token = 0;
667 657 goto _next_match;
668 658 }
669 659 /* Prepare next loop */
670 660 anchor = ip++;
671 661 forwardH = LZ4_HASH_VALUE(ip);
672 662 }
673 663
674 664 _last_literals:
675 665 /* Encode Last Literals */
676 666 {
677 667 int lastRun = iend - anchor;
678 668 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
679 669 oend)
680 670 return (0);
681 671 if (lastRun >= (int)RUN_MASK) {
682 672 *op++ = (RUN_MASK << ML_BITS);
683 673 lastRun -= RUN_MASK;
684 674 for (; lastRun > 254; lastRun -= 255) {
685 675 *op++ = 255;
686 676 }
687 677 *op++ = (BYTE)lastRun;
688 678 } else
689 679 *op++ = (lastRun << ML_BITS);
690 680 (void) memcpy(op, anchor, iend - anchor);
691 681 op += iend - anchor;
692 682 }
693 683
694 684 /* End */
695 685 return (int)(((char *)op) - dest);
696 686 }
697 687
698 688
699 689
700 690 /* Note : this function is valid only if isize < LZ4_64KLIMIT */
701 691 #define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
702 692 #define HASHLOG64K (HASH_LOG + 1)
703 693 #define HASH64KTABLESIZE (1U << HASHLOG64K)
704 694 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
705 695 HASHLOG64K))
706 696 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
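Because this path is only taken for isize < LZ4_64KLIMIT, every hashed position sits below the MFLIMIT boundary and its offset from base fits in 16 bits; that is what allows the U16 hash table here, and HASHLOG64K buys one extra bit of hash resolution in the same memory footprint as the general-purpose table.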
707 697
708 698 /*ARGSUSED*/
709 699 static int
710 700 LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
711 701 int osize)
712 702 {
713 703 #if HEAPMODE
714 704 struct refTables *srt = (struct refTables *)ctx;
715 705 U16 *HashTable = (U16 *) (srt->hashTable);
716 706 #else
717 707 U16 HashTable[HASH64KTABLESIZE] = { 0 };
718 708 #endif
719 709
720 710 const BYTE *ip = (BYTE *) source;
721 711 const BYTE *anchor = ip;
722 712 const BYTE *const base = ip;
723 713 const BYTE *const iend = ip + isize;
724 714 const BYTE *const oend = (BYTE *) dest + osize;
725 715 const BYTE *const mflimit = iend - MFLIMIT;
726 716 #define matchlimit (iend - LASTLITERALS)
727 717
728 718 BYTE *op = (BYTE *) dest;
729 719
730 720 int len, length;
731 721 const int skipStrength = SKIPSTRENGTH;
732 722 U32 forwardH;
733 723
734 724 /* Init */
735 725 if (isize < MINLENGTH)
736 726 goto _last_literals;
737 727
738 728 /* First Byte */
739 729 ip++;
740 730 forwardH = LZ4_HASH64K_VALUE(ip);
741 731
742 732 /* Main Loop */
743 733 for (;;) {
744 734 int findMatchAttempts = (1U << skipStrength) + 3;
745 735 const BYTE *forwardIp = ip;
746 736 const BYTE *ref;
747 737 BYTE *token;
748 738
749 739 /* Find a match */
750 740 do {
751 741 U32 h = forwardH;
752 742 int step = findMatchAttempts++ >> skipStrength;
753 743 ip = forwardIp;
754 744 forwardIp = ip + step;
755 745
756 746 if (forwardIp > mflimit) {
757 747 goto _last_literals;
758 748 }
759 749
760 750 forwardH = LZ4_HASH64K_VALUE(forwardIp);
761 751 ref = base + HashTable[h];
762 752 HashTable[h] = ip - base;
763 753
764 754 } while (A32(ref) != A32(ip));
765 755
766 756 /* Catch up */
767 757 while ((ip > anchor) && (ref > (BYTE *) source) &&
768 758 (ip[-1] == ref[-1])) {
769 759 ip--;
770 760 ref--;
771 761 }
772 762
773 763 /* Encode Literal length */
774 764 length = ip - anchor;
775 765 token = op++;
776 766
777 767 /* Check output limit */
778 768 if unlikely(op + length + (2 + 1 + LASTLITERALS) +
779 769 (length >> 8) > oend)
780 770 return (0);
781 771
782 772 if (length >= (int)RUN_MASK) {
783 773 *token = (RUN_MASK << ML_BITS);
784 774 len = length - RUN_MASK;
785 775 for (; len > 254; len -= 255)
786 776 *op++ = 255;
787 777 *op++ = (BYTE)len;
788 778 } else
789 779 *token = (length << ML_BITS);
790 780
791 781 /* Copy Literals */
792 782 LZ4_BLINDCOPY(anchor, op, length);
793 783
794 784 _next_match:
795 785 /* Encode Offset */
796 786 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
797 787
798 788 /* Start Counting */
799 789 ip += MINMATCH;
800 790 ref += MINMATCH; /* MinMatch verified */
801 791 anchor = ip;
802 792 while (ip < matchlimit - (STEPSIZE - 1)) {
803 793 UARCH diff = AARCH(ref) ^ AARCH(ip);
804 794 if (!diff) {
805 795 ip += STEPSIZE;
806 796 ref += STEPSIZE;
807 797 continue;
808 798 }
809 799 ip += LZ4_NbCommonBytes(diff);
810 800 goto _endCount;
811 801 }
812 802 #if LZ4_ARCH64
813 803 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
814 804 ip += 4;
815 805 ref += 4;
816 806 }
817 807 #endif
818 808 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
819 809 ip += 2;
820 810 ref += 2;
821 811 }
822 812 if ((ip < matchlimit) && (*ref == *ip))
823 813 ip++;
824 814 _endCount:
825 815
826 816 /* Encode MatchLength */
827 817 len = (ip - anchor);
828 818 /* Check output limit */
829 819 if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
830 820 return (0);
831 821 if (len >= (int)ML_MASK) {
832 822 *token += ML_MASK;
833 823 len -= ML_MASK;
834 824 for (; len > 509; len -= 510) {
835 825 *op++ = 255;
836 826 *op++ = 255;
837 827 }
838 828 if (len > 254) {
839 829 len -= 255;
840 830 *op++ = 255;
841 831 }
842 832 *op++ = (BYTE)len;
843 833 } else
844 834 *token += len;
845 835
846 836 /* Test end of chunk */
847 837 if (ip > mflimit) {
848 838 anchor = ip;
849 839 break;
850 840 }
851 841 /* Fill table */
852 842 HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;
853 843
854 844 /* Test next position */
855 845 ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
856 846 HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
857 847 if (A32(ref) == A32(ip)) {
858 848 token = op++;
859 849 *token = 0;
860 850 goto _next_match;
861 851 }
862 852 /* Prepare next loop */
863 853 anchor = ip++;
864 854 forwardH = LZ4_HASH64K_VALUE(ip);
865 855 }
866 856
867 857 _last_literals:
868 858 /* Encode Last Literals */
869 859 {
870 860 int lastRun = iend - anchor;
871 861 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
872 862 oend)
873 863 return (0);
874 864 if (lastRun >= (int)RUN_MASK) {
875 865 *op++ = (RUN_MASK << ML_BITS);
876 866 lastRun -= RUN_MASK;
877 867 for (; lastRun > 254; lastRun -= 255)
878 868 *op++ = 255;
879 869 *op++ = (BYTE)lastRun;
880 870 } else
881 871 *op++ = (lastRun << ML_BITS);
882 872 (void) memcpy(op, anchor, iend - anchor);
883 873 op += iend - anchor;
884 874 }
885 875
886 876 /* End */
887 877 return (int)(((char *)op) - dest);
888 878 }
889 879
890 880 static int
891 881 real_LZ4_compress(const char *source, char *dest, int isize, int osize)
892 882 {
893 883 #if HEAPMODE
894 884 void *ctx = kmem_zalloc(sizeof (struct refTables), KM_NOSLEEP);
895 885 int result;
896 886
897 887 /*
898 888 * out of kernel memory, gently fall through - this will disable
899 889 * compression in zio_compress_data
900 890 */
901 891 if (ctx == NULL)
902 892 return (0);
903 893
904 894 if (isize < LZ4_64KLIMIT)
905 895 result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
906 896 else
907 897 result = LZ4_compressCtx(ctx, source, dest, isize, osize);
908 898
909 899 kmem_free(ctx, sizeof (struct refTables));
910 900 return (result);
911 901 #else
912 902 if (isize < (int)LZ4_64KLIMIT)
913 903 return (LZ4_compress64kCtx(NULL, source, dest, isize, osize));
914 904 return (LZ4_compressCtx(NULL, source, dest, isize, osize));
915 905 #endif
916 906 }
917 907
918 908 /* Decompression functions */
919 909
920 -/*
921 - * Note: The decoding functions real_LZ4_uncompress() and
922 - * LZ4_uncompress_unknownOutputSize() are safe against "buffer overflow"
923 - * attack type. They will never write nor read outside of the provided
924 - * output buffers. LZ4_uncompress_unknownOutputSize() also insures that
925 - * it will never read outside of the input buffer. A corrupted input
926 - * will produce an error result, a negative int, indicating the position
927 - * of the error within input stream.
928 - */
929 -
930 910 static int
931 -real_LZ4_uncompress(const char *source, char *dest, int osize)
932 -{
933 - /* Local Variables */
934 - const BYTE *restrict ip = (const BYTE *) source;
935 - const BYTE *ref;
936 -
937 - BYTE *op = (BYTE *) dest;
938 - BYTE *const oend = op + osize;
939 - BYTE *cpy;
940 -
941 - unsigned token;
942 -
943 - size_t length;
944 - size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
945 -#if LZ4_ARCH64
946 - size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
947 -#endif
948 -
949 - /* Main Loop */
950 - for (;;) {
951 - /* get runlength */
952 - token = *ip++;
953 - if ((length = (token >> ML_BITS)) == RUN_MASK) {
954 - size_t len;
955 - for (; (len = *ip++) == 255; length += 255) {
956 - }
957 - length += len;
958 - }
959 - /* copy literals */
960 - cpy = op + length;
961 - if unlikely(cpy > oend - COPYLENGTH) {
962 - if (cpy != oend)
963 - /* Error: we must necessarily stand at EOF */
964 - goto _output_error;
965 - (void) memcpy(op, ip, length);
966 - ip += length;
967 - break; /* EOF */
968 - }
969 - LZ4_WILDCOPY(ip, op, cpy);
970 - ip -= (op - cpy);
971 - op = cpy;
972 -
973 - /* get offset */
974 - LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
975 - ip += 2;
976 - if unlikely(ref < (BYTE * const) dest)
977 - /*
978 - * Error: offset create reference outside destination
979 - * buffer
980 - */
981 - goto _output_error;
982 -
983 - /* get matchlength */
984 - if ((length = (token & ML_MASK)) == ML_MASK) {
985 - for (; *ip == 255; length += 255) {
986 - ip++;
987 - }
988 - length += *ip++;
989 - }
990 - /* copy repeated sequence */
991 - if unlikely(op - ref < STEPSIZE) {
992 -#if LZ4_ARCH64
993 - size_t dec64 = dec64table[op-ref];
994 -#else
995 - const int dec64 = 0;
996 -#endif
997 - op[0] = ref[0];
998 - op[1] = ref[1];
999 - op[2] = ref[2];
1000 - op[3] = ref[3];
1001 - op += 4;
1002 - ref += 4;
1003 - ref -= dec32table[op-ref];
1004 - A32(op) = A32(ref);
1005 - op += STEPSIZE - 4;
1006 - ref -= dec64;
1007 - } else {
1008 - LZ4_COPYSTEP(ref, op);
1009 - }
1010 - cpy = op + length - (STEPSIZE - 4);
1011 - if (cpy > oend - COPYLENGTH) {
1012 - if (cpy > oend)
1013 - /*
1014 - * Error: request to write beyond destination
1015 - * buffer
1016 - */
1017 - goto _output_error;
1018 - LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
1019 - while (op < cpy)
1020 - *op++ = *ref++;
1021 - op = cpy;
1022 - if (op == oend)
1023 - /*
1024 - * Check EOF (should never happen, since last
1025 - * 5 bytes are supposed to be literals)
1026 - */
1027 - goto _output_error;
1028 - continue;
1029 - }
1030 - LZ4_SECURECOPY(ref, op, cpy);
1031 - op = cpy; /* correction */
1032 - }
1033 -
1034 - /* end of decoding */
1035 - return (int)(((char *)ip) - source);
1036 -
1037 - /* write overflow error detected */
1038 - _output_error:
1039 - return (int)(-(((char *)ip) - source));
1040 -}
1041 -
1042 -static int
1043 911 LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
1044 912 int maxOutputSize)
1045 913 {
1046 914 /* Local Variables */
1047 915 const BYTE *restrict ip = (const BYTE *) source;
1048 916 const BYTE *const iend = ip + isize;
1049 917 const BYTE *ref;
1050 918
1051 919 BYTE *op = (BYTE *) dest;
1052 920 BYTE *const oend = op + maxOutputSize;
1053 921 BYTE *cpy;
1054 922
1055 923 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
1056 924 #if LZ4_ARCH64
1057 925 size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
1058 926 #endif
1059 927
928 + /*
929 + * Special case
930 + * A correctly formed null-compressed LZ4 must have at least
931 + * one byte (token=0)
932 + */
933 + if (unlikely(ip == iend))
934 + goto _output_error;
935 +
1060 936 /* Main Loop */
1061 - while (ip < iend) {
937 + /*LINTED E_CONSTANT_CONDITION*/
938 + while (1) {
1062 939 unsigned token;
1063 940 size_t length;
1064 941
1065 942 /* get runlength */
1066 943 token = *ip++;
1067 944 if ((length = (token >> ML_BITS)) == RUN_MASK) {
1068 945 int s = 255;
1069 946 while ((ip < iend) && (s == 255)) {
1070 947 s = *ip++;
1071 948 length += s;
1072 949 }
1073 950 }
1074 951 /* copy literals */
1075 952 cpy = op + length;
1076 - if ((cpy > oend - COPYLENGTH) ||
1077 - (ip + length > iend - COPYLENGTH)) {
953 + if ((cpy > oend - MFLIMIT) ||
954 + (ip + length > iend - (2 + 1 + LASTLITERALS))) {
1078 955 if (cpy > oend)
1079 956 /* Error: writes beyond output buffer */
1080 957 goto _output_error;
1081 958 if (ip + length != iend)
1082 959 /*
1083 960 * Error: LZ4 format requires consuming all
1084 - * input at this stage
961 + * input at this stage (no match within the
962 + * last 11 bytes, and at least 8 remaining
963 + * input bytes for another match + literals)
1085 964 */
1086 965 goto _output_error;
1087 966 (void) memcpy(op, ip, length);
1088 967 op += length;
1089 968 /* Necessarily EOF, due to parsing restrictions */
1090 969 break;
1091 970 }
1092 971 LZ4_WILDCOPY(ip, op, cpy);
1093 972 ip -= (op - cpy);
1094 973 op = cpy;
1095 974
1096 975 /* get offset */
1097 976 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
1098 977 ip += 2;
1099 - if (ref < (BYTE * const) dest)
978 + if (unlikely(ref < (BYTE * const) dest))
1100 979 /*
1101 980 * Error: offset creates reference outside of
1102 981 * destination buffer
1103 982 */
1104 983 goto _output_error;
1105 984
1106 985 /* get matchlength */
1107 986 if ((length = (token & ML_MASK)) == ML_MASK) {
1108 - while (ip < iend) {
987 + while (likely(ip < iend - (LASTLITERALS + 1))) {
1109 988 int s = *ip++;
1110 989 length += s;
1111 990 if (s == 255)
1112 991 continue;
1113 992 break;
1114 993 }
1115 994 }
1116 995 /* copy repeated sequence */
1117 996 if unlikely(op - ref < STEPSIZE) {
1118 997 #if LZ4_ARCH64
1119 998 size_t dec64 = dec64table[op-ref];
1120 999 #else
1121 1000 const int dec64 = 0;
1122 1001 #endif
1123 1002 op[0] = ref[0];
1124 1003 op[1] = ref[1];
1125 1004 op[2] = ref[2];
1126 1005 op[3] = ref[3];
1127 1006 op += 4;
1128 1007 ref += 4;
1129 1008 ref -= dec32table[op-ref];
1130 1009 A32(op) = A32(ref);
1131 1010 op += STEPSIZE - 4;
1132 1011 ref -= dec64;
1133 1012 } else {
1134 1013 LZ4_COPYSTEP(ref, op);
1135 1014 }
1136 1015 cpy = op + length - (STEPSIZE - 4);
1137 - if (cpy > oend - COPYLENGTH) {
1138 - if (cpy > oend)
1016 + if (unlikely(cpy > oend - (COPYLENGTH + (STEPSIZE - 4)))) {
1017 + if (cpy > oend - LASTLITERALS)
1139 1018 /*
1140 - * Error: request to write outside of
1141 - * destination buffer
1019 + * Error: last 5 bytes must be literals
1142 1020 */
1143 1021 goto _output_error;
1144 1022 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
1145 1023 while (op < cpy)
1146 1024 *op++ = *ref++;
1147 1025 op = cpy;
1148 1026 if (op == oend)
1149 1027 /*
1150 1028 * Check EOF (should never happen, since
1151 1029 * last 5 bytes are supposed to be literals)
1152 1030 */
1153 1031 goto _output_error;
1154 1032 continue;
1155 1033 }
1156 - LZ4_SECURECOPY(ref, op, cpy);
1034 + LZ4_WILDCOPY(ref, op, cpy);
1157 1035 op = cpy; /* correction */
1158 1036 }
1159 1037
1160 1038 /* end of decoding */
1161 1039 return (int)(((char *)op) - dest);
1162 1040
1163 1041 /* write overflow error detected */
1164 1042 _output_error:
1165 1043 return (int)(-(((char *)ip) - source));
1166 1044 }