/// PNG and BMP image loading.
/// D translation of stb_image-2.27 (png) and stb_image-2.29 (bmp).
/// This port only supports PNG loading (8-bit and 16-bit)
/// and BMP loading, as in STB.
module gamut.codecs.stbdec;


/* stb_image - v2.27 - public domain image loader - http://nothings.org/stb
   no warranty implied; use at your own risk


   QUICK NOTES:
      Primarily of interest to game developers and other people who can
      avoid problematic images and only need the trivial interface

      JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib)
      PNG 1/2/4/8/16-bit-per-channel

      TGA (not sure what subset, if a subset)
      BMP non-1bpp, non-RLE
      PSD (composited view only, no extra channels, 8/16 bit-per-channel)

      GIF (*comp always reports as 4-channel)
      HDR (radiance rgbE format)
      PIC (Softimage PIC)
      PNM (PPM and PGM binary only)

      Animated GIF still needs a proper API, but here's one way to do it:
           http://gist.github.com/urraka/685d9a6340b26b830d49

      - decode from memory
      - decode from arbitrary I/O callbacks
      - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON)

   Full documentation under "DOCUMENTATION" below.


LICENSE

  See end of file for license information.
41 42 RECENT REVISION HISTORY: 43 44 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes 45 2.26 (2020-07-13) many minor fixes 46 2.25 (2020-02-02) fix warnings 47 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically 48 2.23 (2019-08-11) fix clang static analysis warning 49 2.22 (2019-03-04) gif fixes, fix warnings 50 2.21 (2019-02-25) fix typo in comment 51 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs 52 2.19 (2018-02-11) fix warning 53 2.18 (2018-01-30) fix warnings 54 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings 55 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes 56 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC 57 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs 58 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes 59 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes 60 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 61 RGB-format JPEG; remove white matting in PSD; 62 allocate large structures on the stack; 63 correct channel count for PNG & BMP 64 2.10 (2016-01-22) avoid warning introduced in 2.09 65 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED 66 67 See end of file for full revision history. 
68 69 70 ============================ Contributors ========================= 71 72 Image formats Extensions, features 73 Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) 74 Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) 75 Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) 76 Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) 77 Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) 78 Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) 79 Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) 80 github:urraka (animated gif) Junggon Kim (PNM comments) 81 Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) 82 socks-the-fox (16-bit PNG) 83 Jeremy Sawicki (handle all ImageNet JPGs) 84 Optimizations & bugfixes Mikhail Morozov (1-bit BMP) 85 Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) 86 Arseny Kapoulkine Simon Breuss (16-bit PNM) 87 John-Mark Allen 88 Carmelo J Fdez-Aguera 89 90 Bug & warning fixes 91 Marc LeBlanc David Woo Guillaume George Martins Mozeiko 92 Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski 93 Phil Jordan Dave Moore Roy Eltham 94 Hayaki Saito Nathan Reed Won Chun 95 Luke Graham Johan Duparc Nick Verigakis the Horde3D community 96 Thomas Ruf Ronny Chevalier github:rlyeh 97 Janez Zemva John Bartholomew Michal Cichon github:romigrou 98 Jonathan Blow Ken Hamada Tero Hanninen github:svdijk 99 Eugene Golushkov Laurent Gomila Cort Stratton github:snagar 100 Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex 101 Cass Everitt Ryamond Barbiero github:grim210 102 Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw 103 Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus 104 Josh Tobin Matthew Gregan github:poppolopoppo 105 Julian Raschke Gregory Mullen Christian Floisand github:darealshinji 106 Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 107 Brad Weinberger Matvey Cherevko github:mosra 108 Luca Sas Alexander Veselov Zack Middleton [reserved] 109 
Ryan C. Gordon [reserved] [reserved] 110 DO NOT ADD YOUR NAME HERE 111 112 Jacko Dirks 113 114 To add your name to the credits, pick a random blank space in the middle and fill it. 115 80% of merge conflicts on stb PRs are due to people adding their name at the end 116 of the credits. 117 */ 118 119 import core.stdc.config: c_ulong; 120 import core.stdc.string: memcpy, memset; 121 import core.stdc.stdlib: malloc, free, realloc; 122 import core.atomic; 123 124 import std.math: ldexp, pow, abs; 125 126 127 nothrow @nogc: 128 129 import inteli.emmintrin; 130 enum stbi__sse2_available = true; // because always available with intel-intrinsics 131 132 // DOCUMENTATION 133 // 134 // Limitations: 135 // - no 12-bit-per-channel JPEG 136 // - no JPEGs with arithmetic coding 137 // - GIF always returns *comp=4 138 // 139 // Basic usage (see HDR discussion below for HDR usage): 140 // int x,y,n; 141 // unsigned char *data = stbi_load(filename, &x, &y, &n, 0); 142 // // ... process data if not null ... 143 // // ... x = width, y = height, n = # 8-bit components per pixel ... 144 // // ... replace '0' with '1'..'4' to force that many components per pixel 145 // // ... but 'n' will always be the number that it would have been if you said 0 146 // stbi_image_free(data) 147 // 148 // Standard parameters: 149 // int *x -- outputs image width in pixels 150 // int *y -- outputs image height in pixels 151 // int *channels_in_file -- outputs # of image components in image file 152 // int desired_channels -- if non-zero, # of image components requested in result 153 // 154 // The return value from an image loader is an 'unsigned char *' which points 155 // to the pixel data, or null on an allocation failure or if the image is 156 // corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, 157 // with each pixel consisting of N interleaved 8-bit components; the first 158 // pixel pointed to is top-left-most in the image. 
There is no padding between 159 // image scanlines or between pixels, regardless of format. The number of 160 // components N is 'desired_channels' if desired_channels is non-zero, or 161 // *channels_in_file otherwise. If desired_channels is non-zero, 162 // *channels_in_file has the number of components that _would_ have been 163 // output otherwise. E.g. if you set desired_channels to 4, you will always 164 // get RGBA output, but you can check *channels_in_file to see if it's trivially 165 // opaque because e.g. there were only 3 channels in the source image. 166 // 167 // An output image with N components has the following components interleaved 168 // in this order in each pixel: 169 // 170 // N=#comp components 171 // 1 grey 172 // 2 grey, alpha 173 // 3 red, green, blue 174 // 4 red, green, blue, alpha 175 // 176 // If image loading fails for any reason, the return value will be null, 177 // and *x, *y, *channels_in_file will be unchanged. The function 178 // stbi_failure_reason() can be queried for an extremely brief, end-user 179 // unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS 180 // to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly 181 // more user-friendly ones. 182 // 183 // Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. 184 // 185 // To query the width, height and component count of an image without having to 186 // decode the full file, you can use the stbi_info family of functions: 187 // 188 // int x,y,n,ok; 189 // ok = stbi_info(filename, &x, &y, &n); 190 // // returns ok=1 and sets x, y, n if image is a supported format, 191 // // 0 otherwise. 192 // 193 // Note that stb_image pervasively uses ints in its public API for sizes, 194 // including sizes of memory buffers. This is now part of the API and thus 195 // hard to change without causing breakage. 
As a result, the various image 196 // loaders all have certain limits on image size; these differ somewhat 197 // by format but generally boil down to either just under 2GB or just under 198 // 1GB. When the decoded image would be larger than this, stb_image decoding 199 // will fail. 200 // 201 // Additionally, stb_image will reject image files that have any of their 202 // dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, 203 // which defaults to 2**24 = 16777216 pixels. Due to the above memory limit, 204 // the only way to have an image with such dimensions load correctly 205 // is for it to have a rather extreme aspect ratio. Either way, the 206 // assumption here is that such larger images are likely to be malformed 207 // or malicious. If you do need to load an image with individual dimensions 208 // larger than that, and it still fits in the overall size limit, you can 209 // #define STBI_MAX_DIMENSIONS on your own to be something larger. 210 // 211 // 212 // Philosophy 213 // 214 // stb libraries are designed with the following priorities: 215 // 216 // 1. easy to use 217 // 2. easy to maintain 218 // 3. good performance 219 // 220 // Sometimes I let "good performance" creep up in priority over "easy to maintain", 221 // and for best performance I may provide less-easy-to-use APIs that give higher 222 // performance, in addition to the easy-to-use ones. Nevertheless, it's important 223 // to keep in mind that from the standpoint of you, a client of this library, 224 // all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. 225 // 226 // Some secondary priorities arise directly from the first two, some of which 227 // provide more explicit reasons why performance can't be emphasized. 
228 // 229 // - Portable ("ease of use") 230 // - Small source code footprint ("easy to maintain") 231 // - No dependencies ("ease of use") 232 // 233 // =========================================================================== 234 // 235 // I/O callbacks 236 // 237 // I/O callbacks allow you to read from arbitrary sources, like packaged 238 // files or some other source. Data read from callbacks are processed 239 // through a small internal buffer (currently 128 bytes) to try to reduce 240 // overhead. 241 // 242 // The three functions you must define are "read" (reads some bytes of data), 243 // "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). 244 // 245 // =========================================================================== 246 // 247 // SIMD support 248 // 249 // The JPEG decoder will try to automatically use SIMD kernels on x86 when 250 // supported by the compiler. For ARM Neon support, you must explicitly 251 // request it. 252 // 253 // (The old do-it-yourself SIMD API is no longer supported in the current 254 // code.) 255 // 256 // On x86, SSE2 will automatically be used when available based on a run-time 257 // test; if not, the generic C versions are used as a fall-back. On ARM targets, 258 // the typical path is to have separate builds for NEON and non-NEON devices 259 // (at least this is true for iOS and Android). Therefore, the NEON support is 260 // toggled by a build flag: define STBI_NEON to get NEON loops. 261 // 262 // If for some reason you do not want to use any of SIMD code, or if 263 // you have issues compiling it, you can disable it entirely by 264 // defining STBI_NO_SIMD. 265 // 266 // =========================================================================== 267 // 268 // HDR image support (disable by defining STBI_NO_HDR) 269 // 270 // stb_image supports loading HDR images in general, and currently the Radiance 271 // .HDR file format specifically. 
You can still load any file through the existing 272 // interface; if you attempt to load an HDR file, it will be automatically remapped 273 // to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; 274 // both of these constants can be reconfigured through this interface: 275 // 276 // stbi_hdr_to_ldr_gamma(2.2f); 277 // stbi_hdr_to_ldr_scale(1.0f); 278 // 279 // (note, do not use _inverse_ constants; stbi_image will invert them 280 // appropriately). 281 // 282 // Additionally, there is a new, parallel interface for loading files as 283 // (linear) floats to preserve the full dynamic range: 284 // 285 // float *data = stbi_loadf(filename, &x, &y, &n, 0); 286 // 287 // If you load LDR images through this interface, those images will 288 // be promoted to floating point values, run through the inverse of 289 // constants corresponding to the above: 290 // 291 // stbi_ldr_to_hdr_scale(1.0f); 292 // stbi_ldr_to_hdr_gamma(2.2f); 293 // 294 // Finally, given a filename (or an open file or memory block--see header 295 // file for details) containing image data, you can query for the "most 296 // appropriate" interface to use (that is, whether the image is HDR or 297 // not), using: 298 // 299 // stbi_is_hdr(char *filename); 300 // 301 // =========================================================================== 302 // 303 // iPhone PNG support: 304 // 305 // We optionally support converting iPhone-formatted PNGs (which store 306 // premultiplied BGRA) back to RGB, even though they're internally encoded 307 // differently. To enable this conversion, call 308 // stbi_convert_iphone_png_to_rgb(1). 309 // 310 // Call stbi_set_unpremultiply_on_load(1) as well to force a divide per 311 // pixel to remove any premultiplied alpha *only* if the image file explicitly 312 // says there's premultiplied data (currently only happens in iPhone images, 313 // and only if iPhone convert-to-rgb processing is on). 
314 // 315 // =========================================================================== 316 // 317 // ADDITIONAL CONFIGURATION 318 // 319 // - You can suppress implementation of any of the decoders to reduce 320 // your code footprint by #defining one or more of the following 321 // symbols before creating the implementation. 322 // 323 // STBI_NO_JPEG 324 // STBI_NO_PNG 325 // STBI_NO_BMP 326 // STBI_NO_PSD 327 // STBI_NO_TGA 328 // STBI_NO_GIF 329 // STBI_NO_HDR 330 // STBI_NO_PIC 331 // STBI_NO_PNM (.ppm and .pgm) 332 // 333 // 334 // - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still 335 // want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB 336 // 337 // - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater 338 // than that size (in either width or height) without further processing. 339 // This is to let programs in the wild set an upper bound to prevent 340 // denial-of-service attacks on untrusted data, as one could generate a 341 // valid image of gigantic dimensions and force stb_image to allocate a 342 // huge block of memory and spend disproportionate time decoding it. By 343 // default this is set to (1 << 24), which is 16777216, but that's still 344 // very big. 345 346 enum STBI_VERSION = 1; 347 348 enum 349 { 350 STBI_default = 0, // only used for desired_channels 351 352 STBI_grey = 1, 353 STBI_grey_alpha = 2, 354 STBI_rgb = 3, 355 STBI_rgb_alpha = 4 356 } 357 358 alias stbi_uc = ubyte; 359 alias stbi_us = ushort; 360 361 ////////////////////////////////////////////////////////////////////////////// 362 // 363 // PRIMARY API - works on images of any type 364 // 365 366 // 367 // load image by filename, open file, or memory buffer 368 // 369 370 struct stbi_io_callbacks 371 { 372 nothrow @nogc @system: 373 // fill 'data' with 'size' bytes. 
return number of bytes actually read 374 int function(void *user,char *data,int size) read; 375 376 // skip the next 'n' bytes, or 'unget' the last -n bytes if negative 377 void function(void *user,int n) skip; 378 379 // returns nonzero if we are at end of file/data 380 int function(void *user) eof; 381 } 382 383 // <Implementation> 384 385 alias stbi__uint16 = ushort; 386 alias stbi__int16 = short; 387 alias stbi__uint32 = uint; 388 alias stbi__int32 = int; 389 390 uint stbi_lrot(uint x, int y) 391 { 392 return (x << y) | (x >> (-y & 31)); 393 } 394 395 void* STBI_MALLOC(size_t size) 396 { 397 return malloc(size); 398 } 399 400 void* STBI_REALLOC(void* p, size_t new_size) 401 { 402 return realloc(p, new_size); 403 } 404 405 void* STBI_REALLOC_SIZED(void *ptr, size_t old_size, size_t new_size) 406 { 407 return realloc(ptr, new_size); 408 } 409 410 void STBI_FREE(void* p) 411 { 412 free(p); 413 } 414 415 enum STBI_MAX_DIMENSIONS = (1 << 24); 416 417 /////////////////////////////////////////////// 418 // 419 // stbi__context struct and start_xxx functions 420 421 // stbi__context structure is our basic context used by all images, so it 422 // contains all the IO context, plus some basic image information 423 struct stbi__context 424 { 425 stbi__uint32 img_x, img_y; 426 int img_n, img_out_n; 427 428 stbi_io_callbacks io; 429 void *io_user_data; 430 431 int read_from_callbacks; 432 int buflen; 433 stbi_uc[128] buffer_start; 434 int callback_already_read; 435 436 stbi_uc *img_buffer, img_buffer_end; 437 stbi_uc *img_buffer_original, img_buffer_original_end; 438 439 float ppmX; 440 float ppmY; 441 float pixelAspectRatio; 442 } 443 444 445 // initialize a callback-based context 446 void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) 447 { 448 s.io = *c; 449 s.io_user_data = user; 450 s.buflen = s.buffer_start.sizeof; 451 s.read_from_callbacks = 1; 452 s.callback_already_read = 0; 453 s.img_buffer = s.img_buffer_original = s.buffer_start.ptr; 
454 stbi__refill_buffer(s); 455 s.img_buffer_original_end = s.img_buffer_end; 456 } 457 458 void stbi__rewind(stbi__context *s) 459 { 460 // conceptually rewind SHOULD rewind to the beginning of the stream, 461 // but we just rewind to the beginning of the initial buffer, because 462 // we only use it after doing 'test', which only ever looks at at most 92 bytes 463 s.img_buffer = s.img_buffer_original; 464 s.img_buffer_end = s.img_buffer_original_end; 465 } 466 467 enum 468 { 469 STBI_ORDER_RGB, 470 STBI_ORDER_BGR 471 } 472 473 struct stbi__result_info 474 { 475 int bits_per_channel; 476 int num_channels; 477 int channel_order; 478 } 479 480 alias stbi__malloc = STBI_MALLOC; 481 482 // stb_image uses ints pervasively, including for offset calculations. 483 // therefore the largest decoded image size we can support with the 484 // current code, even on 64-bit targets, is INT_MAX. this is not a 485 // significant limitation for the intended use case. 486 // 487 // we do, however, need to make sure our size calculations don't 488 // overflow. hence a few helper functions for size calculations that 489 // multiply integers together, making sure that they're non-negative 490 // and no overflow occurs. 491 492 // return 1 if the sum is valid, 0 on overflow. 493 // negative terms are considered invalid. 494 int stbi__addsizes_valid(int a, int b) 495 { 496 if (b < 0) return 0; 497 // now 0 <= b <= INT_MAX, hence also 498 // 0 <= INT_MAX - b <= INTMAX. 499 // And "a + b <= INT_MAX" (which might overflow) is the 500 // same as a <= INT_MAX - b (no overflow) 501 return a <= int.max - b; 502 } 503 504 // returns 1 if the product is valid, 0 on overflow. 505 // negative factors are considered invalid. 
506 int stbi__mul2sizes_valid(int a, int b) 507 { 508 if (a < 0 || b < 0) return 0; 509 if (b == 0) return 1; // mul-by-0 is always safe 510 // portable way to check for no overflows in a*b 511 return a <= int.max/b; 512 } 513 514 int stbi__mad2sizes_valid(int a, int b, int add) 515 { 516 return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); 517 } 518 519 // returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow 520 int stbi__mad3sizes_valid(int a, int b, int c, int add) 521 { 522 return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && 523 stbi__addsizes_valid(a*b*c, add); 524 } 525 526 // returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow 527 int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) 528 { 529 return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && 530 stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); 531 } 532 533 void *stbi__malloc_mad2(int a, int b, int add) 534 { 535 if (!stbi__mad2sizes_valid(a, b, add)) return null; 536 return stbi__malloc(a*b + add); 537 } 538 539 void *stbi__malloc_mad3(int a, int b, int c, int add) 540 { 541 if (!stbi__mad3sizes_valid(a, b, c, add)) return null; 542 return stbi__malloc(a*b*c + add); 543 } 544 545 void *stbi__malloc_mad4(int a, int b, int c, int d, int add) 546 { 547 if (!stbi__mad4sizes_valid(a, b, c, d, add)) return null; 548 return stbi__malloc(a*b*c*d + add); 549 } 550 551 // stbi__err - error 552 553 deprecated int stbi__err(const(char)* msg, const(char)* msgUser) 554 { 555 return 0; 556 } 557 558 // stbi__errpf - error returning pointer to float 559 // stbi__errpuc - error returning pointer to unsigned char 560 deprecated float* stbi__errpf(const(char)* msg, const(char)* msgUser) 561 { 562 return cast(float*) (cast(size_t) stbi__err(msg, msgUser)); 563 } 564 565 deprecated ubyte* stbi__errpuc(const(char)* msg, const(char)* msgUser) 566 { 567 return cast(ubyte*) (cast(size_t) 
stbi__err(msg, msgUser)); 568 } 569 570 void stbi_image_free(void *retval_from_stbi_load) @trusted // TODO: make it @safe by changing stbi_load to return a slice 571 { 572 STBI_FREE(retval_from_stbi_load); 573 } 574 575 void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) 576 { 577 memset(ri, 0, (*ri).sizeof); // make sure it's initialized if we add new fields 578 ri.bits_per_channel = 8; // default is 8 so most paths don't have to be changed 579 ri.channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order 580 ri.num_channels = 0; 581 582 // test the formats with a very explicit header first (at least a FOURCC 583 // or distinctive magic number first) 584 version(decodePNG) 585 { 586 if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); 587 } 588 return null; 589 } 590 591 stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) 592 { 593 int img_len = w * h * channels; 594 stbi_uc *reduced; 595 596 reduced = cast(stbi_uc *) stbi__malloc(img_len); 597 if (reduced == null) 598 return null; 599 600 for (int i = 0; i < img_len; ++i) 601 reduced[i] = cast(stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16.8 bit scaling 602 603 STBI_FREE(orig); 604 return reduced; 605 } 606 607 stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) 608 { 609 int i; 610 int img_len = w * h * channels; 611 stbi__uint16 *enlarged; 612 613 enlarged = cast(stbi__uint16 *) stbi__malloc(img_len*2); 614 if (enlarged == null) 615 return null; 616 617 for (i = 0; i < img_len; ++i) 618 enlarged[i] = (orig[i] << 8) + orig[i]; // replicate to high and low byte, maps 0.0, 255.0xffff 619 620 STBI_FREE(orig); 621 return enlarged; 622 } 623 624 625 ubyte *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) 626 { 627 stbi__result_info ri; 628 void *result = 
stbi__load_main(s, x, y, comp, req_comp, &ri, 8); 629 630 if (result == null) 631 return null; 632 633 // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. 634 assert(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); 635 636 if (ri.bits_per_channel != 8) { 637 result = stbi__convert_16_to_8(cast(stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); 638 ri.bits_per_channel = 8; 639 } 640 641 // @TODO: move stbi__convert_format to here 642 643 return cast(ubyte*) result; 644 } 645 646 stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) 647 { 648 stbi__result_info ri; 649 void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); 650 651 if (result == null) 652 return null; 653 654 // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. 655 assert(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); 656 657 if (ri.bits_per_channel != 16) { 658 result = stbi__convert_8_to_16(cast(stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); 659 ri.bits_per_channel = 16; 660 } 661 662 return cast(stbi__uint16 *) result; 663 } 664 665 void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) 666 { 667 } 668 669 stbi_us *stbi_load_16_from_callbacks(const(stbi_io_callbacks)*clbk, void *user, int *x, int *y, int *channels_in_file, 670 int desired_channels,float* ppmX, float* ppmY, float* pixelRatio) 671 { 672 stbi__context s; 673 stbi__start_callbacks(&s, cast(stbi_io_callbacks *)clbk, user); // const_cast here 674 stbi_us* res = stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); 675 *ppmX = s.ppmX; 676 *ppmY = s.ppmY; 677 *pixelRatio = s.pixelAspectRatio; 678 return res; 679 } 680 681 stbi_uc *stbi_load_from_callbacks(const(stbi_io_callbacks)*clbk, void *user, int *x, int *y, int *comp, int req_comp, 682 float* ppmX, float* ppmY, float* pixelRatio) 683 { 684 stbi__context s; 685 stbi__start_callbacks(&s, cast(stbi_io_callbacks *) clbk, user); // const_cast here 686 stbi_uc* res = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); 687 *ppmX = s.ppmX; 688 *ppmY = s.ppmY; 689 *pixelRatio = s.pixelAspectRatio; 690 return res; 691 } 692 693 version(enableLinear) 694 { 695 __gshared stbi__l2h_gamma = 2.2f; 696 __gshared stbi__l2h_scale = 1.0f; 697 698 void stbi_ldr_to_hdr_gamma(float gamma) 699 { 700 atomicStore(stbi__l2h_gamma, gamma); 701 } 702 703 void stbi_ldr_to_hdr_scale(float scale) 704 { 705 atomicStore(stbi__l2h_scale, scale); 706 } 707 } 708 709 710 shared(float) stbi__h2l_gamma_i = 1.0f / 2.2f, 711 stbi__h2l_scale_i = 1.0f; 712 713 void stbi_hdr_to_ldr_gamma(float gamma) 714 { 715 atomicStore(stbi__h2l_gamma_i, 1 / gamma); 716 } 717 718 void stbi_hdr_to_ldr_scale(float scale) 719 { 720 atomicStore(stbi__h2l_scale_i, 1 / scale); 721 } 722 723 724 ////////////////////////////////////////////////////////////////////////////// 725 // 726 // Common code used by all image loaders 727 // 728 729 enum 730 { 731 STBI__SCAN_load = 0, 
732 STBI__SCAN_type, 733 STBI__SCAN_header 734 } 735 736 void stbi__refill_buffer(stbi__context *s) 737 { 738 int n = s.io.read(s.io_user_data, cast(char*)s.buffer_start, s.buflen); 739 s.callback_already_read += cast(int) (s.img_buffer - s.img_buffer_original); 740 if (n == 0) { 741 // at end of file, treat same as if from memory, but need to handle case 742 // where s.img_buffer isn't pointing to safe memory, e.g. 0-byte file 743 s.read_from_callbacks = 0; 744 s.img_buffer = s.buffer_start.ptr; 745 s.img_buffer_end = s.buffer_start.ptr+1; 746 *s.img_buffer = 0; 747 } else { 748 s.img_buffer = s.buffer_start.ptr; 749 s.img_buffer_end = s.buffer_start.ptr + n; 750 } 751 } 752 753 stbi_uc stbi__get8(stbi__context *s) 754 { 755 if (s.img_buffer < s.img_buffer_end) 756 return *s.img_buffer++; 757 if (s.read_from_callbacks) { 758 stbi__refill_buffer(s); 759 return *s.img_buffer++; 760 } 761 return 0; 762 } 763 764 int stbi__at_eof(stbi__context *s) 765 { 766 if (s.io.read) 767 { 768 if (!s.io.eof(s.io_user_data)) 769 return 0; 770 // if feof() is true, check if buffer = end 771 // special case: we've only got the special 0 character at the end 772 if (s.read_from_callbacks == 0) 773 return 1; 774 } 775 return s.img_buffer >= s.img_buffer_end; 776 } 777 778 void stbi__skip(stbi__context *s, int n) 779 { 780 if (n == 0) 781 return; // already there! 
// --- tail of stbi__skip (its signature appears earlier in the file) ---
// Advances the read cursor by n bytes: buffered bytes are consumed first,
// then the remainder is skipped through the I/O callback when one is set.
    if (n < 0)
    {
        // Negative skip: treat as "consume everything buffered".
        s.img_buffer = s.img_buffer_end;
        return;
    }
    if (s.io.read)
    {
        int blen = cast(int) (s.img_buffer_end - s.img_buffer);
        if (blen < n)
        {
            // Buffer does not hold n bytes: drain it, delegate the rest to io.skip.
            s.img_buffer = s.img_buffer_end;
            s.io.skip(s.io_user_data, n - blen);
            return;
        }
    }
    s.img_buffer += n;
}

/// Reads exactly `n` bytes into `buffer`, taking buffered bytes first and
/// then reading the remainder through the I/O callback.
/// Returns: 1 on success, 0 if fewer than `n` bytes were available.
int stbi__getn(stbi__context *s, stbi_uc *buffer, int n)
{
    if (s.io.read)
    {
        int blen = cast(int) (s.img_buffer_end - s.img_buffer);
        if (blen < n)
        {
            // Copy what is buffered, then read the remainder directly.
            int res, count;
            memcpy(buffer, s.img_buffer, blen);
            count = s.io.read(s.io_user_data, cast(char*) buffer + blen, n - blen);
            res = (count == (n-blen));
            s.img_buffer = s.img_buffer_end;
            return res;
        }
    }

    if (s.img_buffer+n <= s.img_buffer_end)
    {
        memcpy(buffer, s.img_buffer, n);
        s.img_buffer += n;
        return 1;
    }
    else
        return 0;
}

/// Reads a 16-bit big-endian integer.
int stbi__get16be(stbi__context *s)
{
    int z = stbi__get8(s);
    return (z << 8) + stbi__get8(s);
}

/// Reads a 32-bit big-endian integer (PNG byte order).
stbi__uint32 stbi__get32be(stbi__context *s)
{
    stbi__uint32 z = stbi__get16be(s);
    return (z << 16) + stbi__get16be(s);
}

/// Reads a 16-bit little-endian integer (BMP byte order).
int stbi__get16le(stbi__context *s)
{
    int z = stbi__get8(s);
    return z + (stbi__get8(s) << 8);
}

/// Reads a 32-bit little-endian integer (BMP byte order).
stbi__uint32 stbi__get32le(stbi__context *s)
{
    stbi__uint32 z = stbi__get16le(s);
    z += cast(stbi__uint32)stbi__get16le(s) << 16;
    return z;
}

/// Truncates any integer to its low 8 bits (D port of the STBI__BYTECAST macro).
ubyte STBI__BYTECAST(T)(T x)
{
    return cast(ubyte)(x & 255);
}

//////////////////////////////////////////////////////////////////////////////
//
//  generic converter from built-in img_n to req_comp
//    individual types do this automatically as much as possible (e.g. jpeg
//    does all cases internally since it needs to colorspace convert anyway,
//    and it never has alpha, so very few cases ). png can automatically
//    interleave an alpha=255 channel, but falls back to this for other cases
//
//  assume data buffer is malloced, so malloc a new one and free that one
//  only failure mode is malloc failing

/// Luma from 8-bit RGB using integer weights 77/150/29 (weights sum to 256).
stbi_uc stbi__compute_y(int r, int g, int b)
{
    return cast(ubyte)(((r * 77) + (g * 150) + (29 * b)) >> 8);
}

/// Converts an 8-bit image from img_n components per pixel to req_comp.
/// Takes ownership of `data` (it is always freed); returns a newly allocated
/// buffer, or null on allocation failure.
ubyte *stbi__convert_format(ubyte *data, int img_n, int req_comp, uint x, uint y)
{
    int i,j;
    ubyte *good;

    if (req_comp == img_n)
        return data;
    assert(req_comp >= 1 && req_comp <= 4);

    good = cast(ubyte*) stbi__malloc_mad3(req_comp, x, y, 0);
    if (good == null)
    {
        STBI_FREE(data);
        return null;
    }

    for (j = 0; j < cast(int) y; ++j)
    {
        ubyte *src = data + j * x * img_n ;
        ubyte *dest = good + j * x * req_comp;

        // convert source image with img_n components to one with req_comp components;
        // avoid switch per pixel, so use switch per scanline and massive macros
        switch (img_n * 8 + req_comp)
        {
            case 1 * 8 + 2:
            {
                // gray -> gray+alpha (opaque)
                for(i = x - 1; i >= 0; --i, src += 1, dest += 2)
                {
                    dest[0] = src[0];
                    dest[1] = 255;
                }
            }
            break;
            case 1 * 8 + 3:
            {
                // gray -> RGB
                for(i = x - 1; i >= 0; --i, src += 1, dest += 3)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                }
            }
            break;
            case 1 * 8 + 4:
                // gray -> RGBA (opaque)
                for(i = x - 1; i >= 0; --i, src += 1, dest += 4)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                    dest[3] = 255;
                }
                break;
            case 2 * 8 + 1:
            {
                // gray+alpha -> gray (alpha dropped)
                for(i = x - 1; i >= 0; --i, src += 2, dest += 1)
                {
                    dest[0] = src[0];
                }
            }
            break;
            case 2 * 8 + 3:
            {
                // gray+alpha -> RGB (alpha dropped)
                for(i = x - 1; i >= 0; --i, src += 2, dest += 3)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                }
            }
            break;
            case 2 * 8 + 4:
            {
                // gray+alpha -> RGBA
                for(i = x - 1; i >= 0; --i, src += 2, dest += 4)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                    dest[3] = src[1];
                }
            }
            break;
            case 3 * 8 + 4:
            {
                // RGB -> RGBA (opaque)
                for(i = x - 1; i >= 0; --i, src += 3, dest += 4)
                {
                    dest[0] = src[0];
                    dest[1] = src[1];
                    dest[2] = src[2];
                    dest[3] = 255;
                }
            }
            break;
            case 3 * 8 + 1:
            {
                // RGB -> gray
                for(i = x - 1; i >= 0; --i, src += 3, dest += 1)
                {
                    dest[0] = stbi__compute_y(src[0],src[1],src[2]);
                }
            }
            break;
            case 3 * 8 + 2:
            {
                // RGB -> gray+alpha (opaque)
                for(i = x - 1; i >= 0; --i, src += 3, dest += 2)
                {
                    dest[0] = stbi__compute_y(src[0],src[1],src[2]);
                    dest[1] = 255;
                }
            }
            break;

            case 4 * 8 + 1:
            {
                // RGBA -> gray (alpha dropped)
                for(i = x - 1; i >= 0; --i, src += 4, dest += 1)
                {
                    dest[0] = stbi__compute_y(src[0],src[1],src[2]);
                }
            }
            break;

            case 4 * 8 + 2:
            {
                // RGBA -> gray+alpha
                for(i = x - 1; i >= 0; --i, src += 4, dest += 2)
                {
                    dest[0] = stbi__compute_y(src[0],src[1],src[2]);
                    dest[1] = src[3];
                }
            }
            break;
            case 4 * 8 + 3:
            {
                // RGBA -> RGB (alpha dropped)
                for(i = x - 1; i >= 0; --i, src += 4, dest += 3)
                {
                    dest[0] = src[0];
                    dest[1] = src[1];
                    dest[2] = src[2];
                }
            }
            break;
            default:
                assert(0); // unsupported img_n/req_comp combination
        }
    }

    STBI_FREE(data);
    return good;
}

/// Luma from 16-bit RGB. Note the >>8 (not >>16): same fixed-point weights
/// as the 8-bit path, applied to 16-bit samples.
stbi__uint16 stbi__compute_y_16(int r, int g, int b)
{
    return cast(stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8);
}

/// 16-bit variant of stbi__convert_format. Takes ownership of `data`
/// (always freed); returns a newly allocated buffer or null.
stbi__uint16* stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, uint x, uint y)
{
    int i,j;
    stbi__uint16 *good;

    if (req_comp == img_n)
        return data;
    assert(req_comp >= 1 && req_comp <= 4);

    // NOTE(review): unlike the 8-bit path (stbi__malloc_mad3), this multiply
    // has no overflow check — confirm callers bound req_comp*x*y*2.
    good = cast(stbi__uint16 *) stbi__malloc(req_comp * x * y * 2);
    if (good == null)
    {
        STBI_FREE(data);
        return null;
    }

    for (j = 0; j < cast(int) y; ++j)
    {
        stbi__uint16 *src = data + j * x * img_n ;
        stbi__uint16 *dest = good + j * x * req_comp;

        // convert source image with img_n components to one with req_comp components;
        // avoid switch per pixel, so use switch per scanline and massive macros
        switch (img_n *
8 + req_comp)
        {
            // Same per-scanline dispatch as the 8-bit converter, with 0xffff
            // as the opaque alpha value.
            case 1 * 8 + 2:
            {
                for(i = x - 1; i >= 0; --i, src += 1, dest += 2)
                {
                    dest[0] = src[0];
                    dest[1] = 0xffff;
                }
            }
            break;
            case 1 * 8 + 3:
            {
                for(i = x - 1; i >= 0; --i, src += 1, dest += 3)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                }
            }
            break;
            case 1 * 8 + 4:
                for(i = x - 1; i >= 0; --i, src += 1, dest += 4)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                    dest[3] = 0xffff;
                }
                break;
            case 2 * 8 + 1:
            {
                for(i = x - 1; i >= 0; --i, src += 2, dest += 1)
                {
                    dest[0] = src[0];
                }
            }
            break;
            case 2 * 8 + 3:
            {
                for(i = x - 1; i >= 0; --i, src += 2, dest += 3)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                }
            }
            break;
            case 2 * 8 + 4:
            {
                for(i = x - 1; i >= 0; --i, src += 2, dest += 4)
                {
                    dest[0] = dest[1] = dest[2] = src[0];
                    dest[3] = src[1];
                }
            }
            break;
            case 3 * 8 + 4:
            {
                for(i = x - 1; i >= 0; --i, src += 3, dest += 4)
                {
                    dest[0] = src[0];
                    dest[1] = src[1];
                    dest[2] = src[2];
                    dest[3] = 0xffff;
                }
            }
            break;
            case 3 * 8 + 1:
            {
                for(i = x - 1; i >= 0; --i, src += 3, dest += 1)
                {
                    dest[0] = stbi__compute_y_16(src[0],src[1],src[2]);
                }
            }
            break;
            case 3 * 8 + 2:
            {
                for(i = x - 1; i >= 0; --i, src += 3, dest += 2)
                {
                    dest[0] = stbi__compute_y_16(src[0],src[1],src[2]);
                    dest[1] = 0xffff;
                }
            }
            break;

            case 4 * 8 + 1:
            {
                for(i = x - 1; i >= 0; --i, src += 4, dest += 1)
                {
                    dest[0] = stbi__compute_y_16(src[0],src[1],src[2]);
                }
            }
            break;

            case 4 * 8 + 2:
            {
                for(i = x - 1; i >= 0; --i, src += 4, dest += 2)
                {
                    dest[0] = stbi__compute_y_16(src[0],src[1],src[2]);
                    dest[1] = src[3];
                }
            }
            break;
            case 4 * 8 + 3:
            {
                for(i = x - 1; i >= 0; --i, src += 4, dest += 3)
                {
                    dest[0] = src[0];
                    dest[1] = src[1];
                    dest[2] = src[2];
                }
            }
            break;
            default:
                assert(0); // unsupported img_n/req_comp combination
        }
    }

    STBI_FREE(data);
    return good;
}

version(enableLinear)
{
    /// Expands 8-bit LDR data to float, applying stbi__l2h_gamma and
    /// stbi__l2h_scale to the color channels; an alpha channel (present when
    /// comp is even) is mapped linearly to 0..1 instead.
    /// Takes ownership of `data` (always freed); returns null on failure.
    float* stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp)
    {
        int i,k,n;
        float *output;
        if (!data) return null;
        output = cast(float *) stbi__malloc_mad4(x, y, comp, float.sizeof, 0);
        if (output == null)
        {
            STBI_FREE(data);
            return null;
        }
        // compute number of non-alpha components
        if (comp & 1)
            n = comp;
        else
            n = comp - 1;
        for (i = 0; i < x*y; ++i)
        {
            for (k = 0; k < n; ++k)
            {
                output[i*comp + k] = cast(float) (pow(data[i*comp+k] / 255.0f, stbi__l2h_gamma) * stbi__l2h_scale);
            }
        }
        if (n < comp)
        {
            // Alpha channel: plain linear scaling, no gamma.
            for (i=0; i < x*y; ++i)
            {
                output[i*comp + n] = data[i*comp + n] / 255.0f;
            }
        }
        STBI_FREE(data);
        return output;
    }
}

/// Float-to-int conversion by truncation toward zero.
int stbi__float2int(float x)
{
    return cast(int)x;
}

// public domain zlib decode    v0.2  Sean Barrett 2006-11-18
//    simple implementation
//      - all input must be provided in an upfront buffer
//      - all output is written to a single output buffer (can malloc/realloc)
//    performance
//      - fast huffman

// fast-way is faster to check than jpeg huffman, but slow way is slower
enum STBI__ZFAST_BITS = 9; // accelerate all cases in default tables
enum STBI__ZFAST_MASK = ((1 << STBI__ZFAST_BITS) - 1);
enum STBI__ZNSYMS = 288; // number of symbols in literal/length alphabet

// zlib-style huffman encoding
// (jpegs packs from left, zlib from right, so can't share code)
struct stbi__zhuffman
{
    stbi__uint16[1 << STBI__ZFAST_BITS] fast; // accelerated lookup for codes <= STBI__ZFAST_BITS bits
    stbi__uint16[16] firstcode;   // first canonical code of each bit length
    int[17] maxcode;              // per-length upper bound, preshifted by (16-i)
    stbi__uint16[16] firstsymbol; // first symbol index for each bit length
    stbi_uc[STBI__ZNSYMS]
size; // code length in bits for each symbol (slow path)
    stbi__uint16[STBI__ZNSYMS] value; // symbol for each code (slow path)
}

/// Reverses the bit order of a 16-bit value.
int stbi__bitreverse16(int n)
{
    n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
    n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
    n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
    n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
    return n;
}

/// Reverses the low `bits` bits of v (zlib packs codes from the right,
/// so codes must be bit-reversed for table lookup).
int stbi__bit_reverse(int v, int bits)
{
    assert(bits <= 16);
    // to bit reverse n bits, reverse 16 and shift
    // e.g. 11 bits, bit reverse and shift away 5
    return stbi__bitreverse16(v) >> (16-bits);
}

/// Builds canonical Huffman decode tables from a DEFLATE code-length list.
/// Returns: 1 on success, 0 on a corrupt length distribution.
int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num)
{
    int i,k=0;
    int code;
    int[16] next_code;
    int[17] sizes;

    // DEFLATE spec for generating codes
    memset(sizes.ptr, 0, sizes.sizeof);
    memset(z.fast.ptr, 0, z.fast.sizeof);
    for (i=0; i < num; ++i)
        ++sizes[sizelist[i]];
    sizes[0] = 0;
    for (i=1; i < 16; ++i)
        if (sizes[i] > (1 << i))
            return 0; // stbi__err("bad sizes", "Corrupt PNG");
    code = 0;
    for (i=1; i < 16; ++i) {
        next_code[i] = code;
        z.firstcode[i] = cast(stbi__uint16) code;
        z.firstsymbol[i] = cast(stbi__uint16) k;
        code = (code + sizes[i]);
        if (sizes[i])
            if (code-1 >= (1 << i)) return 0; // stbi__err("bad codelengths","Corrupt PNG");
        z.maxcode[i] = code << (16-i); // preshift for inner loop
        code <<= 1;
        k += sizes[i];
    }
    z.maxcode[16] = 0x10000; // sentinel
    for (i=0; i < num; ++i) {
        int s = sizelist[i];
        if (s) {
            int c = next_code[s] - z.firstcode[s] + z.firstsymbol[s];
            stbi__uint16 fastv = cast(stbi__uint16) ((s << 9) | i);
            z.size [c] = cast(stbi_uc ) s;
            z.value[c] = cast(stbi__uint16) i;
            if (s <= STBI__ZFAST_BITS) {
                // Short code: replicate the entry into every fast-table slot
                // whose low s bits match the (bit-reversed) code.
                int j = stbi__bit_reverse(next_code[s],s);
                while (j < (1 << STBI__ZFAST_BITS)) {
                    z.fast[j] = fastv;
                    j += (1 << s);
                }
            }
            ++next_code[s];
        }
    }
    return 1;
}

version(decodePNG)
{
    import gamut.codecs.miniz;

    /// Inflates a zlib/deflate stream using miniz, growing the output buffer
    /// on demand. Returns a malloc'ed buffer the caller must free, or null.
    /// Params:
    ///    buffer = Input buffer
    ///    len = Length of input buffer
    ///    initial_size = Size hint for output buffer (which is realloc on growth)
    ///    outlen = Receives the decompressed length.
    ///    parse_header = Non-zero to expect a zlib header (PNG IDAT), zero for raw deflate.
    public ubyte *stbi_zlib_decode_malloc_guesssize_headerflag(
        const(char)*buffer,
        int len,
        int initial_size, // note: stb_image gives the right initial_size, and the right outout buffer length
        int *outlen,
        int parse_header)
    {
        ubyte* outBuf = cast(ubyte*) malloc(initial_size);
        if (outBuf == null)
            return null;

        c_ulong destLen = initial_size;
        while(true)
        {
            c_ulong inputLen = len;
            bool trusted_input = true; // this allows to not check adler32, but I'm not sure how safe that is. #SECURITY

            int res = mz_uncompress3(outBuf,
                                     &destLen,
                                     cast(const(ubyte)*) buffer,
                                     &inputLen,
                                     parse_header ? MZ_DEFAULT_WINDOW_BITS : -MZ_DEFAULT_WINDOW_BITS,
                                     trusted_input);

            if (res == MZ_OK)
                break;

            if (res == MZ_BUF_ERROR)
            {
                // Output buffer too small: grow geometrically and retry.
                if (initial_size > 536_870_912) // That much bytes is suspicious in just a zlib chunk
                {
                    free(outBuf);
                    return null;
                }

                initial_size = initial_size*2;
                if (initial_size < 32*1024)
                    initial_size = 32*1024;

                outBuf = cast(ubyte*) realloc(outBuf, initial_size);
                if (outBuf == null)
                    return null;
                destLen = initial_size;
            }
            else
            {
                // Hard decompression error: give up.
                free(outBuf);
                return null;
            }
        }
        *outlen = cast(int)(destLen);
        return outBuf;
    }
}

// public domain "baseline" PNG decoder   v0.10  Sean Barrett 2006-11-18
//    simple implementation
//      - only 8-bit samples
//      - no CRC checking
//      - allocates lots of intermediate memory
//      - avoids problem of streaming data between subsystems
//      - avoids explicit window management
//    performance
//      - uses
// stb_zlib, a PD zlib implementation with fast huffman decoding

version(decodePNG)
{
    /// Length/type pair read from a PNG chunk header.
    struct stbi__pngchunk
    {
        stbi__uint32 length;
        stbi__uint32 type;
    }

    /// Reads the next chunk's 8-byte header (big-endian length, then type).
    stbi__pngchunk stbi__get_chunk_header(stbi__context *s)
    {
        stbi__pngchunk c;
        c.length = stbi__get32be(s);
        c.type = stbi__get32be(s);
        return c;
    }

    /// Consumes and verifies the 8-byte PNG signature.
    /// Returns: 1 if the stream starts with a valid PNG signature, else 0.
    int stbi__check_png_header(stbi__context *s)
    {
        static immutable stbi_uc[8] png_sig = [ 137,80,78,71,13,10,26,10 ];
        int i;
        for (i=0; i < 8; ++i)
            if (stbi__get8(s) != png_sig[i])
                return 0; //stbi__err("bad png sig","Not a PNG");
        return 1;
    }

    /// Per-decode PNG state; the three buffers hold intermediate data and
    /// the final output.
    struct stbi__png
    {
        stbi__context *s;
        stbi_uc* idata;    // concatenated raw IDAT payload
        stbi_uc* expanded; // inflated (post-zlib) filtered scanlines
        stbi_uc* out_;     // final de-filtered pixel data
        int depth;         // bit depth from IHDR: 1/2/4/8/16
    }

    // PNG scanline filter types (values 0-4 as in the PNG spec).
    enum
    {
        STBI__F_none=0,
        STBI__F_sub=1,
        STBI__F_up=2,
        STBI__F_avg=3,
        STBI__F_paeth=4,
        // synthetic filters used for first scanline to avoid needing a dummy row of 0s
        STBI__F_avg_first,
        STBI__F_paeth_first
    }

    // Filter substitutions for row 0, where there is no previous row.
    static immutable stbi_uc[5] first_row_filter =
    [
        STBI__F_none,
        STBI__F_sub,
        STBI__F_none,
        STBI__F_avg_first,
        STBI__F_paeth_first
    ];

    /// Paeth predictor: picks whichever of a (left), b (above), c (upper-left)
    /// is closest to a+b-c.
    int stbi__paeth(int a, int b, int c)
    {
        int p = a + b - c;
        int pa = abs(p-a);
        int pb = abs(p-b);
        int pc = abs(p-c);
        if (pa <= pb && pa <= pc)
            return a;
        if (pb <= pc)
            return b;
        return c;
    }

    // Multipliers that expand a 1/2/4-bit sample to the full 0..255 range
    // (indexed by bit depth; unused depths are 0).
    static immutable stbi_uc[9] stbi__depth_scale_table = [ 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 ];

    // create the png data from post-deflated data
    /// De-filters one (sub)image of raw inflated scanlines into a.out_.
    /// Returns: 1 on success, 0 on corrupt data or allocation failure.
    int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color)
    {
        int bytes = (depth == 16? 2 : 1);
        stbi__context *s = a.s;
        stbi__uint32 i,j,stride = x*out_n*bytes;
        stbi__uint32 img_len, img_width_bytes;
        int k;
        int img_n = s.img_n; // copy it into a local for later

        int output_bytes = out_n*bytes;
        int filter_bytes = img_n*bytes;
        int width = x;

        assert(out_n == s.img_n || out_n == s.img_n+1);
        a.out_ = cast(stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into
        if (!a.out_) return 0; //stbi__err("outofmem", "Out of memory");

        if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return 0; //stbi__err("too large", "Corrupt PNG");
        img_width_bytes = (((img_n * x * depth) + 7) >> 3);
        img_len = (img_width_bytes + 1) * y;

        // we used to check for exact match between raw_len and img_len on non-interlaced PNGs,
        // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros),
        // so just check for raw_len < img_len always.
        if (raw_len < img_len) return 0; //stbi__err("not enough pixels","Corrupt PNG");

        for (j=0; j < y; ++j)
        {
            stbi_uc *cur = a.out_ + stride*j;
            stbi_uc *prior;
            int filter = *raw++; // each scanline is prefixed with its filter byte

            if (filter > 4)
                return 0; //stbi__err("invalid filter","Corrupt PNG");

            if (depth < 8) {
                if (img_width_bytes > x) return 0; //stbi__err("invalid width","Corrupt PNG");
                cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place
                filter_bytes = 1;
                width = img_width_bytes;
            }
            prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above

            // if first row, use special filter that doesn't sample previous row
            if (j == 0) filter = first_row_filter[filter];

            // handle first byte explicitly
            for (k=0; k < filter_bytes; ++k)
            {
                switch (filter) {
                    case STBI__F_none       : cur[k] = raw[k]; break;
                    case STBI__F_sub        : cur[k] = raw[k]; break;
                    case STBI__F_up         : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
                    case STBI__F_avg        : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break;
                    case STBI__F_paeth      : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break;
                    case STBI__F_avg_first  : cur[k] = raw[k]; break;
                    case STBI__F_paeth_first: cur[k] = raw[k]; break;
                    default: assert(false);
                }
            }

            // Advance past the first pixel, writing alpha=255 when expanding
            // img_n -> img_n+1 components.
            if (depth == 8) {
                if (img_n != out_n)
                    cur[img_n] = 255; // first pixel
                raw += img_n;
                cur += out_n;
                prior += out_n;
            } else if (depth == 16) {
                if (img_n != out_n) {
                    cur[filter_bytes] = 255; // first pixel top byte
                    cur[filter_bytes+1] = 255; // first pixel bottom byte
                }
                raw += filter_bytes;
                cur += output_bytes;
                prior += output_bytes;
            } else {
                raw += 1;
                cur += 1;
                prior += 1;
            }

            // this is a little gross, so that we don't switch per-pixel or per-component
            if (depth < 8 || img_n == out_n) {
                int nk = (width - 1)*filter_bytes;
                switch (filter) {
                    // "none" filter turns into a memcpy here; make that explicit.
                    case STBI__F_none:
                        memcpy(cur, raw, nk);
                        break;
                    case STBI__F_sub:   for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
                    case STBI__F_up:    for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
                    case STBI__F_avg:   for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break;
                    case STBI__F_paeth: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break;
                    case STBI__F_avg_first:   for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break;
                    case STBI__F_paeth_first: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break;
                    default: assert(0);
                }
                raw += nk;
            } else {
                // Expanding to out_n = img_n+1: de-filter per pixel, writing
                // alpha=255 into the extra channel as we go.
                assert(img_n+1 == out_n);
                switch (filter) {
                    case STBI__F_none:
                        for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes)
                            for (k=0; k < filter_bytes; ++k)
                            { cur[k] = raw[k]; } break;
                    case STBI__F_sub:
                        for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes)
                            for (k=0; k < filter_bytes; ++k)
                            { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break;
                    case STBI__F_up:
                        for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes)
                            for (k=0; k < filter_bytes; ++k)
                            { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
                    case STBI__F_avg:
                        for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes)
                            for (k=0; k < filter_bytes; ++k)
                            { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break;
                    case STBI__F_paeth:
                        for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes)
                            for (k=0; k < filter_bytes; ++k)
                            { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break;
                    case STBI__F_avg_first:
                        for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes)
                            for (k=0; k < filter_bytes; ++k)
                            { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break;
                    case STBI__F_paeth_first:
                        for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes)
                            for (k=0; k < filter_bytes; ++k)
                            { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break;
                    default: assert(0);
                }

                // the loop above sets the high byte of the pixels' alpha, but for
                // 16 bit png files we also need the low byte set. we'll do that here.
                if (depth == 16) {
                    cur = a.out_ + stride*j; // start at the beginning of the row again
                    for (i=0; i < x; ++i,cur+=output_bytes) {
                        cur[filter_bytes+1] = 255;
                    }
                }
            }
        }

        // we make a separate pass to expand bits to pixels; for performance,
        // this could run two scanlines behind the above code, so it won't
        // intefere with filtering but will still be in the cache.
        if (depth < 8) {
            for (j=0; j < y; ++j) {
                stbi_uc *cur = a.out_ + stride*j;
                stbi_uc *in_ = a.out_ + stride*j + x*out_n - img_width_bytes;
                // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
                // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
                stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range

                // note that the final byte might overshoot and write more data than desired.
                // we can allocate enough data that this never writes out of memory, but it
                // could also overwrite the next scanline. can it overwrite non-empty data
                // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
                // so we need to explicitly clamp the final ones

                if (depth == 4) {
                    for (k=x*img_n; k >= 2; k-=2, ++in_) {
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 4)) );
                        *cur++ = cast(ubyte)(scale * ((*in_ ) & 0x0f));
                    }
                    if (k > 0) *cur++ = cast(ubyte)(scale * ((*in_ >> 4) ));
                } else if (depth == 2) {
                    for (k=x*img_n; k >= 4; k-=4, ++in_) {
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 6) ));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x03));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x03));
                        *cur++ = cast(ubyte)(scale * ((*in_ ) & 0x03));
                    }
                    if (k > 0) *cur++ = cast(ubyte)(scale * ((*in_ >> 6) ));
                    if (k > 1) *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x03));
                    if (k > 2) *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x03));
                } else if (depth == 1) {
                    for (k=x*img_n; k >= 8; k-=8, ++in_) {
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 7) ));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 6) & 0x01));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 5) & 0x01));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x01));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 3) & 0x01));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x01));
                        *cur++ = cast(ubyte)(scale * ((*in_ >> 1) & 0x01));
                        *cur++ = cast(ubyte)(scale * ((*in_ ) & 0x01));
                    }
                    if (k > 0) *cur++ = cast(ubyte)(scale * ((*in_ >> 7) ));
                    if (k > 1) *cur++ = cast(ubyte)(scale * ((*in_ >> 6) & 0x01));
                    if (k > 2) *cur++ = cast(ubyte)(scale * ((*in_ >> 5) & 0x01));
                    if (k > 3) *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x01));
                    if (k > 4) *cur++ = cast(ubyte)(scale * ((*in_ >> 3) & 0x01));
                    if (k > 5) *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x01));
                    if (k > 6) *cur++ = cast(ubyte)(scale * ((*in_ >> 1) & 0x01));
                }
                if (img_n != out_n) {
                    int q;
                    // insert alpha = 255
                    cur = a.out_ + stride*j;
                    if (img_n == 1) {
                        for (q=x-1; q >= 0; --q) {
                            cur[q*2+1] = 255;
                            cur[q*2+0] = cur[q];
                        }
                    } else {
                        assert(img_n == 3);
                        for (q=x-1; q >= 0; --q) {
                            cur[q*4+3] = 255;
                            cur[q*4+2] = cur[q*3+2];
                            cur[q*4+1] = cur[q*3+1];
                            cur[q*4+0] = cur[q*3+0];
                        }
                    }
                }
            }
        } else if (depth == 16) {
            // force the image data from big-endian to platform-native.
            // this is done in a separate pass due to the decoding relying
            // on the data being untouched, but could probably be done
            // per-line during decode if care is taken.
            stbi_uc *cur = a.out_;
            stbi__uint16 *cur16 = cast(stbi__uint16*)cur;

            for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) {
                *cur16 = (cur[0] << 8) | cur[1];
            }
        }

        return 1;
    }

    /// Decodes a full PNG image, dispatching to the raw path directly or
    /// decoding and recombining the 7 Adam7 interlace passes.
    int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced)
    {
        int bytes = (depth == 16 ?
2 : 1);
        int out_bytes = out_n * bytes;
        stbi_uc *final_;
        int p;
        if (!interlaced)
            return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a.s.img_x, a.s.img_y, depth, color);

        // de-interlacing
        final_ = cast(stbi_uc *) stbi__malloc_mad3(a.s.img_x, a.s.img_y, out_bytes, 0);
        if (!final_) return 0; //stbi__err("outofmem", "Out of memory");
        for (p=0; p < 7; ++p) {
            // Adam7 pass origins and spacings.
            static immutable int[7] xorig = [ 0,4,0,2,0,1,0 ];
            static immutable int[7] yorig = [ 0,0,4,0,2,0,1 ];
            static immutable int[7] xspc = [ 8,8,4,4,2,2,1 ];
            static immutable int[7] yspc = [ 8,8,8,4,4,2,2 ];
            int i,j,x,y;
            // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1
            x = (a.s.img_x - xorig[p] + xspc[p]-1) / xspc[p];
            y = (a.s.img_y - yorig[p] + yspc[p]-1) / yspc[p];
            if (x && y) {
                stbi__uint32 img_len = ((((a.s.img_n * x * depth) + 7) >> 3) + 1) * y;
                if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) {
                    STBI_FREE(final_);
                    return 0;
                }
                // Scatter this pass's pixels to their final positions.
                for (j=0; j < y; ++j) {
                    for (i=0; i < x; ++i) {
                        int out_y = j*yspc[p]+yorig[p];
                        int out_x = i*xspc[p]+xorig[p];
                        memcpy(final_ + out_y*a.s.img_x*out_bytes + out_x*out_bytes,
                               a.out_ + (j*x+i)*out_bytes, out_bytes);
                    }
                }
                STBI_FREE(a.out_);
                image_data += img_len;
                image_data_len -= img_len;
            }
        }
        a.out_ = final_;

        return 1;
    }

    /// Applies an 8-bit tRNS color key: pixels matching tc become alpha 0.
    /// Assumes the alpha channel was pre-filled with 255.
    int stbi__compute_transparency(stbi__png *z, stbi_uc* tc, int out_n)
    {
        stbi__context *s = z.s;
        stbi__uint32 i, pixel_count = s.img_x * s.img_y;
        stbi_uc *p = z.out_;

        // compute color-based transparency, assuming we've
        // already got 255 as the alpha value in the output
        assert(out_n == 2 || out_n == 4);

        if (out_n == 2) {
            for (i=0; i < pixel_count; ++i) {
                p[1] = (p[0] == tc[0] ? 0 : 255);
                p += 2;
            }
        } else {
            for (i=0; i < pixel_count; ++i) {
                if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
                    p[3] = 0;
                p += 4;
            }
        }
        return 1;
    }

    /// 16-bit variant of stbi__compute_transparency (alpha pre-filled with 65535).
    int stbi__compute_transparency16(stbi__png *z, stbi__uint16* tc, int out_n)
    {
        stbi__context *s = z.s;
        stbi__uint32 i, pixel_count = s.img_x * s.img_y;
        stbi__uint16 *p = cast(stbi__uint16*) z.out_;

        // compute color-based transparency, assuming we've
        // already got 65535 as the alpha value in the output
        assert(out_n == 2 || out_n == 4);

        if (out_n == 2) {
            for (i = 0; i < pixel_count; ++i) {
                p[1] = (p[0] == tc[0] ? 0 : 65535);
                p += 2;
            }
        } else {
            for (i = 0; i < pixel_count; ++i) {
                if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
                    p[3] = 0;
                p += 4;
            }
        }
        return 1;
    }

    /// Replaces the palette-index image in a.out_ with an expanded RGB or
    /// RGBA image using the given palette (stored as 4 bytes per entry).
    /// Returns: 1 on success, 0 on allocation failure.
    int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n)
    {
        stbi__uint32 i, pixel_count = a.s.img_x * a.s.img_y;
        stbi_uc* p, temp_out, orig = a.out_;

        p = cast(stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0);
        if (p == null) return 0; //stbi__err("outofmem", "Out of memory");

        // between here and free(out) below, exitting would leak
        temp_out = p;

        if (pal_img_n == 3) {
            for (i=0; i < pixel_count; ++i) {
                int n = orig[i]*4;
                p[0] = palette[n ];
                p[1] = palette[n+1];
                p[2] = palette[n+2];
                p += 3;
            }
        } else {
            for (i=0; i < pixel_count; ++i) {
                int n = orig[i]*4;
                p[0] = palette[n ];
                p[1] = palette[n+1];
                p[2] = palette[n+2];
                p[3] = palette[n+3];
                p += 4;
            }
        }
        STBI_FREE(a.out_);
        a.out_ = temp_out;

        return 1;
    }

    // Non-zero: un-premultiply alpha when loading (iPhone CgBI PNGs).
    enum stbi__unpremultiply_on_load = 1;

    /// Builds a 32-bit PNG chunk tag from its four ASCII characters (big-endian).
    uint STBI__PNG_TYPE(char a, char b, char c, char d)
    {
        return ( (cast(uint)a) << 24 )
             + ( (cast(uint)b) << 16 )
             +
( (cast(uint)c) << 8 ) 1804 + ( (cast(uint)d) << 0 ); 1805 } 1806 1807 int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) 1808 { 1809 stbi_uc[1024] palette; 1810 stbi_uc pal_img_n=0; 1811 stbi_uc has_trans = 0; 1812 stbi_uc[3] tc = [0, 0, 0]; 1813 stbi__uint16[3] tc16; 1814 stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; 1815 int first=1,k,interlace=0, color=0, is_iphone=0; 1816 stbi__context *s = z.s; 1817 1818 z.expanded = null; 1819 z.idata = null; 1820 z.out_ = null; 1821 1822 s.ppmX = -1; 1823 s.ppmY = -1; 1824 s.pixelAspectRatio = -1; 1825 1826 if (!stbi__check_png_header(s)) return 0; 1827 1828 if (scan == STBI__SCAN_type) return 1; 1829 1830 for (;;) { 1831 stbi__pngchunk c = stbi__get_chunk_header(s); 1832 uint aaaa = c.type; 1833 switch (c.type) { 1834 case STBI__PNG_TYPE('C','g','B','I'): 1835 is_iphone = 1; 1836 stbi__skip(s, c.length); 1837 break; 1838 1839 case STBI__PNG_TYPE('p','H','Y','s'): 1840 s.ppmX = stbi__get32be(s); 1841 s.ppmY = stbi__get32be(s); 1842 s.pixelAspectRatio = s.ppmX / s.ppmY; 1843 ubyte unit = stbi__get8(s); 1844 if (unit != 1) 1845 { 1846 s.ppmX = -1; // only contains an aspect ratio, but no physical resolution 1847 s.ppmY = -1; 1848 } 1849 break; 1850 1851 case STBI__PNG_TYPE('I','H','D','R'): { 1852 int comp,filter; 1853 if (!first) return 0; //stbi__err("multiple IHDR","Corrupt PNG"); 1854 first = 0; 1855 if (c.length != 13) return 0; //stbi__err("bad IHDR len","Corrupt PNG"); 1856 s.img_x = stbi__get32be(s); 1857 s.img_y = stbi__get32be(s); 1858 if (s.img_y > STBI_MAX_DIMENSIONS) return 0; //stbi__err("too large","Very large image (corrupt?)"); 1859 if (s.img_x > STBI_MAX_DIMENSIONS) return 0; //stbi__err("too large","Very large image (corrupt?)"); 1860 z.depth = stbi__get8(s); if (z.depth != 1 && z.depth != 2 && z.depth != 4 && z.depth != 8 && z.depth != 16) return 0; //stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); 1861 color = stbi__get8(s); if (color > 6) return 0; 
//stbi__err("bad ctype","Corrupt PNG"); 1862 if (color == 3 && z.depth == 16) return 0; //stbi__err("bad ctype","Corrupt PNG"); 1863 if (color == 3) pal_img_n = 3; else if (color & 1) return 0; //stbi__err("bad ctype","Corrupt PNG"); 1864 comp = stbi__get8(s); if (comp) return 0; //stbi__err("bad comp method","Corrupt PNG"); 1865 filter= stbi__get8(s); if (filter) return 0; //stbi__err("bad filter method","Corrupt PNG"); 1866 interlace = stbi__get8(s); if (interlace>1) return 0; //stbi__err("bad interlace method","Corrupt PNG"); 1867 if (!s.img_x || !s.img_y) return 0; //stbi__err("0-pixel image","Corrupt PNG"); 1868 if (!pal_img_n) { 1869 s.img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); 1870 if ((1 << 30) / s.img_x / s.img_n < s.img_y) return 0; //stbi__err("too large", "Image too large to decode"); 1871 if (scan == STBI__SCAN_header) return 1; 1872 } else { 1873 // if paletted, then pal_n is our final components, and 1874 // img_n is # components to decompress/filter. 1875 s.img_n = 1; 1876 if ((1 << 30) / s.img_x / 4 < s.img_y) return 0; //stbi__err("too large","Corrupt PNG"); 1877 // if SCAN_header, have to scan to see if we have a tRNS 1878 } 1879 break; 1880 } 1881 1882 case STBI__PNG_TYPE('P','L','T','E'): { 1883 if (first) return 0; //stbi__err("first not IHDR", "Corrupt PNG"); 1884 if (c.length > 256*3) return 0; //stbi__err("invalid PLTE","Corrupt PNG"); 1885 pal_len = c.length / 3; 1886 if (pal_len * 3 != c.length) return 0; //stbi__err("invalid PLTE","Corrupt PNG"); 1887 for (i=0; i < pal_len; ++i) { 1888 palette[i*4+0] = stbi__get8(s); 1889 palette[i*4+1] = stbi__get8(s); 1890 palette[i*4+2] = stbi__get8(s); 1891 palette[i*4+3] = 255; 1892 } 1893 break; 1894 } 1895 1896 case STBI__PNG_TYPE('t','R','N','S'): { 1897 if (first) return 0; //stbi__err("first not IHDR", "Corrupt PNG"); 1898 if (z.idata) return 0; //stbi__err("tRNS after IDAT","Corrupt PNG"); 1899 if (pal_img_n) { 1900 if (scan == STBI__SCAN_header) { s.img_n = 4; return 1; } 1901 if 
(pal_len == 0) return 0; //stbi__err("tRNS before PLTE","Corrupt PNG"); 1902 if (c.length > pal_len) return 0; //stbi__err("bad tRNS len","Corrupt PNG"); 1903 pal_img_n = 4; 1904 for (i=0; i < c.length; ++i) 1905 palette[i*4+3] = stbi__get8(s); 1906 } else { 1907 if (!(s.img_n & 1)) return 0; //stbi__err("tRNS with alpha","Corrupt PNG"); 1908 if (c.length != cast(stbi__uint32) s.img_n*2) return 0; //stbi__err("bad tRNS len","Corrupt PNG"); 1909 has_trans = 1; 1910 if (z.depth == 16) { 1911 for (k = 0; k < s.img_n; ++k) tc16[k] = cast(stbi__uint16)stbi__get16be(s); // copy the values as-is 1912 } else { 1913 for (k = 0; k < s.img_n; ++k) 1914 { 1915 tc[k] = cast(ubyte)( cast(stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z.depth]); // non 8-bit images will be larger 1916 } 1917 } 1918 } 1919 break; 1920 } 1921 1922 case STBI__PNG_TYPE('I','D','A','T'): { 1923 if (first) 1924 { 1925 return 0; //stbi__err("first not IHDR", "Corrupt PNG"); 1926 } 1927 if (pal_img_n && !pal_len) 1928 { 1929 return 0; //stbi__err("no PLTE","Corrupt PNG"); 1930 } 1931 if (scan == STBI__SCAN_header) 1932 { 1933 s.img_n = pal_img_n; 1934 return 1; 1935 } 1936 if (cast(int)(ioff + c.length) < cast(int)ioff) 1937 { 1938 return 0; 1939 } 1940 if (ioff + c.length > idata_limit) { 1941 stbi__uint32 idata_limit_old = idata_limit; 1942 stbi_uc *p; 1943 if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; 1944 while (ioff + c.length > idata_limit) 1945 idata_limit *= 2; 1946 p = cast(stbi_uc *) STBI_REALLOC_SIZED(z.idata, idata_limit_old, idata_limit); 1947 if (p == null) 1948 { 1949 return 0; //stbi__err("outofmem", "Out of memory"); 1950 } 1951 z.idata = p; 1952 } 1953 if (!stbi__getn(s, z.idata+ioff,c.length)) 1954 { 1955 return 0; //stbi__err("outofdata","Corrupt PNG"); 1956 } 1957 ioff += c.length; 1958 break; 1959 } 1960 1961 case STBI__PNG_TYPE('I','E','N','D'): { 1962 stbi__uint32 raw_len, bpl; 1963 if (first) return 0; //stbi__err("first not IHDR", "Corrupt PNG"); 1964 if (scan != STBI__SCAN_load) return 1; 1965 if (z.idata == null) 1966 { 1967 return 0; //stbi__err("no IDAT","Corrupt PNG"); 1968 } 1969 // initial guess for decoded data size to avoid unnecessary reallocs 1970 bpl = (s.img_x * z.depth + 7) / 8; // bytes per line, per component 1971 raw_len = bpl * s.img_y * s.img_n /* pixels */ + s.img_y /* filter mode per row */; 1972 z.expanded = cast(stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag(cast(char *) z.idata, 1973 ioff, 1974 raw_len, 1975 cast(int *) &raw_len, 1976 !is_iphone); 1977 if (z.expanded == null) 1978 { 1979 return 0; // zlib should set error 1980 } 1981 STBI_FREE(z.idata); z.idata = null; 1982 if ((req_comp == s.img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) 1983 s.img_out_n = s.img_n+1; 1984 else 1985 s.img_out_n = s.img_n; 1986 if (!stbi__create_png_image(z, z.expanded, raw_len, s.img_out_n, z.depth, color, interlace)) 1987 { 1988 return 0; 1989 } 1990 if (has_trans) { 1991 if (z.depth == 16) { 1992 if (!stbi__compute_transparency16(z, tc16.ptr, s.img_out_n)) 1993 { 1994 return 0; 1995 } 1996 } else { 1997 if (!stbi__compute_transparency(z, tc.ptr, s.img_out_n)) 1998 { 1999 return 0; 2000 } 2001 } 2002 } 2003 2004 if (pal_img_n) { 2005 // pal_img_n == 3 or 4 2006 s.img_n = pal_img_n; // record the actual colors we had 2007 s.img_out_n = pal_img_n; 2008 if (req_comp >= 3) s.img_out_n = req_comp; 2009 
if (!stbi__expand_png_palette(z, palette.ptr, pal_len, s.img_out_n)) 2010 { 2011 return 0; 2012 } 2013 } else if (has_trans) { 2014 // non-paletted image with tRNS . source image has (constant) alpha 2015 ++s.img_n; 2016 } 2017 STBI_FREE(z.expanded); z.expanded = null; 2018 // end of PNG chunk, read and skip CRC 2019 stbi__get32be(s); 2020 return 1; 2021 } 2022 2023 default: 2024 // if critical, fail 2025 if (first) 2026 { 2027 return 0; //stbi__err("first not IHDR", "Corrupt PNG"); 2028 } 2029 if ((c.type & (1 << 29)) == 0) 2030 { 2031 return 0; //stbi__err("invalid_chunk", "PNG not supported: unknown PNG chunk type"); 2032 } 2033 stbi__skip(s, c.length); 2034 break; 2035 } 2036 // end of PNG chunk, read and skip CRC 2037 stbi__get32be(s); 2038 } 2039 } 2040 2041 void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) 2042 { 2043 void *result=null; 2044 if (req_comp < 0 || req_comp > 4) return null; //stbi__errpuc("bad req_comp", "Internal error"); 2045 if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { 2046 if (p.depth <= 8) 2047 ri.bits_per_channel = 8; 2048 else if (p.depth == 16) 2049 ri.bits_per_channel = 16; 2050 else 2051 return null; //stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); 2052 result = p.out_; 2053 p.out_ = null; 2054 if (req_comp && req_comp != p.s.img_out_n) { 2055 if (ri.bits_per_channel == 8) 2056 result = stbi__convert_format(cast(ubyte*) result, p.s.img_out_n, req_comp, p.s.img_x, p.s.img_y); 2057 else 2058 result = stbi__convert_format16(cast(stbi__uint16 *) result, p.s.img_out_n, req_comp, p.s.img_x, p.s.img_y); 2059 p.s.img_out_n = req_comp; 2060 if (result == null) return result; 2061 } 2062 *x = p.s.img_x; 2063 *y = p.s.img_y; 2064 if (n) *n = p.s.img_n; 2065 } 2066 STBI_FREE(p.out_); p.out_ = null; 2067 STBI_FREE(p.expanded); p.expanded = null; 2068 STBI_FREE(p.idata); p.idata = null; 2069 2070 return result; 2071 } 2072 2073 void 
*stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) 2074 { 2075 stbi__png p; 2076 p.s = s; 2077 return stbi__do_png(&p, x,y,comp,req_comp, ri); 2078 } 2079 2080 int stbi__png_test(stbi__context *s) 2081 { 2082 int r; 2083 r = stbi__check_png_header(s); 2084 stbi__rewind(s); 2085 return r; 2086 } 2087 2088 int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) 2089 { 2090 if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { 2091 stbi__rewind( p.s ); 2092 return 0; 2093 } 2094 if (x) *x = p.s.img_x; 2095 if (y) *y = p.s.img_y; 2096 if (comp) *comp = p.s.img_n; 2097 return 1; 2098 } 2099 2100 int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) 2101 { 2102 stbi__png p; 2103 p.s = s; 2104 return stbi__png_info_raw(&p, x, y, comp); 2105 } 2106 2107 int stbi__png_is16(stbi__context *s) 2108 { 2109 stbi__png p; 2110 p.s = s; 2111 if (!stbi__png_info_raw(&p, null, null, null)) 2112 return 0; 2113 if (p.depth != 16) { 2114 stbi__rewind(p.s); 2115 return 0; 2116 } 2117 return 1; 2118 } 2119 2120 bool stbi__png_is16(stbi_io_callbacks* clbk, void* user) // #BONUS 2121 { 2122 stbi__context s; 2123 stbi__start_callbacks(&s, clbk, user); 2124 return stbi__png_is16(&s) != 0; 2125 } 2126 } 2127 2128 version(decodeBMP) 2129 { 2130 // unported yet 2131 /+ 2132 static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) 2133 { 2134 stbi_uc *out; 2135 unsigned int mr=0,mg=0,mb=0,ma=0, all_a; 2136 stbi_uc pal[256][4]; 2137 int psize=0,i,j,width; 2138 int flip_vertically, pad, target; 2139 stbi__bmp_data info; 2140 STBI_NOTUSED(ri); 2141 2142 info.all_a = 255; 2143 if (stbi__bmp_parse_header(s, &info) == NULL) 2144 return NULL; // error code already set 2145 2146 flip_vertically = ((int) s->img_y) > 0; 2147 s->img_y = abs((int) s->img_y); 2148 2149 if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); 2150 if (s->img_x > 
STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); 2151 2152 mr = info.mr; 2153 mg = info.mg; 2154 mb = info.mb; 2155 ma = info.ma; 2156 all_a = info.all_a; 2157 2158 if (info.hsz == 12) { 2159 if (info.bpp < 24) 2160 psize = (info.offset - info.extra_read - 24) / 3; 2161 } else { 2162 if (info.bpp < 16) 2163 psize = (info.offset - info.extra_read - info.hsz) >> 2; 2164 } 2165 if (psize == 0) { 2166 // accept some number of extra bytes after the header, but if the offset points either to before 2167 // the header ends or implies a large amount of extra data, reject the file as malformed 2168 int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original); 2169 int header_limit = 1024; // max we actually read is below 256 bytes currently. 2170 int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size. 2171 if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) { 2172 return stbi__errpuc("bad header", "Corrupt BMP"); 2173 } 2174 // we established that bytes_read_so_far is positive and sensible. 2175 // the first half of this test rejects offsets that are either too small positives, or 2176 // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn 2177 // ensures the number computed in the second half of the test can't overflow. 2178 if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) { 2179 return stbi__errpuc("bad offset", "Corrupt BMP"); 2180 } else { 2181 stbi__skip(s, info.offset - bytes_read_so_far); 2182 } 2183 } 2184 2185 if (info.bpp == 24 && ma == 0xff000000) 2186 s->img_n = 3; 2187 else 2188 s->img_n = ma ? 
4 : 3; 2189 if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 2190 target = req_comp; 2191 else 2192 target = s->img_n; // if they want monochrome, we'll post-convert 2193 2194 // sanity-check size 2195 if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) 2196 return stbi__errpuc("too large", "Corrupt BMP"); 2197 2198 out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); 2199 if (!out) return stbi__errpuc("outofmem", "Out of memory"); 2200 if (info.bpp < 16) { 2201 int z=0; 2202 if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } 2203 for (i=0; i < psize; ++i) { 2204 pal[i][2] = stbi__get8(s); 2205 pal[i][1] = stbi__get8(s); 2206 pal[i][0] = stbi__get8(s); 2207 if (info.hsz != 12) stbi__get8(s); 2208 pal[i][3] = 255; 2209 } 2210 stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); 2211 if (info.bpp == 1) width = (s->img_x + 7) >> 3; 2212 else if (info.bpp == 4) width = (s->img_x + 1) >> 1; 2213 else if (info.bpp == 8) width = s->img_x; 2214 else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } 2215 pad = (-width)&3; 2216 if (info.bpp == 1) { 2217 for (j=0; j < (int) s->img_y; ++j) { 2218 int bit_offset = 7, v = stbi__get8(s); 2219 for (i=0; i < (int) s->img_x; ++i) { 2220 int color = (v>>bit_offset)&0x1; 2221 out[z++] = pal[color][0]; 2222 out[z++] = pal[color][1]; 2223 out[z++] = pal[color][2]; 2224 if (target == 4) out[z++] = 255; 2225 if (i+1 == (int) s->img_x) break; 2226 if((--bit_offset) < 0) { 2227 bit_offset = 7; 2228 v = stbi__get8(s); 2229 } 2230 } 2231 stbi__skip(s, pad); 2232 } 2233 } else { 2234 for (j=0; j < (int) s->img_y; ++j) { 2235 for (i=0; i < (int) s->img_x; i += 2) { 2236 int v=stbi__get8(s),v2=0; 2237 if (info.bpp == 4) { 2238 v2 = v & 15; 2239 v >>= 4; 2240 } 2241 out[z++] = pal[v][0]; 2242 out[z++] = pal[v][1]; 2243 out[z++] = pal[v][2]; 2244 if (target == 4) out[z++] = 255; 2245 if (i+1 == (int) s->img_x) 
break; 2246 v = (info.bpp == 8) ? stbi__get8(s) : v2; 2247 out[z++] = pal[v][0]; 2248 out[z++] = pal[v][1]; 2249 out[z++] = pal[v][2]; 2250 if (target == 4) out[z++] = 255; 2251 } 2252 stbi__skip(s, pad); 2253 } 2254 } 2255 } else { 2256 int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; 2257 int z = 0; 2258 int easy=0; 2259 stbi__skip(s, info.offset - info.extra_read - info.hsz); 2260 if (info.bpp == 24) width = 3 * s->img_x; 2261 else if (info.bpp == 16) width = 2*s->img_x; 2262 else /* bpp = 32 and pad = 0 */ width=0; 2263 pad = (-width) & 3; 2264 if (info.bpp == 24) { 2265 easy = 1; 2266 } else if (info.bpp == 32) { 2267 if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) 2268 easy = 2; 2269 } 2270 if (!easy) { 2271 if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } 2272 // right shift amt to put high bit in position #7 2273 rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); 2274 gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); 2275 bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); 2276 ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); 2277 if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } 2278 } 2279 for (j=0; j < (int) s->img_y; ++j) { 2280 if (easy) { 2281 for (i=0; i < (int) s->img_x; ++i) { 2282 unsigned char a; 2283 out[z+2] = stbi__get8(s); 2284 out[z+1] = stbi__get8(s); 2285 out[z+0] = stbi__get8(s); 2286 z += 3; 2287 a = (easy == 2 ? stbi__get8(s) : 255); 2288 all_a |= a; 2289 if (target == 4) out[z++] = a; 2290 } 2291 } else { 2292 int bpp = info.bpp; 2293 for (i=0; i < (int) s->img_x; ++i) { 2294 stbi__uint32 v = (bpp == 16 ? 
(stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); 2295 unsigned int a; 2296 out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); 2297 out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); 2298 out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); 2299 a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255); 2300 all_a |= a; 2301 if (target == 4) out[z++] = STBI__BYTECAST(a); 2302 } 2303 } 2304 stbi__skip(s, pad); 2305 } 2306 } 2307 2308 // if alpha channel is all 0s, replace with all 255s 2309 if (target == 4 && all_a == 0) 2310 for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) 2311 out[i] = 255; 2312 2313 if (flip_vertically) { 2314 stbi_uc t; 2315 for (j=0; j < (int) s->img_y>>1; ++j) { 2316 stbi_uc *p1 = out + j *s->img_x*target; 2317 stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; 2318 for (i=0; i < (int) s->img_x*target; ++i) { 2319 t = p1[i]; p1[i] = p2[i]; p2[i] = t; 2320 } 2321 } 2322 } 2323 2324 if (req_comp && req_comp != target) { 2325 out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); 2326 if (out == NULL) return out; // stbi__convert_format frees input on failure 2327 } 2328 2329 *x = s->img_x; 2330 *y = s->img_y; 2331 if (comp) *comp = s->img_n; 2332 return out; 2333 }+/ 2334 }