/* stb_image - public domain image loader
   (RetroArch fork; mirror of https://github.com/libretro/RetroArch) */
#ifndef STBI_INCLUDE_STB_IMAGE_H
|
|
#define STBI_INCLUDE_STB_IMAGE_H
|
|
|
|
#include <stdint.h>
|
|
|
|
#define STBI_VERSION 1
|
|
|
|
/* Component-count codes used for stbi_load's req_comp argument and the
   *comp result: number of 8-bit channels per pixel. */
enum
{
   STBI_default = 0, /* only meaningful as req_comp: keep the file's own count */

   STBI_grey       = 1,
   STBI_grey_alpha = 2,
   STBI_rgb        = 3,
   STBI_rgb_alpha  = 4
};
|
|
|
|
#ifdef __cplusplus
|
|
extern "C" {
|
|
#endif
|
|
|
|
#ifdef STB_IMAGE_STATIC
|
|
#define STBIDEF static
|
|
#else
|
|
#define STBIDEF extern
|
|
#endif
|
|
|
|
/* User-supplied I/O callbacks so images can be read from arbitrary sources
   (see stbi__start_callbacks). */
typedef struct
{
   int  (*read) (void *user,char *data,int size); /* fill 'data' with up to 'size' bytes; return bytes actually read */
   void (*skip) (void *user,int n);               /* skip the next 'n' bytes of input */
   int  (*eof)  (void *user);                     /* nonzero when the stream is exhausted */
} stbi_io_callbacks;
|
|
|
|
STBIDEF uint8_t *stbi_load (char const *filename, int *x, int *y, int *comp, int req_comp);
|
|
|
|
#ifndef STBI_NO_STDIO
|
|
STBIDEF uint8_t *stbi_load_from_file (FILE *f, int *x, int *y, int *comp, int req_comp);
|
|
#endif
|
|
|
|
STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header);
|
|
|
|
#ifdef __cplusplus
|
|
}
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#ifdef STB_IMAGE_IMPLEMENTATION
|
|
|
|
#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \
|
|
|| defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
|
|
|| defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
|
|
|| defined(STBI_ONLY_ZLIB)
|
|
#ifndef STBI_ONLY_JPEG
|
|
#define STBI_NO_JPEG
|
|
#endif
|
|
#ifndef STBI_ONLY_PNG
|
|
#define STBI_NO_PNG
|
|
#endif
|
|
#ifndef STBI_ONLY_BMP
|
|
#define STBI_NO_BMP
|
|
#endif
|
|
#ifndef STBI_ONLY_TGA
|
|
#define STBI_NO_TGA
|
|
#endif
|
|
#endif
|
|
|
|
#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
|
|
#define STBI_NO_ZLIB
|
|
#endif
|
|
|
|
|
|
#include <stdarg.h>
|
|
#include <stddef.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
|
|
#include <math.h>
|
|
#endif
|
|
|
|
#ifndef STBI_NO_STDIO
|
|
#include <stdio.h>
|
|
#endif
|
|
|
|
#ifndef STBI_ASSERT
|
|
#include <assert.h>
|
|
#define STBI_ASSERT(x) assert(x)
|
|
#endif
|
|
|
|
#include <retro_inline.h>
|
|
|
|
typedef unsigned char validate_uint32[sizeof(uint32_t)==4 ? 1 : -1];
|
|
|
|
#ifdef _MSC_VER
|
|
#define STBI_NOTUSED(v) (void)(v)
|
|
#else
|
|
#define STBI_NOTUSED(v) (void)sizeof(v)
|
|
#endif
|
|
|
|
#ifdef _MSC_VER
|
|
#define STBI_HAS_LROTL
|
|
#endif
|
|
|
|
#ifdef STBI_HAS_LROTL
|
|
#define stbi_lrot(x,y) _lrotl(x,y)
|
|
#else
|
|
#define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
|
|
#endif
|
|
|
|
#if defined(__x86_64__) || defined(_M_X64)
|
|
#define STBI__X64_TARGET
|
|
#elif defined(__i386) || defined(_M_IX86)
|
|
#define STBI__X86_TARGET
|
|
#endif
|
|
|
|
#if defined(__GNUC__) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
|
|
#define STBI_NO_SIMD
|
|
#endif
|
|
|
|
#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
|
|
#define STBI_NO_SIMD
|
|
#endif
|
|
|
|
#if !defined(STBI_NO_SIMD) && defined(STBI__X86_TARGET)
|
|
#define STBI_SSE2
|
|
#include <emmintrin.h>
|
|
|
|
#ifdef _MSC_VER
|
|
|
|
#if _MSC_VER >= 1400
|
|
#include <intrin.h>
|
|
/* MSVC >= VS2005: return CPUID leaf 1 EDX (feature flags; bit 26 = SSE2). */
static int stbi__cpuid3(void)
{
   int info[4];
   __cpuid(info,1);
   return info[3]; /* EDX */
}
|
|
#else
|
|
/* Pre-VS2005 fallback: inline-assembly CPUID, returns leaf 1 EDX. */
static int stbi__cpuid3(void)
{
   int res;
   __asm {
      mov  eax,1
      cpuid
      mov  res,edx
   }
   return res;
}
|
|
#endif
|
|
|
|
#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
|
|
|
|
/* True when the CPU reports SSE2 support (CPUID.1:EDX bit 26). */
static int stbi__sse2_available()
{
   int info3 = stbi__cpuid3();
   return ((info3 >> 26) & 1) != 0;
}
|
|
#else
|
|
#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
|
|
|
|
/* GCC >= 4.8: ask the compiler's runtime CPU detection; on older GCC there
   is no portable check here, so conservatively report no SSE2. */
static int stbi__sse2_available()
{
#if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 408
   return __builtin_cpu_supports("sse2");
#else
   return 0;
#endif
}
|
|
#endif
|
|
#endif
|
|
|
|
#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
|
|
#undef STBI_NEON
|
|
#endif
|
|
|
|
#ifdef STBI_NEON
|
|
#include <arm_neon.h>
|
|
#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
|
|
#endif
|
|
|
|
#ifndef STBI_SIMD_ALIGN
|
|
#define STBI_SIMD_ALIGN(type, name) type name
|
|
#endif
|
|
|
|
/* Decoder input state: image dimensions/components plus a small buffered
   reader that pulls data through the user's stbi_io_callbacks. */
typedef struct
{
   uint32_t img_x, img_y;     /* image dimensions */
   int img_n, img_out_n;      /* components in file / components emitted */

   stbi_io_callbacks io;      /* active callbacks (io.read == NULL means no callback source) */
   void *io_user_data;        /* opaque pointer handed back to the callbacks */

   int read_from_callbacks;   /* nonzero while the callback source may have more data */
   int buflen;                /* valid capacity of buffer_start */
   uint8_t buffer_start[128]; /* staging buffer filled by stbi__refill_buffer */

   uint8_t *img_buffer, *img_buffer_end; /* current read window [buffer, end) */
   uint8_t *img_buffer_original;         /* window start, restored by stbi__rewind */
} stbi__context;
|
|
|
|
|
|
static void stbi__refill_buffer(stbi__context *s);
|
|
|
|
/* Initialize a context that reads through user callbacks, then prime the
   internal staging buffer with the first chunk of data. */
static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
{
   s->io_user_data        = user;
   s->io                  = *c;
   s->read_from_callbacks = 1;
   s->buflen              = sizeof(s->buffer_start);
   s->img_buffer_original = s->buffer_start;
   stbi__refill_buffer(s);
}
|
|
|
|
#ifndef STBI_NO_STDIO
|
|
|
|
/* stbi_io_callbacks.read adapter over fread(): 'user' is a FILE*. */
static int stbi__stdio_read(void *user, char *data, int size)
{
   size_t got = fread(data, 1, (size_t)size, (FILE *)user);
   return (int)got;
}
|
|
|
|
/* stbi_io_callbacks.skip adapter: advance the FILE* by n bytes. */
static void stbi__stdio_skip(void *user, int n)
{
   FILE *f = (FILE *)user;
   fseek(f, n, SEEK_CUR);
}
|
|
|
|
/* stbi_io_callbacks.eof adapter over feof(): nonzero once a read has hit
   end-of-file on the FILE*. */
static int stbi__stdio_eof(void *user)
{
   FILE *f = (FILE *)user;
   return feof(f);
}
|
|
|
|
/* Ready-made callback set adapting a FILE* to stbi_io_callbacks
   (order matches the struct: read, skip, eof). */
static stbi_io_callbacks stbi__stdio_callbacks =
{
   stbi__stdio_read,
   stbi__stdio_skip,
   stbi__stdio_eof,
};
|
|
|
|
/* Start a context whose input is a stdio FILE*, routed through the
   stbi__stdio_callbacks adapter set. */
static void stbi__start_file(stbi__context *s, FILE *f)
{
   stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f);
}
|
|
|
|
#endif
|
|
|
|
/* Reset the read cursor to the start of the buffered window (used after a
   format test has peeked at header bytes). */
static void stbi__rewind(stbi__context *s)
{
   s->img_buffer = s->img_buffer_original;
}
|
|
|
|
#ifndef STBI_NO_JPEG
|
|
static int stbi__jpeg_test(stbi__context *s);
|
|
static uint8_t *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp);
|
|
#endif
|
|
|
|
#ifndef STBI_NO_PNG
|
|
static int stbi__png_test(stbi__context *s);
|
|
static uint8_t *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp);
|
|
#endif
|
|
|
|
#ifndef STBI_NO_BMP
|
|
static int stbi__bmp_test(stbi__context *s);
|
|
static uint8_t *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp);
|
|
#endif
|
|
|
|
#ifndef STBI_NO_TGA
|
|
static int stbi__tga_test(stbi__context *s);
|
|
static uint8_t *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp);
|
|
#endif
|
|
|
|
/* Reason for the most recent decode failure (global; not thread-safe). */
static const char *stbi__g_failure_reason;

/* Record a failure reason and return 0, so int-returning decode helpers
   can write `return stbi__err(...);` in one statement.  Callers normally
   go through the stbi__err(x,y) macro defined below. */
static int stbi__err(const char *str)
{
   stbi__g_failure_reason = str;
   return 0;
}
|
|
|
|
#ifdef STBI_NO_FAILURE_STRINGS
|
|
#define stbi__err(x,y) 0
|
|
#elif defined(STBI_FAILURE_USERMSG)
|
|
#define stbi__err(x,y) stbi__err(y)
|
|
#else
|
|
#define stbi__err(x,y) stbi__err(x)
|
|
#endif
|
|
|
|
#define stbi__errpf(x,y) ((float *) (stbi__err(x,y)?NULL:NULL))
|
|
#define stbi__errpuc(x,y) ((unsigned char *) (stbi__err(x,y)?NULL:NULL))
|
|
|
|
/* When nonzero, stbi__load_flip mirrors decoded images vertically. */
static int stbi__vertically_flip_on_load = 0;
|
|
|
|
/* Probe the stream with each enabled decoder's signature test, in order,
   and dispatch to the first loader that recognizes the data.  Returns the
   decoded pixels or NULL (with a failure reason) for unknown formats. */
static unsigned char *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
#ifndef STBI_NO_JPEG
   if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp);
#endif
#ifndef STBI_NO_PNG
   if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp);
#endif
#ifndef STBI_NO_BMP
   if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp);
#endif

#ifndef STBI_NO_TGA
   /* TGA is tried last — NOTE(review): TGA has no magic number, so its
      test is presumably heuristic; confirm against stbi__tga_test. */
   if (stbi__tga_test(s))
      return stbi__tga_load(s,x,y,comp,req_comp);
#endif

   return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt");
}
|
|
|
|
/* Decode the image, then mirror it vertically in place when the global
   stbi__vertically_flip_on_load flag is set.  Returns the (possibly
   flipped) pixel buffer, or NULL on decode failure. */
static unsigned char *stbi__load_flip(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
   unsigned char *result = stbi__load_main(s, x, y, comp, req_comp);

   if (result != NULL && stbi__vertically_flip_on_load)
   {
      int w = *x, h = *y;
      /* bytes per pixel: the requested count if any, else the file's own */
      int bytes_pp = req_comp ? req_comp : *comp;
      int stride   = w * bytes_pp;
      int row, i;

      /* swap row `row` with its mirror, byte by byte */
      for (row = 0; row < (h >> 1); row++)
      {
         unsigned char *top = result + row * stride;
         unsigned char *bot = result + (h - row - 1) * stride;
         for (i = 0; i < stride; i++)
         {
            unsigned char t = top[i];
            top[i] = bot[i];
            bot[i] = t;
         }
      }
   }

   return result;
}
|
|
|
|
#ifndef STBI_NO_STDIO
|
|
|
|
/* fopen() wrapper: newer MSVC deprecates plain fopen, so use fopen_s
   there; everything else uses fopen.  Returns NULL on failure. */
static FILE *stbi__fopen(char const *filename, char const *mode)
{
   FILE *f;
#if defined(_MSC_VER) && _MSC_VER >= 1400
   if (0 != fopen_s(&f, filename, mode))
      f=0;
#else
   f = fopen(filename, mode);
#endif
   return f;
}
|
|
|
|
|
|
/* Convenience entry point: open `filename` in binary mode, decode it via
   stbi_load_from_file, close the file.  Returns NULL (with a failure
   reason recorded) when the file cannot be opened or decoded. */
STBIDEF uint8_t *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
{
   unsigned char *pixels;
   FILE *f = stbi__fopen(filename, "rb");

   if (f == NULL)
      return stbi__errpuc("can't fopen", "Unable to open file");

   pixels = stbi_load_from_file(f, x, y, comp, req_comp);
   fclose(f);
   return pixels;
}
|
|
|
|
/* Decode an image from an already-open FILE*.  On success, the stream is
   repositioned just past the image data: we seek backwards over any bytes
   that were buffered by the context but never consumed. */
STBIDEF uint8_t *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   unsigned char *pixels;

   stbi__start_file(&s, f);
   pixels = stbi__load_flip(&s, x, y, comp, req_comp);
   if (pixels)
   {
      int unread = (int)(s.img_buffer_end - s.img_buffer);
      fseek(f, -unread, SEEK_CUR);
   }
   return pixels;
}
|
|
#endif
|
|
|
|
/* Gamma/scale factors for HDR<->LDR conversion; defaults gamma=2.2,
   scale=1.  NOTE(review): the setters and users of these live outside
   this chunk — presumably stbi_hdr_to_ldr_* / stbi_ldr_to_hdr_*. */
static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f;
static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f;
|
|
|
|
/* How much of the file a decoder pass should process. */
enum
{
   STBI__SCAN_load=0,   /* full pixel decode */
   STBI__SCAN_type,     /* identify the format only */
   STBI__SCAN_header    /* parse header (dimensions/components) only */
};
|
|
|
|
/* Pull the next chunk of data from the user callback into buffer_start
   and reset the read window.  On end of input, expose a single zero byte
   (so stbi__get8 keeps returning 0) and mark the callback exhausted. */
static void stbi__refill_buffer(stbi__context *s)
{
   int got = (s->io.read)(s->io_user_data, (char*)s->buffer_start, s->buflen);

   s->img_buffer = s->buffer_start;
   if (got == 0)
   {
      s->read_from_callbacks = 0;
      s->img_buffer_end      = s->buffer_start + 1;
      *s->img_buffer         = 0;
   }
   else
      s->img_buffer_end = s->buffer_start + got;
}
|
|
|
|
/* Read one byte.  Past the end of data this returns 0; callers that care
   distinguish real zeros via stbi__at_eof. */
static INLINE uint8_t stbi__get8(stbi__context *s)
{
   if (s->img_buffer >= s->img_buffer_end)
   {
      /* buffer exhausted: refill from the callback if one is still live */
      if (!s->read_from_callbacks)
         return 0;
      stbi__refill_buffer(s);
   }
   return *s->img_buffer++;
}
|
|
|
|
/* True when no more input bytes are available. */
static INLINE int stbi__at_eof(stbi__context *s)
{
   if (s->io.read)
   {
      /* callback source: not EOF until the user callback says so... */
      if (!(s->io.eof)(s->io_user_data))
         return 0;
      /* ...and even then, if refill has not yet installed its terminating
         zero byte (read_from_callbacks still set), buffered data remains */
      if (s->read_from_callbacks == 0)
         return 1;
   }

   return s->img_buffer >= s->img_buffer_end;
}
|
|
|
|
/* Skip n input bytes.  A negative n (corrupt header arithmetic) jumps to
   the end of the buffered data rather than seeking backwards. */
static void stbi__skip(stbi__context *s, int n)
{
   if (n < 0)
   {
      s->img_buffer = s->img_buffer_end;
      return;
   }
   if (s->io.read)
   {
      int buffered = (int)(s->img_buffer_end - s->img_buffer);
      if (buffered < n)
      {
         /* drain the buffer, forward the remainder to the skip callback */
         s->img_buffer = s->img_buffer_end;
         (s->io.skip)(s->io_user_data, n - buffered);
         return;
      }
   }
   s->img_buffer += n;
}
|
|
|
|
/* Read exactly n bytes into buffer.  Returns 1 on success, 0 if the
   input ran out first. */
static int stbi__getn(stbi__context *s, uint8_t *buffer, int n)
{
   if (s->io.read)
   {
      int buffered = (int)(s->img_buffer_end - s->img_buffer);
      if (buffered < n)
      {
         /* copy what is buffered, then read the remainder directly into
            the caller's buffer */
         int got;
         memcpy(buffer, s->img_buffer, buffered);
         got = (s->io.read)(s->io_user_data, (char*)buffer + buffered, n - buffered);
         s->img_buffer = s->img_buffer_end;
         return got == (n - buffered);
      }
   }

   if (s->img_buffer + n > s->img_buffer_end)
      return 0;

   memcpy(buffer, s->img_buffer, n);
   s->img_buffer += n;
   return 1;
}
|
|
|
|
/* Read a big-endian 16-bit value (high byte first). */
static int stbi__get16be(stbi__context *s)
{
   int hi = stbi__get8(s);
   int lo = stbi__get8(s);
   return (hi << 8) + lo;
}
|
|
|
|
/* Read a big-endian 32-bit value (high half first). */
static uint32_t stbi__get32be(stbi__context *s)
{
   uint32_t hi = stbi__get16be(s);
   uint32_t lo = stbi__get16be(s);
   return (hi << 16) + lo;
}
|
|
|
|
/* Read a little-endian 16-bit value (low byte first). */
static int stbi__get16le(stbi__context *s)
{
   int lo = stbi__get8(s);
   int hi = stbi__get8(s);
   return lo + (hi << 8);
}
|
|
|
|
/* Read a little-endian 32-bit value (low half first).  The high half is
   widened to uint32_t BEFORE shifting: the previous `int << 16` was
   undefined behavior (signed overflow) whenever the high word had its
   top bit set, i.e. for any value >= 0x80000000. */
static uint32_t stbi__get32le(stbi__context *s)
{
   uint32_t z = stbi__get16le(s);
   return z + ((uint32_t)stbi__get16le(s) << 16);
}
|
|
|
|
#define STBI__BYTECAST(x) ((uint8_t) ((x) & 255))
|
|
|
|
/* Integer luma approximation: weights 77/150/29 sum to 256, so the >>8
   keeps the result within 0..255 for 8-bit inputs. */
static uint8_t stbi__compute_y(int r, int g, int b)
{
   int luma = r * 77 + g * 150 + b * 29;
   return (uint8_t)(luma >> 8);
}
|
|
|
|
/* Convert `data` (img_n components per pixel, x*y pixels) into a freshly
   allocated buffer with req_comp components.  `data` is always consumed:
   it is returned unchanged when no conversion is needed, freed otherwise.
   Returns NULL (with a failure reason) on overflow or out-of-memory. */
static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y)
{
   int i,j;
   unsigned char *good;

   if (req_comp == img_n) return data;
   STBI_ASSERT(req_comp >= 1 && req_comp <= 4);

   /* Guard the allocation size: req_comp*x*y was previously computed in
      wrapping unsigned-int arithmetic, so a huge image could wrap,
      under-allocate, and overflow the heap in the copy loop below. */
   if (x != 0 && y != 0 && (size_t)req_comp > SIZE_MAX / x / y)
   {
      free(data);
      return stbi__errpuc("too large", "Image too large to convert");
   }
   good = (unsigned char *) malloc((size_t)req_comp * x * y);

   if (good == NULL)
   {
      free(data);
      return stbi__errpuc("outofmem", "Out of memory");
   }

   for (j=0; j < (int) y; ++j) {
      unsigned char *src  = data + j * x * img_n ;
      unsigned char *dest = good + j * x * req_comp;

      /* Each CASE converts one row for a (source,dest) component pair:
         missing alpha is filled with 255, RGB->grey goes through
         stbi__compute_y, grey-alpha -> RGB drops the alpha. */
#define COMBO(a,b)  ((a)*8+(b))
#define CASE(a,b)   case COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
      switch (COMBO(img_n, req_comp))
      {
         CASE(1,2) dest[0]=src[0], dest[1]=255; break;
         CASE(1,3) dest[0]=dest[1]=dest[2]=src[0]; break;
         CASE(1,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; break;
         CASE(2,1) dest[0]=src[0]; break;
         CASE(2,3) dest[0]=dest[1]=dest[2]=src[0]; break;
         CASE(2,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; break;
         CASE(3,4) dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; break;
         CASE(3,1) dest[0]=stbi__compute_y(src[0],src[1],src[2]); break;
         CASE(3,2) dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; break;
         CASE(4,1) dest[0]=stbi__compute_y(src[0],src[1],src[2]); break;
         CASE(4,2) dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; break;
         CASE(4,3) dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; break;
         default: STBI_ASSERT(0);
      }
#undef CASE
   }

   free(data);
   return good;
}
|
|
|
|
#ifndef STBI_NO_JPEG
|
|
|
|
#define FAST_BITS 9
|
|
|
|
/* Huffman decode tables for one JPEG table slot (built by
   stbi__build_huffman from a DHT segment). */
typedef struct
{
   uint8_t  fast[1 << FAST_BITS]; /* FAST_BITS-bit prefix -> symbol index, or 255 if code is longer */
   uint16_t code[256];            /* canonical code of each symbol */
   uint8_t  values[256];          /* symbol values, in code order */
   uint8_t  size[257];            /* bit length of each symbol's code (0-terminated) */
   unsigned int maxcode[18];      /* per length: (largest code + 1) << (16-len); [17] is a sentinel */
   int    delta[17];              /* per length: symbol-index minus first code, for slow-path decode */
} stbi__huffman;
|
|
|
|
/* Complete JPEG decoder state. */
typedef struct
{
   stbi__context *s;               /* input stream */
   stbi__huffman huff_dc[4];       /* DC Huffman tables */
   stbi__huffman huff_ac[4];       /* AC Huffman tables */
   uint8_t dequant[4][64];         /* dequantization tables */
   int16_t fast_ac[4][1 << FAST_BITS]; /* packed fast-path AC lookup (see stbi__build_fast_ac) */

   /* maximum sampling factors and MCU grid geometry */
   int img_h_max, img_v_max;
   int img_mcu_x, img_mcu_y;
   int img_mcu_w, img_mcu_h;

   /* per-component decode state */
   struct
   {
      int id;                      /* component id from the frame header */
      int h,v;                     /* sampling factors */
      int tq;                      /* quantization table index */
      int hd,ha;                   /* DC / AC Huffman table indices */
      int dc_pred;                 /* predictor for differential DC coding */

      int x,y,w2,h2;
      uint8_t *data;
      void *raw_data, *raw_coeff;  /* backing allocations for data/coeff */
      uint8_t *linebuf;
      short   *coeff;              /* progressive mode: per-block DCT coefficients */
      int      coeff_w, coeff_h;   /* number of 8x8 coefficient blocks */
   } img_comp[4];

   uint32_t       code_buffer;     /* entropy-coded bit buffer, MSB-justified */
   int            code_bits;       /* number of valid bits in code_buffer */
   unsigned char  marker;          /* marker byte seen while refilling the bit buffer */
   int            nomore;          /* set once a marker ends the entropy data */

   /* progressive-scan parameters from the SOS header */
   int            progressive;
   int            spec_start;
   int            spec_end;
   int            succ_high;
   int            succ_low;
   int            eob_run;         /* remaining blocks in the current end-of-band run */

   int scan_n, order[4];           /* components in the current scan */
   int restart_interval, todo;     /* MCUs between restart markers */

   /* kernels, replaceable with SIMD implementations at setup time */
   void (*idct_block_kernel)(uint8_t *out, int out_stride, short data[64]);
   void (*YCbCr_to_RGB_kernel)(uint8_t *out, const uint8_t *y, const uint8_t *pcb, const uint8_t *pcr, int count, int step);
   uint8_t *(*resample_row_hv_2_kernel)(uint8_t *out, uint8_t *in_near, uint8_t *in_far, int w, int hs);
} stbi__jpeg;
|
|
|
|
/* Build canonical Huffman decode tables from the 16 per-length symbol
   counts of a JPEG DHT segment.  Returns 0 (via stbi__err) on invalid
   code lengths, 1 on success. */
static int stbi__build_huffman(stbi__huffman *h, int *count)
{
   int i,j,k=0,code;

   /* build size list for each symbol (JPEG spec annex C) */
   for (i=0; i < 16; ++i)
      for (j=0; j < count[i]; ++j)
         h->size[k++] = (uint8_t) (i+1);
   h->size[k] = 0;   /* terminator */

   /* assign canonical codes, length by length */
   code = 0;
   k = 0;
   for(j=1; j <= 16; ++j) {
      /* delta maps a decoded code back to its symbol index */
      h->delta[j] = k - code;
      if (h->size[k] == j) {
         while (h->size[k] == j)
            h->code[k++] = (uint16_t) (code++);
         if (code-1 >= (1 << j)) return stbi__err("bad code lengths","Corrupt JPEG");
      }
      /* largest code+1 of this length, left-justified for the 16-bit
         slow-path compare in stbi__jpeg_huff_decode */
      h->maxcode[j] = code << (16-j);
      code <<= 1;
   }
   h->maxcode[j] = 0xffffffff;   /* sentinel: slow-path scan always terminates */

   /* build the acceleration table; 255 flags "code longer than FAST_BITS" */
   memset(h->fast, 255, 1 << FAST_BITS);
   for (i=0; i < k; ++i) {
      int s = h->size[i];
      if (s <= FAST_BITS) {
         /* every FAST_BITS-bit extension of this code maps to symbol i */
         int c = h->code[i] << (FAST_BITS-s);
         int m = 1 << (FAST_BITS-s);
         for (j=0; j < m; ++j) {
            h->fast[c+j] = (uint8_t) i;
         }
      }
   }
   return 1;
}
|
|
|
|
/* Precompute the fast-path AC table: for each FAST_BITS-bit bit-buffer
   prefix, pack (value << 8) | (run << 4) | total_bits_consumed when the
   whole Huffman symbol plus its magnitude bits fit in the prefix and the
   value fits in a signed byte; 0 means "take the slow path". */
static void stbi__build_fast_ac(int16_t *fast_ac, stbi__huffman *h)
{
   int i;
   for (i=0; i < (1 << FAST_BITS); ++i) {
      uint8_t fast = h->fast[i];
      fast_ac[i] = 0;
      if (fast < 255) {
         int rs = h->values[fast];
         int run = (rs >> 4) & 15;
         int magbits = rs & 15;
         int len = h->size[fast];

         if (magbits && len + magbits <= FAST_BITS)
         {
            /* magnitude bits follow the Huffman code in the prefix */
            int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
            int m = 1 << (magbits - 1);
            /* receive/extend for negatives: was `(-1 << magbits) + 1`,
               which left-shifts a negative value — undefined behavior.
               The unsigned form gives the same bit pattern without UB. */
            if (k < m) k += (int)((~0U << magbits) + 1);
            /* only values representable in the packed byte qualify */
            if (k >= -128 && k <= 127)
               fast_ac[i] = (int16_t) ((k << 8) + (run << 4) + (len + magbits));
         }
      }
   }
}
|
|
|
|
/* Top the bit buffer up to at least 25 valid bits.  A 0xFF followed by a
   nonzero byte is a real marker: record it and stop feeding data; a
   0xFF 0x00 pair is byte-stuffing for a literal 0xFF data byte. */
static void stbi__grow_buffer_unsafe(stbi__jpeg *j)
{
   do {
      int b = j->nomore ? 0 : stbi__get8(j->s);
      if (b == 0xff) {
         int c = stbi__get8(j->s);
         if (c != 0) {
            j->marker = (unsigned char) c;
            j->nomore = 1;   /* feed zeros from now on */
            return;
         }
      }
      j->code_buffer |= b << (24 - j->code_bits);
      j->code_bits += 8;
   } while (j->code_bits <= 24);
}
|
|
|
|
/* stbi__bmask[n] == (1<<n)-1: mask of the low n bits, for n in 0..16. */
static uint32_t stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535};
|
|
|
|
/* Decode one Huffman symbol from the bit buffer.  Fast path: single
   FAST_BITS-bit table lookup; slow path: scan maxcode[] per code length.
   Returns the symbol value, or -1 on a bad code / exhausted buffer. */
static INLINE int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h)
{
   unsigned int temp;
   int c,k;

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

   /* look at the top FAST_BITS bits; fast[] resolves any code that short */
   c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
   k = h->fast[c];
   if (k < 255) {
      int s = h->size[k];
      if (s > j->code_bits)
         return -1;          /* ran out of bits */
      j->code_buffer <<= s;
      j->code_bits -= s;
      return h->values[k];
   }

   /* slow path: maxcode[] was preshifted left-justified to 16 bits, so we
      can compare the top 16 buffer bits directly against each length */
   temp = j->code_buffer >> 16;
   for (k=FAST_BITS+1 ; ; ++k)
      if (temp < h->maxcode[k])
         break;
   if (k == 17) {
      /* code not found (only the sentinel matched) */
      j->code_bits -= 16;
      return -1;
   }

   if (k > j->code_bits)
      return -1;

   /* convert the k-bit code to a symbol index via delta[] */
   c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
   STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]);

   /* consume the code and return its symbol value */
   j->code_bits -= k;
   j->code_buffer <<= k;
   return h->values[c];
}
|
|
|
|
/* stbi__jbias[n] == -(2^n)+1: bias for JPEG receive/extend of negative values. */
static int const stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767};
|
|
|
|
/* JPEG "receive/extend": read n bits and sign-extend them.  A leading 0
   bit means a negative value, reconstructed by adding stbi__jbias[n]. */
static INLINE int stbi__extend_receive(stbi__jpeg *j, int n)
{
   unsigned int k;
   int sgn;
   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);

   /* top bit as an all-ones/all-zeros mask; relies on arithmetic right
      shift of a negative int32_t (implementation-defined but universal) */
   sgn = (int32_t)j->code_buffer >> 31;
   k = stbi_lrot(j->code_buffer, n);   /* rotate the wanted bits to the bottom */
   STBI_ASSERT((n >= 0) && ((unsigned)n < sizeof(stbi__bmask)/sizeof(*stbi__bmask)));
   j->code_buffer = k & ~stbi__bmask[n];
   k &= stbi__bmask[n];
   j->code_bits -= n;
   /* apply the negative bias only when the sign bit was clear */
   return k + (stbi__jbias[n] & ~sgn);
}
|
|
|
|
/* Read n bits from the bit buffer, MSB-first, as an unsigned value. */
static INLINE int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
{
   unsigned int k;
   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
   k = stbi_lrot(j->code_buffer, n);   /* rotate the wanted bits to the bottom */
   j->code_buffer = k & ~stbi__bmask[n];
   k &= stbi__bmask[n];
   j->code_bits -= n;
   return k;
}
|
|
|
|
/* Read a single bit.  Returns 0 or 0x80000000 (callers test for nonzero,
   not for 1). */
static INLINE int stbi__jpeg_get_bit(stbi__jpeg *j)
{
   unsigned int k;
   if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
   k = j->code_buffer;
   j->code_buffer <<= 1;
   --j->code_bits;
   return k & 0x80000000;
}
|
|
|
|
/* Zigzag scan position -> natural 8x8 block index.  Padded with 15 extra
   63s so that corrupt streams which overrun k in the decode loops still
   index in-bounds. */
static uint8_t stbi__jpeg_dezigzag[64+15] =
{
    0,  1,  8, 16,  9,  2,  3, 10,
   17, 24, 32, 25, 18, 11,  4,  5,
   12, 19, 26, 33, 40, 48, 41, 34,
   27, 20, 13,  6,  7, 14, 21, 28,
   35, 42, 49, 56, 57, 50, 43, 36,
   29, 22, 15, 23, 30, 37, 44, 51,
   58, 59, 52, 45, 38, 31, 39, 46,
   53, 60, 61, 54, 47, 55, 62, 63,
   /* overrun padding */
   63, 63, 63, 63, 63, 63, 63, 63,
   63, 63, 63, 63, 63, 63, 63
};
|
|
|
|
/* Decode one complete 8x8 block of a baseline (sequential) scan for
   component b: DC via differential prediction, then AC as run-length /
   Huffman pairs, dequantized into natural (de-zigzagged) order.
   Returns 1 on success, 0 (via stbi__err) on a corrupt stream. */
static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, int16_t *fac, int b, uint8_t *dequant)
{
   int diff,dc,k;
   int t;

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
   t = stbi__jpeg_huff_decode(j, hdc);
   if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG");

   /* zero all AC values up front */
   memset(data,0,64*sizeof(data[0]));

   /* DC is coded as a delta from the previous block of this component */
   diff = t ? stbi__extend_receive(j, t) : 0;
   dc = j->img_comp[b].dc_pred + diff;
   j->img_comp[b].dc_pred = dc;
   data[0] = (short) (dc * dequant[0]);

   /* decode AC coefficients */
   k = 1;
   do {
      unsigned int zig;
      int c,r,s;
      if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
      c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
      r = fac[c];
      if (r)
      {  /* fast-AC path: run, bit count and value from one lookup */
         k += (r >> 4) & 15; /* run of zeros */
         s = r & 15;         /* total bits consumed */
         j->code_buffer <<= s;
         j->code_bits -= s;
         /* store into the de-zigzagged location */
         zig = stbi__jpeg_dezigzag[k++];
         data[zig] = (short) ((r >> 8) * dequant[zig]);
      } else {
         int rs = stbi__jpeg_huff_decode(j, hac);
         if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
         s = rs & 15;
         r = rs >> 4;
         if (s == 0) {
            if (rs != 0xf0) break; /* EOB: rest of block is zero */
            k += 16;               /* ZRL: run of sixteen zeros */
         } else {
            k += r;
            zig = stbi__jpeg_dezigzag[k++];
            data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]);
         }
      }
   } while (k < 64);
   return 1;
}
|
|
|
|
/* Decode the DC coefficient of one block in a progressive scan.  The
   first DC scan (succ_high == 0) establishes the value at bit position
   succ_low; later refinement scans add one bit of precision. */
static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b)
{
   int diff,dc;
   int t;
   if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

   if (j->succ_high == 0) {
      /* first scan: block starts fresh, AC values zeroed too */
      memset(data,0,64*sizeof(data[0]));
      t = stbi__jpeg_huff_decode(j, hdc);
      diff = t ? stbi__extend_receive(j, t) : 0;

      dc = j->img_comp[b].dc_pred + diff;   /* differential prediction */
      j->img_comp[b].dc_pred = dc;
      data[0] = (short) (dc << j->succ_low);
   } else {
      /* refinement scan: one correction bit */
      if (stbi__jpeg_get_bit(j))
         data[0] += (short) (1 << j->succ_low);
   }
   return 1;
}
|
|
|
|
/* Decode one block's AC coefficients in a progressive scan (spectral band
   spec_start..spec_end, successive approximation succ_high/succ_low).
   First scans (succ_high == 0) establish coefficients at bit position
   succ_low; refinement scans add one bit each and maintain end-of-band
   (EOB) run bookkeeping across blocks via j->eob_run. */
static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, int16_t *fac)
{
   int k;
   if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

   if (j->succ_high == 0) {
      int shift = j->succ_low;

      /* inside an EOB run: this whole block is zero in this scan */
      if (j->eob_run) {
         --j->eob_run;
         return 1;
      }

      k = j->spec_start;
      do {
         unsigned int zig;
         int c,r,s;
         if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
         c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
         r = fac[c];
         if (r)
         {  /* fast-AC path: run, bit count and value from one lookup */
            k += (r >> 4) & 15; /* run of zeros */
            s = r & 15;         /* total bits consumed */
            j->code_buffer <<= s;
            j->code_bits -= s;
            zig = stbi__jpeg_dezigzag[k++];
            data[zig] = (short) ((r >> 8) << shift);
         } else {
            int rs = stbi__jpeg_huff_decode(j, hac);
            if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
            s = rs & 15;
            r = rs >> 4;
            if (s == 0) {
               if (r < 15) {
                  /* EOBn: 2^r (+ extra bits) consecutive blocks end here */
                  j->eob_run = (1 << r);
                  if (r)
                     j->eob_run += stbi__jpeg_get_bits(j, r);
                  --j->eob_run;   /* this block counts as the first */
                  break;
               }
               k += 16;   /* ZRL: run of sixteen zeros */
            } else {
               k += r;
               zig = stbi__jpeg_dezigzag[k++];
               data[zig] = (short) (stbi__extend_receive(j,s) << shift);
            }
         }
      } while (k <= j->spec_end);
   }
   else
   {
      /* refinement scan: add the bit at succ_low to already-known coeffs */
      short bit = (short) (1 << j->succ_low);

      if (j->eob_run) {
         /* EOB run: only refine coefficients that are already nonzero */
         --j->eob_run;
         for (k = j->spec_start; k <= j->spec_end; ++k) {
            short *p = &data[stbi__jpeg_dezigzag[k]];
            if (*p != 0)
               if (stbi__jpeg_get_bit(j))
                  if ((*p & bit)==0) {
                     /* move away from zero by one refinement step */
                     if (*p > 0)
                        *p += bit;
                     else
                        *p -= bit;
                  }
         }
      } else {
         k = j->spec_start;
         do {
            int r,s;
            int rs = stbi__jpeg_huff_decode(j, hac);
            if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
            s = rs & 15;
            r = rs >> 4;
            if (s == 0) {
               if (r < 15) {
                  /* EOBn begins; current block still refines below */
                  j->eob_run = (1 << r) - 1;
                  if (r)
                     j->eob_run += stbi__jpeg_get_bits(j, r);
                  r = 64; /* force the while loop to run to spec_end */
               } else
                  r = 16; /* r==15, s==0: skip sixteen zero coefficients */
            } else {
               if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG");
               /* sign bit selects +bit or -bit for the new coefficient */
               if (stbi__jpeg_get_bit(j))
                  s = bit;
               else
                  s = -bit;
            }

            /* advance past r zero-history coefficients, refining nonzero
               ones encountered along the way */
            while (k <= j->spec_end) {
               short *p = &data[stbi__jpeg_dezigzag[k]];
               if (*p != 0) {
                  if (stbi__jpeg_get_bit(j))
                     if ((*p & bit)==0) {
                        if (*p > 0)
                           *p += bit;
                        else
                           *p -= bit;
                     }
                  ++k;
               } else {
                  if (r == 0) {
                     /* place the newly-decoded coefficient here */
                     if (s)
                        data[stbi__jpeg_dezigzag[k++]] = (short) s;
                     break;
                  }
                  --r;
                  ++k;
               }
            }
         } while (k <= j->spec_end);
      }
   }
   return 1;
}
|
|
|
|
/* Clamp an int to 0..255.  The single unsigned comparison keeps the
   common in-range case to one branch. */
static INLINE uint8_t stbi__clamp(int x)
{
   if ((unsigned int) x > 255)
      return (x < 0) ? 0 : 255;
   return (uint8_t) x;
}
|
|
|
|
#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5)))
|
|
#define stbi__fsh(x) ((x) << 12)
|
|
|
|
#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \
|
|
int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \
|
|
p2 = s2; \
|
|
p3 = s6; \
|
|
p1 = (p2+p3) * stbi__f2f(0.5411961f); \
|
|
t2 = p1 + p3*stbi__f2f(-1.847759065f); \
|
|
t3 = p1 + p2*stbi__f2f( 0.765366865f); \
|
|
p2 = s0; \
|
|
p3 = s4; \
|
|
t0 = stbi__fsh(p2+p3); \
|
|
t1 = stbi__fsh(p2-p3); \
|
|
x0 = t0+t3; \
|
|
x3 = t0-t3; \
|
|
x1 = t1+t2; \
|
|
x2 = t1-t2; \
|
|
t0 = s7; \
|
|
t1 = s5; \
|
|
t2 = s3; \
|
|
t3 = s1; \
|
|
p3 = t0+t2; \
|
|
p4 = t1+t3; \
|
|
p1 = t0+t3; \
|
|
p2 = t1+t2; \
|
|
p5 = (p3+p4)*stbi__f2f( 1.175875602f); \
|
|
t0 = t0*stbi__f2f( 0.298631336f); \
|
|
t1 = t1*stbi__f2f( 2.053119869f); \
|
|
t2 = t2*stbi__f2f( 3.072711026f); \
|
|
t3 = t3*stbi__f2f( 1.501321110f); \
|
|
p1 = p5 + p1*stbi__f2f(-0.899976223f); \
|
|
p2 = p5 + p2*stbi__f2f(-2.562915447f); \
|
|
p3 = p3*stbi__f2f(-1.961570560f); \
|
|
p4 = p4*stbi__f2f(-0.390180644f); \
|
|
t3 += p1+p4; \
|
|
t2 += p2+p3; \
|
|
t1 += p2+p4; \
|
|
t0 += p1+p3;
|
|
|
|
/* Reference fixed-point 8x8 inverse DCT.  Column pass into val[] with
   rounding bias 512 and >>10 (keeping 2 extra fractional bits), then a
   row pass writing clamped bytes with bias 65536+(128<<17) and >>17 —
   the (128<<17) term folds in the JPEG +128 level shift. */
static void stbi__idct_block(uint8_t *out, int out_stride, short data[64])
{
   int i,val[64],*v=val;
   uint8_t *o;
   short *d = data;

   /* columns */
   for (i=0; i < 8; ++i,++d, ++v) {
      /* all-zero AC shortcut: the column IDCT of a lone DC term is flat */
      if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0
           && d[40]==0 && d[48]==0 && d[56]==0) {
         int dcterm = d[0] << 2;
         v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
      } else {
         STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56])
         /* constants scaled by 1<<12; bring back down by >>10, keeping
            two extra fractional bits of precision for the row pass */
         x0 += 512; x1 += 512; x2 += 512; x3 += 512;
         v[ 0] = (x0+t3) >> 10;
         v[56] = (x0-t3) >> 10;
         v[ 8] = (x1+t2) >> 10;
         v[48] = (x1-t2) >> 10;
         v[16] = (x2+t1) >> 10;
         v[40] = (x2-t1) >> 10;
         v[24] = (x3+t0) >> 10;
         v[32] = (x3-t0) >> 10;
      }
   }

   /* rows */
   for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) {
      STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7])
      /* rounding bias plus the +128 level shift, applied before >>17 */
      x0 += 65536 + (128<<17);
      x1 += 65536 + (128<<17);
      x2 += 65536 + (128<<17);
      x3 += 65536 + (128<<17);
      o[0] = stbi__clamp((x0+t3) >> 17);
      o[7] = stbi__clamp((x0-t3) >> 17);
      o[1] = stbi__clamp((x1+t2) >> 17);
      o[6] = stbi__clamp((x1-t2) >> 17);
      o[2] = stbi__clamp((x2+t1) >> 17);
      o[5] = stbi__clamp((x2-t1) >> 17);
      o[3] = stbi__clamp((x3+t0) >> 17);
      o[4] = stbi__clamp((x3-t0) >> 17);
   }
}
|
|
|
|
#ifdef STBI_SSE2
|
|
/* SSE2 8x8 inverse DCT.  Same structure as stbi__idct_block: a column
   pass (bias 512, >>10), a 16-bit transpose, a row pass (bias
   65536+(128<<17), >>17 — includes the +128 level shift), then an 8-bit
   pack/interleave that re-transposes while storing 8 output rows. */
static void stbi__idct_simd(uint8_t *out, int out_stride, short data[64])
{
   __m128i row0, row1, row2, row3, row4, row5, row6, row7;
   __m128i tmp;

   /* replicate an (x,y) coefficient pair across a register, for pmaddwd */
#define dct_const(x,y)  _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y))

   /* out = in*c0/c1 as 32-bit sums over interleaved 16-bit pairs */
#define dct_rot(out0,out1, x,y,c0,c1) \
      __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \
      __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \
      __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
      __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
      __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
      __m128i out1##_h = _mm_madd_epi16(c0##hi, c1)

   /* widen 16-bit lanes to 32-bit, scaled to match the pmaddwd products */
#define dct_widen(out, in) \
      __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
      __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)

   /* wide (32-bit) add/subtract on an l/h register pair */
#define dct_wadd(out, a, b) \
      __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
      __m128i out##_h = _mm_add_epi32(a##_h, b##_h)

#define dct_wsub(out, a, b) \
      __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
      __m128i out##_h = _mm_sub_epi32(a##_h, b##_h)

   /* butterfly a±b, add bias, shift down by s, pack back to 16-bit */
#define dct_bfly32o(out0, out1, a,b,bias,s) \
      { \
         __m128i abiased_l = _mm_add_epi32(a##_l, bias); \
         __m128i abiased_h = _mm_add_epi32(a##_h, bias); \
         dct_wadd(sum, abiased, b); \
         dct_wsub(dif, abiased, b); \
         out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
         out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
      }

#define dct_interleave8(a, b) \
      tmp = a; \
      a = _mm_unpacklo_epi8(a, b); \
      b = _mm_unpackhi_epi8(tmp, b)

#define dct_interleave16(a, b) \
      tmp = a; \
      a = _mm_unpacklo_epi16(a, b); \
      b = _mm_unpackhi_epi16(tmp, b)

   /* one full 1-D IDCT over all eight rows (same math as STBI__IDCT_1D) */
#define dct_pass(bias,shift) \
      { \
         /* even part */ \
         dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \
         __m128i sum04 = _mm_add_epi16(row0, row4); \
         __m128i dif04 = _mm_sub_epi16(row0, row4); \
         dct_widen(t0e, sum04); \
         dct_widen(t1e, dif04); \
         dct_wadd(x0, t0e, t3e); \
         dct_wsub(x3, t0e, t3e); \
         dct_wadd(x1, t1e, t2e); \
         dct_wsub(x2, t1e, t2e); \
         /* odd part */ \
         dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \
         dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \
         __m128i sum17 = _mm_add_epi16(row1, row7); \
         __m128i sum35 = _mm_add_epi16(row3, row5); \
         dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \
         dct_wadd(x4, y0o, y4o); \
         dct_wadd(x5, y1o, y5o); \
         dct_wadd(x6, y2o, y5o); \
         dct_wadd(x7, y3o, y4o); \
         dct_bfly32o(row0,row7, x0,x7,bias,shift); \
         dct_bfly32o(row1,row6, x1,x6,bias,shift); \
         dct_bfly32o(row2,row5, x2,x5,bias,shift); \
         dct_bfly32o(row3,row4, x3,x4,bias,shift); \
      }

   /* rotation constants, paired so one pmaddwd computes c0*x + c1*y */
   __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f));
   __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f));
   __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f));
   __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f));
   __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f));
   __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f));
   __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f));
   __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f));

   /* rounding biases: column pass, then row pass (+128 level shift) */
   __m128i bias_0 = _mm_set1_epi32(512);
   __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17));

   /* load (data is 16-byte aligned via STBI_SIMD_ALIGN at the call site) */
   row0 = _mm_load_si128((const __m128i *) (data + 0*8));
   row1 = _mm_load_si128((const __m128i *) (data + 1*8));
   row2 = _mm_load_si128((const __m128i *) (data + 2*8));
   row3 = _mm_load_si128((const __m128i *) (data + 3*8));
   row4 = _mm_load_si128((const __m128i *) (data + 4*8));
   row5 = _mm_load_si128((const __m128i *) (data + 5*8));
   row6 = _mm_load_si128((const __m128i *) (data + 6*8));
   row7 = _mm_load_si128((const __m128i *) (data + 7*8));

   /* column pass */
   dct_pass(bias_0, 10);

   {
      /* 16-bit 8x8 transpose, done as three interleave rounds */
      dct_interleave16(row0, row4);
      dct_interleave16(row1, row5);
      dct_interleave16(row2, row6);
      dct_interleave16(row3, row7);

      dct_interleave16(row0, row2);
      dct_interleave16(row1, row3);
      dct_interleave16(row4, row6);
      dct_interleave16(row5, row7);

      dct_interleave16(row0, row1);
      dct_interleave16(row2, row3);
      dct_interleave16(row4, row5);
      dct_interleave16(row6, row7);
   }

   /* row pass */
   dct_pass(bias_1, 17);

   {
      /* pack to unsigned bytes (saturating), then interleave rounds
         re-transpose so each 64-bit store is one output row */
      __m128i p0 = _mm_packus_epi16(row0, row1);
      __m128i p1 = _mm_packus_epi16(row2, row3);
      __m128i p2 = _mm_packus_epi16(row4, row5);
      __m128i p3 = _mm_packus_epi16(row6, row7);

      dct_interleave8(p0, p2);
      dct_interleave8(p1, p3);

      dct_interleave8(p0, p1);
      dct_interleave8(p2, p3);

      dct_interleave8(p0, p2);
      dct_interleave8(p1, p3);

      _mm_storel_epi64((__m128i *) out, p0); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride;
      _mm_storel_epi64((__m128i *) out, p2); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride;
      _mm_storel_epi64((__m128i *) out, p1); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride;
      _mm_storel_epi64((__m128i *) out, p3); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e));
   }

#undef dct_const
#undef dct_rot
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_interleave8
#undef dct_interleave16
#undef dct_pass
}
|
|
|
|
#endif
|
|
|
|
#ifdef STBI_NEON
|
|
|
|
/* NEON integer IDCT for one 8x8 JPEG coefficient block.
   Performs a column pass, a 16-bit transpose, a row pass, then narrows to
   unsigned bytes and writes eight 8-byte rows to out (stride out_stride).
   Intermediate math is widened to 32 bits via the dct_* macros below. */
static void stbi__idct_simd(uint8_t *out, int out_stride, short data[64])
{
int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;

/* rotation constants (AAN-style factors, converted to fixed point by
   stbi__f2f, which is defined earlier in this file) */
int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f));
int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f));
int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f));
int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f));
int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f));
int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f));
int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f));
int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f));
int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f));
int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f));
int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f));
int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f));

/* widening multiply: out_l/out_h are the 32-bit products of inq * coeff */
#define dct_long_mul(out, inq, coeff) \
int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)

/* widening multiply-accumulate onto acc */
#define dct_long_mac(out, acc, inq, coeff) \
int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \
int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)

/* widen a 16-bit vector into the 32-bit fixed-point domain (<< 12) */
#define dct_widen(out, inq) \
int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)

#define dct_wadd(out, a, b) \
int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
int32x4_t out##_h = vaddq_s32(a##_h, b##_h)

#define dct_wsub(out, a, b) \
int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
int32x4_t out##_h = vsubq_s32(a##_h, b##_h)

/* butterfly: out0 = (a+b) >> s, out1 = (a-b) >> s, narrowed via shiftop */
#define dct_bfly32o(out0,out1, a,b,shiftop,s) \
{ \
dct_wadd(sum, a, b); \
dct_wsub(dif, a, b); \
out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
}

/* one 1-D IDCT pass over row0..row7 in place */
#define dct_pass(shiftop, shift) \
{ \
/* even part */ \
int16x8_t sum26 = vaddq_s16(row2, row6); \
dct_long_mul(p1e, sum26, rot0_0); \
dct_long_mac(t2e, p1e, row6, rot0_1); \
dct_long_mac(t3e, p1e, row2, rot0_2); \
int16x8_t sum04 = vaddq_s16(row0, row4); \
int16x8_t dif04 = vsubq_s16(row0, row4); \
dct_widen(t0e, sum04); \
dct_widen(t1e, dif04); \
dct_wadd(x0, t0e, t3e); \
dct_wsub(x3, t0e, t3e); \
dct_wadd(x1, t1e, t2e); \
dct_wsub(x2, t1e, t2e); \
/* odd part */ \
int16x8_t sum15 = vaddq_s16(row1, row5); \
int16x8_t sum17 = vaddq_s16(row1, row7); \
int16x8_t sum35 = vaddq_s16(row3, row5); \
int16x8_t sum37 = vaddq_s16(row3, row7); \
int16x8_t sumodd = vaddq_s16(sum17, sum35); \
dct_long_mul(p5o, sumodd, rot1_0); \
dct_long_mac(p1o, p5o, sum17, rot1_1); \
dct_long_mac(p2o, p5o, sum35, rot1_2); \
dct_long_mul(p3o, sum37, rot2_0); \
dct_long_mul(p4o, sum15, rot2_1); \
dct_wadd(sump13o, p1o, p3o); \
dct_wadd(sump24o, p2o, p4o); \
dct_wadd(sump23o, p2o, p3o); \
dct_wadd(sump14o, p1o, p4o); \
dct_long_mac(x4, sump13o, row7, rot3_0); \
dct_long_mac(x5, sump24o, row5, rot3_1); \
dct_long_mac(x6, sump23o, row3, rot3_2); \
dct_long_mac(x7, sump14o, row1, rot3_3); \
dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \
dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \
dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \
dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \
}

/* load the 8x8 block, one row per vector */
row0 = vld1q_s16(data + 0*8);
row1 = vld1q_s16(data + 1*8);
row2 = vld1q_s16(data + 2*8);
row3 = vld1q_s16(data + 3*8);
row4 = vld1q_s16(data + 4*8);
row5 = vld1q_s16(data + 5*8);
row6 = vld1q_s16(data + 6*8);
row7 = vld1q_s16(data + 7*8);

/* add the rounding/level-shift bias to the DC term only (lane 0 of row0) */
row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));

/* column pass: rounding narrow with shift 10 */
dct_pass(vrshrn_n_s32, 10);

{
/* 8x8 transpose via 16/32/64-bit interleaves */
#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; }
#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); }
#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); }

dct_trn16(row0, row1);
dct_trn16(row2, row3);
dct_trn16(row4, row5);
dct_trn16(row6, row7);

dct_trn32(row0, row2);
dct_trn32(row1, row3);
dct_trn32(row4, row6);
dct_trn32(row5, row7);

dct_trn64(row0, row4);
dct_trn64(row1, row5);
dct_trn64(row2, row6);
dct_trn64(row3, row7);

#undef dct_trn16
#undef dct_trn32
#undef dct_trn64
}

/* row pass: truncating narrow with shift 16; the final rounding happens
   in the vqrshrun_n_s16(..., 1) below */
dct_pass(vshrn_n_s32, 16);

{
/* saturate to unsigned 8-bit with a final rounding shift by 1 */
uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
uint8x8_t p7 = vqrshrun_n_s16(row7, 1);

/* second 8x8 transpose, now on bytes, to restore row order for output */
#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; }
#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); }
#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); }

dct_trn8_8(p0, p1);
dct_trn8_8(p2, p3);
dct_trn8_8(p4, p5);
dct_trn8_8(p6, p7);

dct_trn8_16(p0, p2);
dct_trn8_16(p1, p3);
dct_trn8_16(p4, p6);
dct_trn8_16(p5, p7);

dct_trn8_32(p0, p4);
dct_trn8_32(p1, p5);
dct_trn8_32(p2, p6);
dct_trn8_32(p3, p7);

/* store eight output rows */
vst1_u8(out, p0); out += out_stride;
vst1_u8(out, p1); out += out_stride;
vst1_u8(out, p2); out += out_stride;
vst1_u8(out, p3); out += out_stride;
vst1_u8(out, p4); out += out_stride;
vst1_u8(out, p5); out += out_stride;
vst1_u8(out, p6); out += out_stride;
vst1_u8(out, p7);

#undef dct_trn8_8
#undef dct_trn8_16
#undef dct_trn8_32
}

#undef dct_long_mul
#undef dct_long_mac
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_pass
}
|
|
|
|
#endif
|
|
|
|
#define STBI__MARKER_none 0xff
|
|
/* Return the next JPEG marker byte.  A marker queued by the decoder in
   j->marker takes priority; otherwise read from the stream, requiring a
   0xFF prefix (fill bytes of 0xFF are skipped).  Returns STBI__MARKER_none
   if no marker is present at the current position. */
static uint8_t stbi__get_marker(stbi__jpeg *j)
{
   uint8_t m;

   /* a marker may have been consumed early and stashed by the decoder */
   if (j->marker != STBI__MARKER_none)
   {
      m         = j->marker;
      j->marker = STBI__MARKER_none;
      return m;
   }

   m = stbi__get8(j->s);
   if (m != 0xff)
      return STBI__MARKER_none;

   /* skip any 0xFF fill bytes; the first non-FF byte is the marker code */
   do
   {
      m = stbi__get8(j->s);
   } while (m == 0xff);

   return m;
}
|
|
|
|
#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)
|
|
|
|
/* Reset per-scan entropy-decoder state, as required after a restart marker
   or at the start of a new scan: bit buffer, DC predictors, EOB run, and
   the countdown to the next expected restart marker. */
static void stbi__jpeg_reset(stbi__jpeg *j)
{
   j->code_bits   = 0;
   j->code_buffer = 0;
   j->nomore      = 0;

   j->img_comp[0].dc_pred = 0;
   j->img_comp[1].dc_pred = 0;
   j->img_comp[2].dc_pred = 0;

   j->marker  = STBI__MARKER_none;
   j->eob_run = 0;

   /* no restart_interval means "effectively never restart" */
   if (j->restart_interval != 0)
      j->todo = j->restart_interval;
   else
      j->todo = 0x7fffffff;
}
|
|
|
|
/* Decode one entropy-coded scan.  Four cases, by (progressive?, scan_n==1?):
   baseline decodes+IDCTs blocks directly into the component planes, while
   progressive only accumulates coefficients (IDCT happens later in
   stbi__jpeg_finish).  Single-component scans iterate that component's own
   block grid; interleaved scans iterate MCUs.  Restart markers are honored
   via z->todo.  Returns 1 on success, 0 on decode error. */
static int stbi__parse_entropy_coded_data(stbi__jpeg *z)
{
stbi__jpeg_reset(z);
if (!z->progressive)
{
if (z->scan_n == 1)
{
/* baseline, non-interleaved: one component, block-by-block */
int i,j;
STBI_SIMD_ALIGN(short, data[64]);
int n = z->order[0];
/* component dimensions rounded up to whole 8x8 blocks */
int w = (z->img_comp[n].x+7) >> 3;
int h = (z->img_comp[n].y+7) >> 3;
for (j=0; j < h; ++j) {
for (i=0; i < w; ++i) {
int ha = z->img_comp[n].ha;
if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
/* restart-interval bookkeeping: resynchronize at RSTn markers */
if (--z->todo <= 0) {
if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
if (!STBI__RESTART(z->marker)) return 1;
stbi__jpeg_reset(z);
}
}
}
return 1;
}
else
{
/* baseline, interleaved: iterate MCUs, each holding h*v blocks per
   component */
int i,j,k,x,y;
STBI_SIMD_ALIGN(short, data[64]);
for (j=0; j < z->img_mcu_y; ++j)
{
for (i=0; i < z->img_mcu_x; ++i)
{
for (k=0; k < z->scan_n; ++k)
{
int n = z->order[k];
for (y=0; y < z->img_comp[n].v; ++y) {
for (x=0; x < z->img_comp[n].h; ++x) {
/* pixel offset of this block within the component plane */
int x2 = (i*z->img_comp[n].h + x)*8;
int y2 = (j*z->img_comp[n].v + y)*8;
int ha = z->img_comp[n].ha;
if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data);
}
}
}

/* restarts occur on MCU boundaries in interleaved scans */
if (--z->todo <= 0) {
if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
if (!STBI__RESTART(z->marker)) return 1;
stbi__jpeg_reset(z);
}
}
}
return 1;
}
}
else
{
if (z->scan_n == 1)
{
/* progressive, non-interleaved: refine coefficients in place */
int i,j;
int n = z->order[0];
int w = (z->img_comp[n].x+7) >> 3;
int h = (z->img_comp[n].y+7) >> 3;
for (j=0; j < h; ++j) {
for (i=0; i < w; ++i) {
short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
/* spec_start==0 means this scan carries DC data, else AC data */
if (z->spec_start == 0) {
if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
return 0;
} else {
int ha = z->img_comp[n].ha;
if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha]))
return 0;
}
if (--z->todo <= 0) {
if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
if (!STBI__RESTART(z->marker)) return 1;
stbi__jpeg_reset(z);
}
}
}
return 1;
}
else
{
/* progressive, interleaved: per the JPEG spec this form carries DC
   coefficients only */
int i,j,k,x,y;
for (j=0; j < z->img_mcu_y; ++j) {
for (i=0; i < z->img_mcu_x; ++i) {
for (k=0; k < z->scan_n; ++k) {
int n = z->order[k];
for (y=0; y < z->img_comp[n].v; ++y) {
for (x=0; x < z->img_comp[n].h; ++x) {
/* block coordinates within the coefficient grid */
int x2 = (i*z->img_comp[n].h + x);
int y2 = (j*z->img_comp[n].v + y);
short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w);
if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
return 0;
}
}
}
if (--z->todo <= 0) {
if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
if (!STBI__RESTART(z->marker)) return 1;
stbi__jpeg_reset(z);
}
}
}
return 1;
}
}
}
|
|
|
|
/* Multiply each of the 64 coefficients of one block by its quantization
   table entry, in place. */
static void stbi__jpeg_dequantize(short *data, uint8_t *dequant)
{
   short *stop = data + 64;
   while (data < stop)
      *data++ *= *dequant++;
}
|
|
|
|
/* Final step for progressive JPEGs only: once all scans have been parsed,
   dequantize every stored 8x8 coefficient block and run the IDCT into the
   component's pixel plane.  No-op for baseline images. */
static void stbi__jpeg_finish(stbi__jpeg *z)
{
if (z->progressive) {
/* dequantize and idct the data */
int i,j,n;
for (n=0; n < z->s->img_n; ++n) {
/* component size in whole 8x8 blocks */
int w = (z->img_comp[n].x+7) >> 3;
int h = (z->img_comp[n].y+7) >> 3;
for (j=0; j < h; ++j) {
for (i=0; i < w; ++i) {
short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]);
z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
}
}
}
}
}
|
|
|
|
/* Process one non-SOF/SOS marker segment: DRI (restart interval), DQT
   (quantization tables), DHT (huffman tables), or a skippable APPn/COM
   segment.  Returns 1 on success, 0 (via stbi__err) on corrupt data or an
   unexpected marker. */
static int stbi__process_marker(stbi__jpeg *z, int m)
{
   int L;
   switch (m) {
      case STBI__MARKER_none: /* no marker found where one was required */
         return stbi__err("expected marker","Corrupt JPEG");

      case 0xDD: /* DRI - restart interval; payload is exactly 4 bytes */
         if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG");
         z->restart_interval = stbi__get16be(z->s);
         return 1;

      case 0xDB: /* DQT - define quantization table(s) */
         L = stbi__get16be(z->s)-2;
         while (L > 0) {
            int q = stbi__get8(z->s);
            int p = q >> 4;    /* precision: only 8-bit (0) supported here */
            int t = q & 15,i;  /* table id */
            if (p != 0) return stbi__err("bad DQT type","Corrupt JPEG");
            if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG");
            /* table is stored in zigzag order; de-zigzag while reading */
            for (i=0; i < 64; ++i)
               z->dequant[t][stbi__jpeg_dezigzag[i]] = stbi__get8(z->s);
            L -= 65;
         }
         return L==0;

      case 0xC4: /* DHT - define huffman table(s) */
         L = stbi__get16be(z->s)-2;
         while (L > 0) {
            uint8_t *v;
            int sizes[16],i,n=0;
            int q = stbi__get8(z->s);
            int tc = q >> 4;   /* table class: 0 = DC, 1 = AC */
            int th = q & 15;   /* table id */
            if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG");
            for (i=0; i < 16; ++i) {
               sizes[i] = stbi__get8(z->s);
               n += sizes[i];
            }
            /* a huffman table can define at most 256 symbols; a corrupt
               file declaring more would overflow the values[] array below
               (bounds check per upstream stb_image fix) */
            if (n > 256) return stbi__err("bad DHT header","Corrupt JPEG");
            L -= 17;
            if (tc == 0) {
               if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0;
               v = z->huff_dc[th].values;
            } else {
               if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0;
               v = z->huff_ac[th].values;
            }
            for (i=0; i < n; ++i)
               v[i] = stbi__get8(z->s);
            /* AC tables get an additional fast-decode acceleration table */
            if (tc != 0)
               stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th);
            L -= n;
         }
         return L==0;
   }
   /* APP0..APP15 and COM: skip the payload entirely */
   if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) {
      stbi__skip(z->s, stbi__get16be(z->s)-2);
      return 1;
   }
   return 0;
}
|
|
|
|
/* Parse an SOS (start-of-scan) segment: which components participate in
   this scan, their DC/AC huffman table ids, and the progressive scan
   parameters (spectral selection + successive approximation).
   Returns 1 on success, 0 on corrupt data. */
static int stbi__process_scan_header(stbi__jpeg *z)
{
int i;
int Ls = stbi__get16be(z->s);
z->scan_n = stbi__get8(z->s);
if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG");
if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG");
for (i=0; i < z->scan_n; ++i) {
int id = stbi__get8(z->s), which;
int q = stbi__get8(z->s);
/* map the scan's component id back to its frame-header index */
for (which = 0; which < z->s->img_n; ++which)
if (z->img_comp[which].id == id)
break;
if (which == z->s->img_n) return 0;
z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG");
z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG");
z->order[i] = which;
}

{
int aa;
/* spectral selection range and successive-approximation bit positions;
   only meaningful for progressive JPEGs */
z->spec_start = stbi__get8(z->s);
z->spec_end = stbi__get8(z->s);
aa = stbi__get8(z->s);
z->succ_high = (aa >> 4);
z->succ_low = (aa & 15);
if (z->progressive) {
if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13)
return stbi__err("bad SOS", "Corrupt JPEG");
} else {
/* baseline: full spectral range, no successive approximation */
if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG");
if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG");
z->spec_end = 63;
}
}

return 1;
}
|
|
|
|
/* Parse an SOF (start-of-frame) segment: dimensions, component count, and
   per-component sampling factors / quant-table ids.  When scan ==
   STBI__SCAN_load, also compute MCU geometry and allocate the per-component
   pixel planes (plus coefficient buffers for progressive images).
   Returns 1 on success, 0 via stbi__err on corrupt or unsupported data.
   On allocation failure, everything allocated so far is freed and the
   pointers are reset so stbi__cleanup_jpeg cannot double-free. */
static int stbi__process_frame_header(stbi__jpeg *z, int scan)
{
   stbi__context *s = z->s;
   int Lf,p,i,q, h_max=1,v_max=1,c;
   Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG");
   p  = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only");
   s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height");
   s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG");
   c = stbi__get8(s);
   /* only grayscale (1) and YCbCr (3) are supported */
   if (c != 3 && c != 1) return stbi__err("bad component count","Corrupt JPEG");
   s->img_n = c;
   for (i=0; i < c; ++i) {
      z->img_comp[i].data = NULL;
      z->img_comp[i].linebuf = NULL;
   }

   if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG");

   for (i=0; i < s->img_n; ++i) {
      z->img_comp[i].id = stbi__get8(s);
      /* component ids are conventionally 1..N, but some encoders use 0..N-1 */
      if (z->img_comp[i].id != i+1)
         if (z->img_comp[i].id != i)
            return stbi__err("bad component ID","Corrupt JPEG");
      q = stbi__get8(s);
      z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG");
      z->img_comp[i].v = q & 15;   if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG");
      z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG");
   }

   if (scan != STBI__SCAN_load) return 1;

   /* reject images whose decoded size would overflow our arithmetic */
   if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode");

   for (i=0; i < s->img_n; ++i) {
      if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h;
      if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v;
   }

   /* compute MCU geometry */
   z->img_h_max = h_max;
   z->img_v_max = v_max;
   z->img_mcu_w = h_max * 8;
   z->img_mcu_h = v_max * 8;
   z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w;
   z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h;

   for (i=0; i < s->img_n; ++i) {
      /* component's logical size, and its plane size rounded out to whole
         MCUs (w2/h2); +15 for 16-byte alignment of the aligned view */
      z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max;
      z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max;
      z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
      z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
      z->img_comp[i].raw_data = malloc(z->img_comp[i].w2 * z->img_comp[i].h2+15);

      if (z->img_comp[i].raw_data == NULL) {
         /* free everything allocated for earlier components, and NULL the
            pointers so a later stbi__cleanup_jpeg cannot double-free */
         for(--i; i >= 0; --i) {
            free(z->img_comp[i].raw_data);
            z->img_comp[i].raw_data = NULL;
            z->img_comp[i].data = NULL;
            free(z->img_comp[i].raw_coeff);
            z->img_comp[i].raw_coeff = 0;
            z->img_comp[i].coeff = 0;
         }
         return stbi__err("outofmem", "Out of memory");
      }
      /* 16-byte aligned view of the raw allocation (needed by SIMD IDCT) */
      z->img_comp[i].data = (uint8_t*) (((size_t) z->img_comp[i].raw_data + 15) & ~15);
      z->img_comp[i].linebuf = NULL;
      if (z->progressive) {
         /* progressive images keep every block's coefficients around until
            all scans are done; coeff grid is in 8x8-block units */
         z->img_comp[i].coeff_w = (z->img_comp[i].w2 + 7) >> 3;
         z->img_comp[i].coeff_h = (z->img_comp[i].h2 + 7) >> 3;
         z->img_comp[i].raw_coeff = malloc(z->img_comp[i].coeff_w * z->img_comp[i].coeff_h * 64 * sizeof(short) + 15);
         if (z->img_comp[i].raw_coeff == NULL) {
            /* previously the result of this malloc was never checked,
               producing a bogus non-NULL coeff pointer on failure */
            free(z->img_comp[i].raw_data);
            z->img_comp[i].raw_data = NULL;
            z->img_comp[i].data = NULL;
            for(--i; i >= 0; --i) {
               free(z->img_comp[i].raw_data);
               z->img_comp[i].raw_data = NULL;
               z->img_comp[i].data = NULL;
               free(z->img_comp[i].raw_coeff);
               z->img_comp[i].raw_coeff = 0;
               z->img_comp[i].coeff = 0;
            }
            return stbi__err("outofmem", "Out of memory");
         }
         z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15);
      } else {
         z->img_comp[i].coeff = 0;
         z->img_comp[i].raw_coeff = 0;
      }
   }

   return 1;
}
|
|
|
|
#define stbi__DNL(x) ((x) == 0xdc)
|
|
#define stbi__SOI(x) ((x) == 0xd8)
|
|
#define stbi__EOI(x) ((x) == 0xd9)
|
|
#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2)
|
|
#define stbi__SOS(x) ((x) == 0xda)
|
|
|
|
#define stbi__SOF_progressive(x) ((x) == 0xc2)
|
|
|
|
/* Read markers from SOI up to and including the first SOF segment.
   scan == STBI__SCAN_type only verifies the SOI signature; otherwise the
   frame header is fully parsed (and, for STBI__SCAN_load, planes are
   allocated by stbi__process_frame_header).  Returns 1 on success. */
static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan)
{
int m;
z->marker = STBI__MARKER_none;
m = stbi__get_marker(z);
if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG");
if (scan == STBI__SCAN_type) return 1;
m = stbi__get_marker(z);
while (!stbi__SOF(m))
{
if (!stbi__process_marker(z,m)) return 0;
m = stbi__get_marker(z);
/* tolerate padding between markers: keep reading until a real marker
   appears or we run out of data */
while (m == STBI__MARKER_none)
{
if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG");
m = stbi__get_marker(z);
}
}
/* SOF2 indicates progressive encoding */
z->progressive = stbi__SOF_progressive(m);
if (!stbi__process_frame_header(z, scan)) return 0;
return 1;
}
|
|
|
|
/* Decode a whole JPEG: header, then one entropy-coded scan per SOS marker
   until EOI.  For progressive files, the accumulated coefficients are
   converted to pixels at the end by stbi__jpeg_finish.
   Returns 1 on success, 0 on error. */
static int stbi__decode_jpeg_image(stbi__jpeg *j)
{
int m;
for (m = 0; m < 4; m++) {
/* pre-clear allocation pointers so cleanup is safe even if header
   parsing fails before they are assigned */
j->img_comp[m].raw_data = NULL;
j->img_comp[m].raw_coeff = NULL;
}
j->restart_interval = 0;
if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0;
m = stbi__get_marker(j);
while (!stbi__EOI(m)) {
if (stbi__SOS(m)) {
if (!stbi__process_scan_header(j)) return 0;
if (!stbi__parse_entropy_coded_data(j)) return 0;
if (j->marker == STBI__MARKER_none ) {
/* the scan ended without the entropy decoder consuming a marker:
   scan forward past stuffed zero bytes to the next 0xFF marker */
while (!stbi__at_eof(j->s)) {
int x = stbi__get8(j->s);
if (x == 255) {
j->marker = stbi__get8(j->s);
break;
} else if (x != 0) {
return stbi__err("junk before marker", "Corrupt JPEG");
}
}
}
} else {
if (!stbi__process_marker(j, m)) return 0;
}
m = stbi__get_marker(j);
}
if (j->progressive)
stbi__jpeg_finish(j);
return 1;
}
|
|
|
|
|
|
typedef uint8_t *(*resample_row_func)(uint8_t *out, uint8_t *in0, uint8_t *in1,
|
|
int w, int hs);
|
|
|
|
#define stbi__div4(x) ((uint8_t) ((x) >> 2))
|
|
|
|
/* 1:1 "resample": no work needed, simply hand back the input scanline.
   The out buffer and the far line are unused. */
static uint8_t *resample_row_1(uint8_t *out, uint8_t *in_near, uint8_t *in_far, int w, int hs)
{
   (void)out;
   (void)in_far;
   (void)w;
   (void)hs;
   return in_near;
}
|
|
|
|
/* Vertical 2x upsample: each output sample is a 3:1 weighted average of the
   nearer and farther source scanlines, rounded (>> 2 with +2 bias). */
static uint8_t* stbi__resample_row_v_2(uint8_t *out, uint8_t *in_near, uint8_t *in_far, int w, int hs)
{
   int k;
   (void)hs;
   for (k = 0; k < w; ++k)
      out[k] = (uint8_t)((3*in_near[k] + in_far[k] + 2) >> 2);
   return out;
}
|
|
|
|
/* Horizontal 2x upsample: each source sample produces two outputs, each a
   3:1 blend of the sample with its left/right neighbor (edges replicate). */
static uint8_t* stbi__resample_row_h_2(uint8_t *out, uint8_t *in_near, uint8_t *in_far, int w, int hs)
{
   int k;
   uint8_t *src = in_near;
   (void)in_far;
   (void)hs;

   if (w == 1)
   {
      /* single sample: both outputs replicate it */
      out[0] = src[0];
      out[1] = src[0];
      return out;
   }

   out[0] = src[0];
   out[1] = (uint8_t)((src[0]*3 + src[1] + 2) >> 2);
   for (k = 1; k < w-1; ++k)
   {
      int center = 3*src[k] + 2; /* shared 3x term plus rounding bias */
      out[k*2+0] = (uint8_t)((center + src[k-1]) >> 2);
      out[k*2+1] = (uint8_t)((center + src[k+1]) >> 2);
   }
   out[k*2+0] = (uint8_t)((src[w-2]*3 + src[w-1] + 2) >> 2);
   out[k*2+1] = src[w-1];

   return out;
}
|
|
|
|
#define stbi__div16(x) ((uint8_t) ((x) >> 4))
|
|
|
|
/* Combined 2x horizontal + 2x vertical upsample (scalar reference).
   First blends the two source rows vertically (3:1), then interpolates
   horizontally between consecutive blended samples with 3:1 weights and
   /16 rounding. */
static uint8_t *stbi__resample_row_hv_2(uint8_t *out, uint8_t *in_near, uint8_t *in_far, int w, int hs)
{
   int k, prev, cur;
   (void)hs;

   if (w == 1)
   {
      out[0] = out[1] = (uint8_t)((3*in_near[0] + in_far[0] + 2) >> 2);
      return out;
   }

   cur = 3*in_near[0] + in_far[0];
   out[0] = (uint8_t)((cur + 2) >> 2);
   for (k = 1; k < w; ++k)
   {
      prev = cur;
      cur  = 3*in_near[k] + in_far[k];
      out[k*2-1] = (uint8_t)((3*prev + cur + 8) >> 4);
      out[k*2  ] = (uint8_t)((3*cur + prev + 8) >> 4);
   }
   out[w*2-1] = (uint8_t)((cur + 2) >> 2);

   return out;
}
|
|
|
|
#if defined(STBI_SSE2) || defined(STBI_NEON)
|
|
/* SIMD version of the combined 2x2 upsample: processes 8 input samples per
   iteration (SSE2 or NEON), computing the vertically blended "current"
   row in 14-bit fixed point, then interpolating horizontally against its
   shifted neighbors.  The scalar tail handles the last (w-1)%8 + 1 samples
   exactly like stbi__resample_row_hv_2. */
static uint8_t *stbi__resample_row_hv_2_simd(uint8_t *out, uint8_t *in_near, uint8_t *in_far, int w, int hs)
{
int i=0,t0,t1;

if (w == 1) {
out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
return out;
}

/* t1 carries the previous vertically-blended sample across iterations */
t1 = 3*in_near[0] + in_far[0];
for (; i < ((w-1) & ~7); i += 8)
{
#if defined(STBI_SSE2)
/* load 8 bytes of each row, widen to 16-bit, and compute
   curr = 3*near + far (= 4*near + (far - near)) */
__m128i zero = _mm_setzero_si128();
__m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i));
__m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i));
__m128i farw = _mm_unpacklo_epi8(farb, zero);
__m128i nearw = _mm_unpacklo_epi8(nearb, zero);
__m128i diff = _mm_sub_epi16(farw, nearw);
__m128i nears = _mm_slli_epi16(nearw, 2);
__m128i curr = _mm_add_epi16(nears, diff);
/* build the left/right neighbor vectors; boundary lanes are filled
   from t1 (previous block) and the next scalar sample */
__m128i prv0 = _mm_slli_si128(curr, 2);
__m128i nxt0 = _mm_srli_si128(curr, 2);
__m128i prev = _mm_insert_epi16(prv0, t1, 0);
__m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7);
/* horizontal filter: even = 3*curr + prev, odd = 3*curr + next,
   each + 8 rounding bias then >> 4 */
__m128i bias = _mm_set1_epi16(8);
__m128i curs = _mm_slli_epi16(curr, 2);
__m128i prvd = _mm_sub_epi16(prev, curr);
__m128i nxtd = _mm_sub_epi16(next, curr);
__m128i curb = _mm_add_epi16(curs, bias);
__m128i even = _mm_add_epi16(prvd, curb);
__m128i odd = _mm_add_epi16(nxtd, curb);
/* interleave even/odd results and pack back to bytes */
__m128i int0 = _mm_unpacklo_epi16(even, odd);
__m128i int1 = _mm_unpackhi_epi16(even, odd);
__m128i de0 = _mm_srli_epi16(int0, 4);
__m128i de1 = _mm_srli_epi16(int1, 4);
__m128i outv = _mm_packus_epi16(de0, de1);
_mm_storeu_si128((__m128i *) (out + i*2), outv);
#elif defined(STBI_NEON)
/* same structure as the SSE2 path, using vext for the neighbor shifts
   and vqrshrun for the final round/narrow */
uint8x8x2_t o;
uint8x8_t farb = vld1_u8(in_far + i);
uint8x8_t nearb = vld1_u8(in_near + i);
int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb));
int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2));
int16x8_t curr = vaddq_s16(nears, diff);
int16x8_t prv0 = vextq_s16(curr, curr, 7);
int16x8_t nxt0 = vextq_s16(curr, curr, 1);
int16x8_t prev = vsetq_lane_s16(t1, prv0, 0);
int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7);
int16x8_t curs = vshlq_n_s16(curr, 2);
int16x8_t prvd = vsubq_s16(prev, curr);
int16x8_t nxtd = vsubq_s16(next, curr);
int16x8_t even = vaddq_s16(curs, prvd);
int16x8_t odd = vaddq_s16(curs, nxtd);
o.val[0] = vqrshrun_n_s16(even, 4);
o.val[1] = vqrshrun_n_s16(odd, 4);
vst2_u8(out + i*2, o);
#endif

/* carry the last blended sample of this block into the next iteration */
t1 = 3*in_near[i+7] + in_far[i+7];
}

/* scalar tail: first sample after the vector loop uses the carried t1 */
t0 = t1;
t1 = 3*in_near[i] + in_far[i];
out[i*2] = stbi__div16(3*t1 + t0 + 8);

for (++i; i < w; ++i) {
t0 = t1;
t1 = 3*in_near[i]+in_far[i];
out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
out[i*2 ] = stbi__div16(3*t1 + t0 + 8);
}
out[w*2-1] = stbi__div4(t1+2);

STBI_NOTUSED(hs);

return out;
}
|
|
#endif
|
|
|
|
/* Fallback for arbitrary horizontal factors: nearest-neighbor, replicating
   each source sample hs times.  The far scanline is unused. */
static uint8_t *stbi__resample_row_generic(uint8_t *out, uint8_t *in_near, uint8_t *in_far, int w, int hs)
{
   int col, rep;
   (void)in_far;
   for (col = 0; col < w; ++col)
   {
      uint8_t px = in_near[col];
      for (rep = 0; rep < hs; ++rep)
         out[col*hs + rep] = px;
   }
   return out;
}
|
|
|
|
#ifdef STBI_JPEG_OLD
|
|
#define float2fixed(x) ((int) ((x) * 65536 + 0.5))
|
|
/* Scalar YCbCr -> RGBA conversion, 16.16 fixed point (legacy STBI_JPEG_OLD
   path).  Writes 4 bytes per pixel with alpha forced to 255, advancing out
   by 'step' bytes per pixel. */
static void stbi__YCbCr_to_RGB_row(uint8_t *out, const uint8_t *y, const uint8_t *pcb, const uint8_t *pcr, int count, int step)
{
int i;
for (i=0; i < count; ++i)
{
int y_fixed = (y[i] << 16) + 32768; /* + 0.5 rounding bias in 16.16 */
int cr = pcr[i] - 128;
int cb = pcb[i] - 128;
int r = y_fixed + cr*float2fixed(1.40200f);
int g = y_fixed - cr*float2fixed(0.71414f) - cb*float2fixed(0.34414f);
int b = y_fixed + cb*float2fixed(1.77200f);
r >>= 16;
g >>= 16;
b >>= 16;
/* clamp to 0..255; negative values also exceed 255 as unsigned */
if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
out[0] = (uint8_t)r;
out[1] = (uint8_t)g;
out[2] = (uint8_t)b;
out[3] = 255;
out += step;
}
}
|
|
#else
|
|
#define float2fixed(x)  (((int) ((x) * 4096.0f + 0.5f)) << 8)
/* Scalar YCbCr -> RGBA conversion, 20.12-style fixed point.  Writes 4 bytes
   per pixel with alpha forced to 255, advancing out by 'step' per pixel.
   The &0xffff0000 mask on the Cb contribution to G reproduces the exact
   rounding behavior the SIMD path produces. */
static void stbi__YCbCr_to_RGB_row(uint8_t *out, const uint8_t *y, const uint8_t *pcb, const uint8_t *pcr, int count, int step)
{
   int k;
   for (k = 0; k < count; ++k)
   {
      int y_fixed = (y[k] << 20) + (1<<19); /* + 0.5 rounding bias */
      int cr = pcr[k] - 128;
      int cb = pcb[k] - 128;
      int r  = y_fixed + cr* float2fixed(1.40200f);
      int g  = y_fixed + (cr*-float2fixed(0.71414f)) + ((cb*-float2fixed(0.34414f)) & 0xffff0000);
      int b  = y_fixed + cb* float2fixed(1.77200f);
      r >>= 20;
      g >>= 20;
      b >>= 20;
      /* clamp each channel to 0..255 */
      out[0] = (uint8_t)(r < 0 ? 0 : (r > 255 ? 255 : r));
      out[1] = (uint8_t)(g < 0 ? 0 : (g > 255 ? 255 : g));
      out[2] = (uint8_t)(b < 0 ? 0 : (b > 255 ? 255 : b));
      out[3] = 255;
      out += step;
   }
}
|
|
#endif
|
|
|
|
#if defined(STBI_SSE2) || defined(STBI_NEON)
|
|
/* SIMD YCbCr -> RGBA conversion, 8 pixels per iteration.  The vector paths
   only run for step == 4 (RGBA output); everything else, plus the tail,
   falls through to the scalar loop at the bottom, which matches
   stbi__YCbCr_to_RGB_row's fixed-point math. */
static void stbi__YCbCr_to_RGB_simd(uint8_t *out, uint8_t const *y, uint8_t const *pcb, uint8_t const *pcr, int count, int step)
{
int i = 0;

#ifdef STBI_SSE2
if (step == 4)
{
/* 4.12 fixed-point constants; chroma is sign-biased by xor with 0x80 */
__m128i signflip = _mm_set1_epi8(-0x80);
__m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f));
__m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f));
__m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f));
__m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f));
__m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128);
__m128i xw = _mm_set1_epi16(255); /* alpha lanes for the pack step */

for (; i+7 < count; i += 8) {
__m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i));
__m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i));
__m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i));
__m128i cr_biased = _mm_xor_si128(cr_bytes, signflip);
__m128i cb_biased = _mm_xor_si128(cb_bytes, signflip);

/* unpack into the high byte so mulhi keeps the useful bits */
__m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes);
__m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased);
__m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased);

/* color transform: R = Y + a*Cr, G = Y + b*Cb + c*Cr, B = Y + d*Cb */
__m128i yws = _mm_srli_epi16(yw, 4);
__m128i cr0 = _mm_mulhi_epi16(cr_const0, crw);
__m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw);
__m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1);
__m128i cr1 = _mm_mulhi_epi16(crw, cr_const1);
__m128i rws = _mm_add_epi16(cr0, yws);
__m128i gwt = _mm_add_epi16(cb0, yws);
__m128i bws = _mm_add_epi16(yws, cb1);
__m128i gws = _mm_add_epi16(gwt, cr1);

/* descale */
__m128i rw = _mm_srai_epi16(rws, 4);
__m128i bw = _mm_srai_epi16(bws, 4);
__m128i gw = _mm_srai_epi16(gws, 4);

/* pack with saturation and interleave into RGBA byte order */
__m128i brb = _mm_packus_epi16(rw, bw);
__m128i gxb = _mm_packus_epi16(gw, xw);

__m128i t0 = _mm_unpacklo_epi8(brb, gxb);
__m128i t1 = _mm_unpackhi_epi8(brb, gxb);
__m128i o0 = _mm_unpacklo_epi16(t0, t1);
__m128i o1 = _mm_unpackhi_epi16(t0, t1);

_mm_storeu_si128((__m128i *) (out + 0), o0);
_mm_storeu_si128((__m128i *) (out + 16), o1);
out += 32;
}
}
#endif

#ifdef STBI_NEON
if (step == 4)
{
/* same transform as above using saturating doubling multiplies */
uint8x8_t signflip = vdup_n_u8(0x80);
int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f));
int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f));
int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f));
int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f));

for (; i+7 < count; i += 8)
{
uint8x8x4_t o;
uint8x8_t y_bytes = vld1_u8(y + i);
uint8x8_t cr_bytes = vld1_u8(pcr + i);
uint8x8_t cb_bytes = vld1_u8(pcb + i);
int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip));
int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip));

int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4));
int16x8_t crw = vshll_n_s8(cr_biased, 7);
int16x8_t cbw = vshll_n_s8(cb_biased, 7);

int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0);
int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0);
int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1);
int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1);
int16x8_t rws = vaddq_s16(yws, cr0);
int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1);
int16x8_t bws = vaddq_s16(yws, cb1);

/* round, saturate to bytes, and store interleaved RGBA */
o.val[0] = vqrshrun_n_s16(rws, 4);
o.val[1] = vqrshrun_n_s16(gws, 4);
o.val[2] = vqrshrun_n_s16(bws, 4);
o.val[3] = vdup_n_u8(255);

vst4_u8(out, o);
out += 8*4;
}
}
#endif

/* scalar tail (and non-RGBA steps): identical to stbi__YCbCr_to_RGB_row */
for (; i < count; ++i)
{
int y_fixed = (y[i] << 20) + (1<<19);
int cr = pcr[i] - 128;
int cb = pcb[i] - 128;
int r = y_fixed + cr* float2fixed(1.40200f);
int g = y_fixed + cr*-float2fixed(0.71414f) + ((cb*-float2fixed(0.34414f)) & 0xffff0000);
int b = y_fixed + cb* float2fixed(1.77200f);
r >>= 20;
g >>= 20;
b >>= 20;
if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
out[0] = (uint8_t)r;
out[1] = (uint8_t)g;
out[2] = (uint8_t)b;
out[3] = 255;
out += step;
}
}
|
|
#endif
|
|
|
|
/* Install the IDCT, color-conversion and 2x2-resampling kernels: scalar
   defaults first, then SIMD replacements when SSE2 is available at runtime
   or NEON was selected at compile time. */
static void stbi__setup_jpeg(stbi__jpeg *j)
{
j->idct_block_kernel = stbi__idct_block;
j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row;
j->resample_row_hv_2_kernel = stbi__resample_row_hv_2;

#ifdef STBI_SSE2
if (stbi__sse2_available())
{
j->idct_block_kernel = stbi__idct_simd;
#ifndef STBI_JPEG_OLD
/* the SIMD color converter matches the new fixed-point math only */
j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
#endif
j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
}
#endif

#ifdef STBI_NEON
j->idct_block_kernel = stbi__idct_simd;
#ifndef STBI_JPEG_OLD
j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
#endif
j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
#endif
}
|
|
|
|
/* Release every per-component buffer (pixel plane, progressive coefficient
   store, resampling line buffer) and reset the pointers so a repeated call
   is harmless. */
static void stbi__cleanup_jpeg(stbi__jpeg *j)
{
   int c;
   for (c = 0; c < j->s->img_n; ++c)
   {
      if (j->img_comp[c].raw_data != NULL)
      {
         free(j->img_comp[c].raw_data);
         j->img_comp[c].raw_data = NULL;
         j->img_comp[c].data     = NULL;
      }
      if (j->img_comp[c].raw_coeff != NULL)
      {
         free(j->img_comp[c].raw_coeff);
         j->img_comp[c].raw_coeff = 0;
         j->img_comp[c].coeff     = 0;
      }
      if (j->img_comp[c].linebuf != NULL)
      {
         free(j->img_comp[c].linebuf);
         j->img_comp[c].linebuf = NULL;
      }
   }
}
|
|
|
|
/* Per-component upsampling state used by load_jpeg_image when expanding
   subsampled chroma planes to full image resolution. */
typedef struct
{
resample_row_func resample; /* row-expansion routine chosen from hs/vs */

uint8_t *line0,*line1; /* the two source scanlines being blended */

int hs,vs;   /* horizontal / vertical expansion factors */

int w_lores; /* row width in low-resolution (source) samples */

int ystep;   /* phase within one vertical expansion step */

int ypos;    /* current source row index */

} stbi__resample;
|
|
|
|
/* Decode a JPEG and convert the planar (possibly subsampled) component
   data to an interleaved 8-bit image with req_comp channels
   (req_comp==0 keeps the source channel count).  Returns a malloc'd
   buffer, or NULL on failure; *out_x/*out_y receive the dimensions and
   *comp (if non-NULL) the source channel count. */
static uint8_t *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp)
{
   int n, decode_n;
   z->s->img_n = 0;

   if (req_comp < 0 || req_comp > 4)
      return stbi__errpuc("bad req_comp", "Internal error");

   /* decode all scan data; free partial buffers on failure */
   if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; }

   /* number of output channels to generate */
   n = req_comp ? req_comp : z->s->img_n;

   /* grayscale output from a color JPEG only needs the Y plane resampled */
   if (z->s->img_n == 3 && n < 3)
      decode_n = 1;
   else
      decode_n = z->s->img_n;

   {
      int k;
      unsigned int i,j;
      uint8_t *output;
      uint8_t *coutput[4];  /* per-component full-width rows for this scanline */

      stbi__resample res_comp[4];

      /* configure one resampler per component from its sampling factors */
      for (k=0; k < decode_n; ++k) {
         stbi__resample *r = &res_comp[k];

         /* +3 so the resampler may write slightly past the row end */
         z->img_comp[k].linebuf = (uint8_t *) malloc(z->s->img_x + 3);
         if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }

         r->hs = z->img_h_max / z->img_comp[k].h;
         r->vs = z->img_v_max / z->img_comp[k].v;
         r->ystep = r->vs >> 1;
         r->w_lores = (z->s->img_x + r->hs-1) / r->hs;
         r->ypos = 0;
         r->line0 = r->line1 = z->img_comp[k].data;

         /* pick the fastest row expander for this subsampling ratio */
         if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1;
         else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2;
         else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2;
         else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel;
         else r->resample = stbi__resample_row_generic;
      }

      /* +1 so code can write one extra byte past the end if convenient */
      output = (uint8_t *) malloc(n * z->s->img_x * z->s->img_y + 1);
      if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }

      /* resample and color-convert one output scanline at a time */
      for (j=0; j < z->s->img_y; ++j) {
         uint8_t *out = output + n * z->s->img_x * j;
         for (k=0; k < decode_n; ++k) {
            stbi__resample *r = &res_comp[k];
            /* which of the two source rows is "nearer" for this phase */
            int y_bot = r->ystep >= (r->vs >> 1);
            coutput[k] = r->resample(z->img_comp[k].linebuf,
                                     y_bot ? r->line1 : r->line0,
                                     y_bot ? r->line0 : r->line1,
                                     r->w_lores, r->hs);
            /* advance to the next low-res row when the phase wraps */
            if (++r->ystep >= r->vs) {
               r->ystep = 0;
               r->line0 = r->line1;
               if (++r->ypos < z->img_comp[k].y)
                  r->line1 += z->img_comp[k].w2;
            }
         }
         if (n >= 3) {
            uint8_t *y = coutput[0];
            if (z->s->img_n == 3) {
               z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
            } else
               /* grayscale source widened to RGB(A) */
               for (i=0; i < z->s->img_x; ++i) {
                  out[0] = out[1] = out[2] = y[i];
                  out[3] = 255;  /* harmless when n==3: overwritten or past-end slack byte */
                  out += n;
               }
         } else {
            /* 1- or 2-channel output comes straight from the Y plane */
            uint8_t *y = coutput[0];
            if (n == 1)
               for (i=0; i < z->s->img_x; ++i) out[i] = y[i];
            else
               for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255;
         }
      }
      stbi__cleanup_jpeg(z);
      *out_x = z->s->img_x;
      *out_y = z->s->img_y;
      if (comp) *comp = z->s->img_n;  /* report source channels, not n */
      return output;
   }
}
|
|
|
|
/* Entry point for JPEG decoding from a generic stbi context. */
static unsigned char *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
   stbi__jpeg decoder;
   decoder.s = s;
   stbi__setup_jpeg(&decoder);
   return load_jpeg_image(&decoder, x, y, comp, req_comp);
}
|
|
|
|
/* Probe whether the stream looks like a JPEG, then rewind it. */
static int stbi__jpeg_test(stbi__context *s)
{
   int ok;
   stbi__jpeg decoder;
   decoder.s = s;
   stbi__setup_jpeg(&decoder);
   ok = stbi__decode_jpeg_header(&decoder, STBI__SCAN_type);
   stbi__rewind(s);
   return ok;
}
|
|
#endif
|
|
|
|
#ifndef STBI_NO_ZLIB
|
|
|
|
#define STBI__ZFAST_BITS 9
|
|
#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1)
|
|
|
|
/* Canonical Huffman decoder tables for one deflate alphabet. */
typedef struct
{
   uint16_t fast[1 << STBI__ZFAST_BITS]; /* 9-bit direct lookup: (size<<9)|symbol, 0 = use slow path */
   uint16_t firstcode[16];   /* first canonical code of each length */
   int maxcode[17];          /* exclusive code bound per length, left-aligned to 16 bits */
   uint16_t firstsymbol[16]; /* symbol index of the first length-i code */
   uint8_t size[288];        /* code length per canonical-code index */
   uint16_t value[288];      /* symbol per canonical-code index */
} stbi__zhuffman;
|
|
|
|
/* Reverse the low 16 bits of n by exchanging progressively smaller
   groups: bytes, then nibbles, pairs and finally adjacent bits.  The
   group swaps commute, so this order is equivalent to the classic
   smallest-first formulation. */
static INLINE int stbi__bitreverse16(int n)
{
   n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
   n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
   n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
   n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
   return n;
}
|
|
|
|
/* Reverse the low `bits` bits of v (bits must be <= 16). */
static INLINE int stbi__bit_reverse(int v, int bits)
{
   int rev16;
   STBI_ASSERT(bits <= 16);
   /* reverse all 16, then discard the excess low bits */
   rev16 = stbi__bitreverse16(v);
   return rev16 >> (16 - bits);
}
|
|
|
|
/* Build decoding tables from the per-symbol code lengths using the
   canonical Huffman construction (RFC 1951, 3.2.2).  Returns 0 via
   stbi__err when the length set is inconsistent. */
static int stbi__zbuild_huffman(stbi__zhuffman *z, uint8_t *sizelist, int num)
{
   int i,k=0;
   int code, next_code[16], sizes[17];

   /* count how many codes exist of each length */
   memset(sizes, 0, sizeof(sizes));
   memset(z->fast, 0, sizeof(z->fast));
   for (i=0; i < num; ++i)
      ++sizes[sizelist[i]];
   sizes[0] = 0;  /* length 0 means "symbol unused" */
   /* a length-i code space holds at most 2^i codes */
   for (i=1; i < 16; ++i)
      if (sizes[i] > (1 << i))
         return stbi__err("bad sizes", "Corrupt PNG");
   code = 0;
   for (i=1; i < 16; ++i) {
      next_code[i] = code;
      z->firstcode[i] = (uint16_t) code;
      z->firstsymbol[i] = (uint16_t) k;
      code = (code + sizes[i]);
      if (sizes[i])
         if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG");
      /* left-align so decode can compare whole 16-bit prefixes */
      z->maxcode[i] = code << (16-i);
      code <<= 1;
      k += sizes[i];
   }
   z->maxcode[16] = 0x10000;  /* sentinel: all prefixes compare below this */
   for (i=0; i < num; ++i)
   {
      int s = sizelist[i];
      if (s)
      {
         /* canonical index of this symbol's code */
         int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
         uint16_t fastv = (uint16_t) ((s << 9) | i);
         z->size [c] = (uint8_t ) s;
         z->value[c] = (uint16_t) i;
         if (s <= STBI__ZFAST_BITS) {
            /* short code: fill every fast-table slot whose low s bits
               match the bit-reversed code */
            int k = stbi__bit_reverse(next_code[s],s);
            while (k < (1 << STBI__ZFAST_BITS)) {
               z->fast[k] = fastv;
               k += (1 << s);
            }
         }
         ++next_code[s];
      }
   }
   return 1;
}
|
|
|
|
/* State for one zlib/deflate decompression run. */
typedef struct
{
   uint8_t *zbuffer, *zbuffer_end; /* compressed input cursor and end */
   int num_bits;                   /* number of valid bits in code_buffer */
   uint32_t code_buffer;           /* LSB-first bit reservoir */

   char *zout;        /* current output write position */
   char *zout_start;  /* base of the output buffer (realloc anchor) */
   char *zout_end;    /* end of the allocated output */
   int z_expandable;  /* nonzero if the output may be grown */

   stbi__zhuffman z_length, z_distance; /* tables for the current block */
} stbi__zbuf;
|
|
|
|
/* Fetch the next compressed byte; reads past the end return 0 so the
   bit-filling loop degrades gracefully on truncated streams. */
static INLINE uint8_t stbi__zget8(stbi__zbuf *z)
{
   uint8_t byte = 0;
   if (z->zbuffer < z->zbuffer_end)
      byte = *z->zbuffer++;
   return byte;
}
|
|
|
|
/* Top up the bit reservoir to at least 25 valid bits. */
static void stbi__fill_bits(stbi__zbuf *z)
{
   do {
      STBI_ASSERT(z->code_buffer < (1U << z->num_bits));
      /* Cast the byte to unsigned before shifting: a value >= 0x80
         shifted left by 24 as a (promoted) signed int overflows,
         which is undefined behavior. */
      z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits;
      z->num_bits += 8;
   } while (z->num_bits <= 24);
}
|
|
|
|
/* Consume and return the next n bits (LSB-first) from the stream. */
static INLINE unsigned int stbi__zreceive(stbi__zbuf *z, int n)
{
   unsigned int bits;
   if (z->num_bits < n)
      stbi__fill_bits(z);
   bits = z->code_buffer & ((1 << n) - 1);
   z->code_buffer >>= n;
   z->num_bits -= n;
   return bits;
}
|
|
|
|
/* Decode a Huffman symbol whose code is longer than STBI__ZFAST_BITS by
   scanning the per-length maxcode bounds.  Returns the symbol, or -1 on
   an invalid code. */
static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z)
{
   int b,s,k;
   /* deflate codes arrive LSB-first; reverse so prefixes compare MSB-aligned */
   k = stbi__bit_reverse(a->code_buffer, 16);
   for (s=STBI__ZFAST_BITS+1; ; ++s)
      if (k < z->maxcode[s])
         break;
   if (s == 16) return -1; /* hit the sentinel: not a valid code */
   /* canonical index of this code within length s */
   b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s];
   STBI_ASSERT(z->size[b] == s);
   a->code_buffer >>= s;
   a->num_bits -= s;
   return z->value[b];
}
|
|
|
|
/* Decode the next Huffman symbol: try the 9-bit direct table first,
   fall back to the slow per-length scan for longer codes. */
static INLINE int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z)
{
   int entry;
   if (a->num_bits < 16)
      stbi__fill_bits(a);
   entry = z->fast[a->code_buffer & STBI__ZFAST_MASK];
   if (!entry)
      return stbi__zhuffman_decode_slowpath(a, z);
   {
      /* fast-table entry packs (code length << 9) | symbol */
      int nbits = entry >> 9;
      a->code_buffer >>= nbits;
      a->num_bits -= nbits;
      return entry & 511;
   }
}
|
|
|
|
/* Grow the output buffer so at least n more bytes fit after zout.
   zout (the caller's write cursor) is saved back into z before the
   realloc so it can be rebased onto the new allocation.  Returns 1 on
   success, 0 via stbi__err on failure. */
static int stbi__zexpand(stbi__zbuf *z, char *zout, int n)
{
   char *q;
   int cur, limit;
   z->zout = zout;
   if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG");
   cur = (int) (z->zout - z->zout_start);
   limit = (int) (z->zout_end - z->zout_start);
   while (cur + n > limit) {
      /* guard the doubling: limit==0 would loop forever, and doubling
         past 2^30 would overflow a signed int (undefined behavior) */
      if (limit == 0 || limit >= 0x40000000)
         return stbi__err("outofmem", "Out of memory");
      limit *= 2;
   }
   q = (char *) realloc(z->zout_start, limit);
   if (q == NULL) return stbi__err("outofmem", "Out of memory");
   z->zout_start = q;
   z->zout = q + cur;
   z->zout_end = q + limit;
   return 1;
}
|
|
|
|
/* Base values and extra-bit counts for deflate length codes 257..285
   and distance codes 0..29 (RFC 1951, section 3.2.5). */
static int stbi__zlength_base[31] = {
   3,4,5,6,7,8,9,10,11,13,
   15,17,19,23,27,31,35,43,51,59,
   67,83,99,115,131,163,195,227,258,0,0 };

static int stbi__zlength_extra[31]=
{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };

static int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};

static int stbi__zdist_extra[32] =
{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
|
|
|
|
/* Decode one compressed deflate block (literals plus length/distance
   back-references) into a->zout, growing the output as needed. */
static int stbi__parse_huffman_block(stbi__zbuf *a)
{
   char *zout = a->zout;
   for(;;) {
      int z = stbi__zhuffman_decode(a, &a->z_length);
      if (z < 256) {
         /* literal byte (or decode error) */
         if (z < 0) return stbi__err("bad huffman code","Corrupt PNG");
         if (zout >= a->zout_end) {
            if (!stbi__zexpand(a, zout, 1)) return 0;
            zout = a->zout;
         }
         *zout++ = (char) z;
      } else {
         uint8_t *p;
         int len,dist;
         if (z == 256) {
            /* end-of-block symbol */
            a->zout = zout;
            return 1;
         }
         /* length/distance pair: copy len bytes starting dist bytes back */
         z -= 257;
         len = stbi__zlength_base[z];
         if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]);
         z = stbi__zhuffman_decode(a, &a->z_distance);
         if (z < 0) return stbi__err("bad huffman code","Corrupt PNG");
         dist = stbi__zdist_base[z];
         if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]);
         /* a back-reference may not reach before the start of output */
         if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG");
         if (zout + len > a->zout_end) {
            if (!stbi__zexpand(a, zout, len)) return 0;
            zout = a->zout;
         }
         p = (uint8_t *) (zout - dist);
         if (dist == 1)
         {
            /* run of one repeated byte (common RLE case) */
            uint8_t v = *p;
            if (len) { do *zout++ = v; while (--len); }
         } else {
            /* byte-by-byte copy: source and destination may overlap */
            if (len) { do *zout++ = *p++; while (--len); }
         }
      }
   }
}
|
|
|
|
/* Read the dynamic Huffman table description that precedes a type-2
   deflate block (RFC 1951, 3.2.7) and build the length and distance
   decoders.  Returns 1 on success, 0 on corrupt data. */
static int stbi__compute_huffman_codes(stbi__zbuf *a)
{
   /* transmission order of the code-length-code lengths */
   static uint8_t length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
   stbi__zhuffman z_codelength;
   uint8_t lencodes[286+32+137];   /* padding absorbs the worst-case final repeat */
   uint8_t codelength_sizes[19];
   int i,n;

   int hlit = stbi__zreceive(a,5) + 257;   /* # literal/length codes */
   int hdist = stbi__zreceive(a,5) + 1;    /* # distance codes */
   int hclen = stbi__zreceive(a,4) + 4;    /* # code-length codes sent */

   memset(codelength_sizes, 0, sizeof(codelength_sizes));
   for (i=0; i < hclen; ++i) {
      int s = stbi__zreceive(a,3);
      codelength_sizes[length_dezigzag[i]] = (uint8_t) s;
   }
   if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0;

   /* decode the run-length-encoded list of code lengths */
   n = 0;
   while (n < hlit + hdist) {
      int c = stbi__zhuffman_decode(a, &z_codelength);
      if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG");
      if (c < 16)
         lencodes[n++] = (uint8_t) c;
      else if (c == 16) {
         c = stbi__zreceive(a,2)+3;
         /* FIX: code 16 repeats the previous length, so one must exist;
            without this check a corrupt stream reads lencodes[-1]. */
         if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG");
         memset(lencodes+n, lencodes[n-1], c);
         n += c;
      } else if (c == 17) {
         c = stbi__zreceive(a,3)+3;   /* 3..10 zero lengths */
         memset(lencodes+n, 0, c);
         n += c;
      } else {
         STBI_ASSERT(c == 18);
         c = stbi__zreceive(a,7)+11;  /* 11..138 zero lengths */
         memset(lencodes+n, 0, c);
         n += c;
      }
   }
   if (n != hlit+hdist) return stbi__err("bad codelengths","Corrupt PNG");
   if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0;
   if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0;
   return 1;
}
|
|
|
|
/* Copy a stored (type 0, uncompressed) deflate block to the output.
   NOTE(review): the name is misspelled ("uncomperssed") but is called
   elsewhere in this file, so it must stay as-is. */
static int stbi__parse_uncomperssed_block(stbi__zbuf *a)
{
   uint8_t header[4];
   int len,nlen,k;
   /* stored blocks start on a byte boundary: discard partial bits */
   if (a->num_bits & 7)
      stbi__zreceive(a, a->num_bits & 7);
   /* drain whole bytes still buffered in the bit reservoir */
   k = 0;
   while (a->num_bits > 0) {
      header[k++] = (uint8_t) (a->code_buffer & 255);
      a->code_buffer >>= 8;
      a->num_bits -= 8;
   }
   STBI_ASSERT(a->num_bits == 0);
   /* read the remainder of the 4-byte LEN/NLEN header from the stream */
   while (k < 4)
      header[k++] = stbi__zget8(a);
   len = header[1] * 256 + header[0];    /* little-endian length */
   nlen = header[3] * 256 + header[2];   /* one's-complement check value */
   if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG");
   if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG");
   if (a->zout + len > a->zout_end)
      if (!stbi__zexpand(a, a->zout, len)) return 0;
   memcpy(a->zout, a->zbuffer, len);
   a->zbuffer += len;
   a->zout += len;
   return 1;
}
|
|
|
|
/* Validate the 2-byte zlib stream header (RFC 1950): CMF then FLG. */
static int stbi__parse_zlib_header(stbi__zbuf *a)
{
   int cmf = stbi__zget8(a);
   int flg = stbi__zget8(a);
   int method = cmf & 15;
   /* FCHECK: the header pair must be a multiple of 31 */
   if ((cmf*256 + flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG");
   /* FDICT (preset dictionary) is not supported */
   if (flg & 32) return stbi__err("no preset dict","Corrupt PNG");
   /* only method 8 (DEFLATE) exists */
   if (method != 8) return stbi__err("bad compression","Corrupt PNG");
   return 1;
}
|
|
|
|
/* Fixed-Huffman code lengths for deflate type-1 blocks (RFC 1951, 3.2.6),
   built lazily on first use. */
static uint8_t stbi__zdefault_length[288], stbi__zdefault_distance[32];

static void stbi__init_zdefaults(void)
{
   int i;
   for (i = 0; i < 288; ++i) {
      if (i <= 143)      stbi__zdefault_length[i] = 8;
      else if (i <= 255) stbi__zdefault_length[i] = 9;
      else if (i <= 279) stbi__zdefault_length[i] = 7;
      else               stbi__zdefault_length[i] = 8;
   }
   /* all 32 distance codes use 5 bits */
   for (i = 0; i < 32; ++i)
      stbi__zdefault_distance[i] = 5;
}
|
|
|
|
/* Inflate the whole stream: optionally check the zlib header, then
   decode deflate blocks until one is flagged final. */
static int stbi__parse_zlib(stbi__zbuf *a, int parse_header)
{
   int final, type;
   if (parse_header) {
      if (!stbi__parse_zlib_header(a)) return 0;
   }
   a->num_bits = 0;
   a->code_buffer = 0;
   do {
      final = stbi__zreceive(a,1);  /* BFINAL */
      type = stbi__zreceive(a,2);   /* BTYPE */
      switch (type) {
         case 0: /* stored */
            if (!stbi__parse_uncomperssed_block(a)) return 0;
            break;
         case 3: /* reserved: invalid */
            return 0;
         case 1: /* fixed Huffman tables */
            if (!stbi__zdefault_distance[31]) stbi__init_zdefaults();
            if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0;
            if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0;
            if (!stbi__parse_huffman_block(a)) return 0;
            break;
         default: /* type == 2: dynamic Huffman tables */
            if (!stbi__compute_huffman_codes(a)) return 0;
            if (!stbi__parse_huffman_block(a)) return 0;
            break;
      }
   } while (!final);
   return 1;
}
|
|
|
|
/* Point the decompressor at the caller's output buffer and run it.
   exp selects whether the buffer may be grown with realloc. */
static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header)
{
   a->zout_start   = obuf;
   a->zout         = obuf;
   a->zout_end     = obuf + olen;
   a->z_expandable = exp;
   return stbi__parse_zlib(a, parse_header);
}
|
|
|
|
STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header)
|
|
{
|
|
stbi__zbuf a;
|
|
char *p = (char *) malloc(initial_size);
|
|
if (p == NULL) return NULL;
|
|
a.zbuffer = (uint8_t *) buffer;
|
|
a.zbuffer_end = (uint8_t *) buffer + len;
|
|
if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) {
|
|
if (outlen) *outlen = (int) (a.zout - a.zout_start);
|
|
return a.zout_start;
|
|
} else {
|
|
free(a.zout_start);
|
|
return NULL;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
#ifndef STBI_NO_PNG
|
|
/* An 8-byte PNG chunk prefix: payload length and 4-character type code. */
typedef struct
{
   uint32_t length; /* payload bytes (excludes type and CRC) */
   uint32_t type;   /* big-endian packed 4-char chunk name */
} stbi__pngchunk;
|
|
|
|
/* Read the next chunk's length and type (the trailing CRC is consumed
   by the caller after the payload). */
static stbi__pngchunk stbi__get_chunk_header(stbi__context *s)
{
   stbi__pngchunk c;
   c.length = stbi__get32be(s);
   c.type = stbi__get32be(s);
   return c;
}
|
|
|
|
/* Consume and verify the fixed 8-byte PNG signature. */
static int stbi__check_png_header(stbi__context *s)
{
   static uint8_t png_sig[8] = { 137,80,78,71,13,10,26,10 };
   int i;
   for (i = 0; i < 8; ++i) {
      if (stbi__get8(s) != png_sig[i])
         return stbi__err("bad png sig","Not a PNG");
   }
   return 1;
}
|
|
|
|
/* Working buffers for one PNG decode. */
typedef struct
{
   stbi__context *s;                /* input stream */
   uint8_t *idata, *expanded, *out; /* raw IDAT bytes, inflated filter data, final pixels */
} stbi__png;
|
|
|
|
|
|
/* PNG scanline filter types (spec section 9.2).  The *_first variants
   are internal substitutes used on row 0, where no prior row exists. */
enum {
   STBI__F_none=0,
   STBI__F_sub=1,
   STBI__F_up=2,
   STBI__F_avg=3,
   STBI__F_paeth=4,
   STBI__F_avg_first,
   STBI__F_paeth_first
};
|
|
|
|
/* Filter to substitute on the first row (prior row treated as zeros):
   "up" degenerates to "none"; avg/paeth use their *_first variants. */
static uint8_t first_row_filter[5] =
{
   STBI__F_none,
   STBI__F_sub,
   STBI__F_none,
   STBI__F_avg_first,
   STBI__F_paeth_first
};
|
|
|
|
/* Paeth predictor (PNG spec 9.4): of a (left), b (above), c (upper
   left), return whichever is closest to a+b-c, preferring a, then b,
   then c on ties. */
static int stbi__paeth(int a, int b, int c)
{
   int p  = a + b - c;
   int da = abs(p - a);
   int db = abs(p - b);
   int dc = abs(p - c);
   if (da <= db && da <= dc)
      return a;
   return (db <= dc) ? b : c;
}
|
|
|
|
/* Multipliers that expand a full-scale 1/2/4/8-bit sample to 0..255,
   indexed by bit depth (unused depths are 0). */
static uint8_t stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 };
|
|
|
/* De-filter one (possibly reduced, for interlacing) PNG image of x*y
   pixels from `raw` into a freshly allocated a->out with out_n channels
   per pixel.  depth is bits per sample; color is the PNG color type
   (used only to decide whether sub-8-bit samples get rescaled).
   Returns 1 on success, 0 via stbi__err otherwise. */
static int stbi__create_png_image_raw(stbi__png *a, uint8_t *raw, uint32_t raw_len, int out_n, uint32_t x, uint32_t y, int depth, int color)
{
   int k;
   stbi__context *s = a->s;
   uint32_t i,j,stride = x*out_n;
   uint32_t img_len, img_width_bytes;
   int img_n = s->img_n;

   /* out_n may only add an alpha channel on top of the source channels */
   STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1);
   a->out = (uint8_t *) malloc(x * y * out_n);
   if (!a->out) return stbi__err("outofmem", "Out of memory");

   img_width_bytes = (((img_n * x * depth) + 7) >> 3);
   img_len = (img_width_bytes + 1) * y;  /* +1 filter byte per row */
   if (s->img_x == x && s->img_y == y)
   {
      if (raw_len != img_len) return stbi__err("not enough pixels","Corrupt PNG");
   }
   else
   {
      /* interlace pass: multiple sub-images share raw, so only a lower bound */
      if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG");
   }

   for (j=0; j < y; ++j) {
      uint8_t *cur = a->out + stride*j;
      uint8_t *prior = cur - stride;
      int filter = *raw++;          /* per-row filter byte */
      int filter_bytes = img_n;     /* distance to the "left" sample */
      int width = x;
      if (filter > 4)
         return stbi__err("invalid filter","Corrupt PNG");

      if (depth < 8) {
         STBI_ASSERT(img_width_bytes <= x);
         /* de-filter packed bytes in-place at the tail of the row; they
            are unpacked to one byte per sample in the pass below */
         cur += x*out_n - img_width_bytes;
         filter_bytes = 1;
         width = img_width_bytes;
      }

      /* row 0 has no prior row: swap in the first-row variant */
      if (j == 0) filter = first_row_filter[filter];

      /* handle the first filter_bytes bytes explicitly (no left neighbor) */
      for (k=0; k < filter_bytes; ++k)
      {
         switch (filter)
         {
            case STBI__F_none : cur[k] = raw[k]; break;
            case STBI__F_sub : cur[k] = raw[k]; break;
            case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
            case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break;
            case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break;
            case STBI__F_avg_first : cur[k] = raw[k]; break;
            case STBI__F_paeth_first: cur[k] = raw[k]; break;
         }
      }

      if (depth == 8) {
         if (img_n != out_n)
            cur[img_n] = 255;  /* synthesize opaque alpha for first pixel */
         raw += img_n;
         cur += out_n;
         prior += out_n;
      } else {
         raw += 1;
         cur += 1;
         prior += 1;
      }

      /* de-filter the remainder of the row.  The CASE macro fuses the
         per-filter `case` label with the loop over the row's bytes, so
         the switch is evaluated once per row, not once per byte. */
      if (depth < 8 || img_n == out_n)
      {
         int nk = (width - 1)*img_n;
#define CASE(f) \
   case f:     \
      for (k=0; k < nk; ++k)
         switch (filter) {
            case STBI__F_none: memcpy(cur, raw, nk); break;
            CASE(STBI__F_sub) cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); break;
            CASE(STBI__F_up) cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
            CASE(STBI__F_avg) cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); break;
            CASE(STBI__F_paeth) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); break;
            CASE(STBI__F_avg_first) cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); break;
            CASE(STBI__F_paeth_first) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); break;
         }
#undef CASE
         raw += nk;
      } else {
         /* 8-bit with synthesized alpha: advance pixel by pixel, writing
            255 into the alpha slot as we go */
         STBI_ASSERT(img_n+1 == out_n);
#define CASE(f) \
   case f:     \
      for (i=x-1; i >= 1; --i, cur[img_n]=255,raw+=img_n,cur+=out_n,prior+=out_n) \
         for (k=0; k < img_n; ++k)
         switch (filter) {
            CASE(STBI__F_none) cur[k] = raw[k]; break;
            CASE(STBI__F_sub) cur[k] = STBI__BYTECAST(raw[k] + cur[k-out_n]); break;
            CASE(STBI__F_up) cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
            CASE(STBI__F_avg) cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-out_n])>>1)); break;
            CASE(STBI__F_paeth) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-out_n],prior[k],prior[k-out_n])); break;
            CASE(STBI__F_avg_first) cur[k] = STBI__BYTECAST(raw[k] + (cur[k-out_n] >> 1)); break;
            CASE(STBI__F_paeth_first) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-out_n],0,0)); break;
         }
#undef CASE
      }
   }

   /* second pass for sub-8-bit depths: unpack the packed samples (left
      at the tail of each row above) into one byte per sample, scaling
      grayscale values to full range */
   if (depth < 8)
   {
      for (j=0; j < y; ++j)
      {
         uint8_t *cur = a->out + stride*j;
         uint8_t *in = a->out + stride*j + x*out_n - img_width_bytes;
         /* indexed (color==3) samples are palette indices: never scale */
         uint8_t scale = (color == 0) ? stbi__depth_scale_table[depth] : 1;

         if (depth == 4)
         {
            for (k=x*img_n; k >= 2; k-=2, ++in) {
               *cur++ = scale * ((*in >> 4) );
               *cur++ = scale * ((*in ) & 0x0f);
            }
            if (k > 0) *cur++ = scale * ((*in >> 4) );
         } else if (depth == 2) {
            for (k=x*img_n; k >= 4; k-=4, ++in) {
               *cur++ = scale * ((*in >> 6) );
               *cur++ = scale * ((*in >> 4) & 0x03);
               *cur++ = scale * ((*in >> 2) & 0x03);
               *cur++ = scale * ((*in ) & 0x03);
            }
            if (k > 0) *cur++ = scale * ((*in >> 6) );
            if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03);
            if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03);
         } else if (depth == 1) {
            for (k=x*img_n; k >= 8; k-=8, ++in) {
               *cur++ = scale * ((*in >> 7) );
               *cur++ = scale * ((*in >> 6) & 0x01);
               *cur++ = scale * ((*in >> 5) & 0x01);
               *cur++ = scale * ((*in >> 4) & 0x01);
               *cur++ = scale * ((*in >> 3) & 0x01);
               *cur++ = scale * ((*in >> 2) & 0x01);
               *cur++ = scale * ((*in >> 1) & 0x01);
               *cur++ = scale * ((*in ) & 0x01);
            }
            if (k > 0) *cur++ = scale * ((*in >> 7) );
            if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01);
            if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01);
            if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01);
            if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01);
            if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01);
            if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01);
         }
         /* widen in place (back to front) when an alpha channel is added */
         if (img_n != out_n) {
            uint8_t *cur = a->out + stride*j;
            int i;
            if (img_n == 1) {
               for (i=x-1; i >= 0; --i) {
                  cur[i*2+1] = 255;
                  cur[i*2+0] = cur[i];
               }
            } else {
               STBI_ASSERT(img_n == 3);
               for (i=x-1; i >= 0; --i) {
                  cur[i*4+3] = 255;
                  cur[i*4+2] = cur[i*3+2];
                  cur[i*4+1] = cur[i*3+1];
                  cur[i*4+0] = cur[i*3+0];
               }
            }
         }
      }
   }

   return 1;
}
|
|
|
|
/* De-filter the inflated PNG data into a->out.  Non-interlaced images
   are handled in one pass; Adam7-interlaced images decode seven reduced
   sub-images and scatter their pixels into the final buffer. */
static int stbi__create_png_image(stbi__png *a, uint8_t *image_data, uint32_t image_data_len, int out_n, int depth, int color, int interlaced)
{
   uint8_t *final;
   int p;
   if (!interlaced)
      return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color);

   final = (uint8_t *) malloc(a->s->img_x * a->s->img_y * out_n);
   /* FIX: the allocation was previously used without a NULL check */
   if (!final) return stbi__err("outofmem", "Out of memory");
   for (p=0; p < 7; ++p) {
      /* Adam7 pass origins and spacings (PNG spec, section 8.2) */
      int xorig[] = { 0,4,0,2,0,1,0 };
      int yorig[] = { 0,0,4,0,2,0,1 };
      int xspc[]  = { 8,8,4,4,2,2,1 };
      int yspc[]  = { 8,8,8,4,4,2,2 };
      int i,j,x,y;
      /* pixels this pass contributes horizontally / vertically */
      x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p];
      y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p];
      if (x && y) {
         uint32_t img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y;
         if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) {
            free(final);
            return 0;
         }
         /* scatter the reduced image's pixels to their final positions */
         for (j=0; j < y; ++j) {
            for (i=0; i < x; ++i) {
               int out_y = j*yspc[p]+yorig[p];
               int out_x = i*xspc[p]+xorig[p];
               memcpy(final + out_y*a->s->img_x*out_n + out_x*out_n,
                      a->out + (j*x+i)*out_n, out_n);
            }
         }
         free(a->out);
         image_data += img_len;
         image_data_len -= img_len;
      }
   }
   a->out = final;

   return 1;
}
|
|
|
|
/* Apply a tRNS color key: pixels matching tc become fully transparent.
   Only gray+alpha (2) and RGBA (4) output layouts are valid here. */
static int stbi__compute_transparency(stbi__png *z, uint8_t tc[3], int out_n)
{
   stbi__context *s = z->s;
   uint32_t n, npix = s->img_x * s->img_y;
   uint8_t *pix = z->out;

   STBI_ASSERT(out_n == 2 || out_n == 4);

   if (out_n == 2) {
      for (n = 0; n < npix; ++n, pix += 2)
         pix[1] = (pix[0] == tc[0]) ? 0 : 255;
   } else {
      for (n = 0; n < npix; ++n, pix += 4) {
         if (pix[0] == tc[0] && pix[1] == tc[1] && pix[2] == tc[2])
            pix[3] = 0;
      }
   }
   return 1;
}
|
|
|
|
/* Replace the 1-byte-per-pixel palette indices in a->out with expanded
   RGB (pal_img_n==3) or RGBA (pal_img_n==4) pixels. */
static int stbi__expand_png_palette(stbi__png *a, uint8_t *palette, int len, int pal_img_n)
{
   uint32_t i, npix = a->s->img_x * a->s->img_y;
   uint8_t *dst, *expanded, *indexed = a->out;

   expanded = (uint8_t *) malloc(npix * pal_img_n);
   if (expanded == NULL) return stbi__err("outofmem", "Out of memory");

   dst = expanded;
   if (pal_img_n == 3) {
      for (i = 0; i < npix; ++i, dst += 3) {
         int n = indexed[i] * 4;   /* palette entries are stored as RGBA */
         dst[0] = palette[n];
         dst[1] = palette[n+1];
         dst[2] = palette[n+2];
      }
   } else {
      for (i = 0; i < npix; ++i, dst += 4) {
         int n = indexed[i] * 4;
         dst[0] = palette[n];
         dst[1] = palette[n+1];
         dst[2] = palette[n+2];
         dst[3] = palette[n+3];
      }
   }
   free(a->out);
   a->out = expanded;

   STBI_NOTUSED(len);

   return 1;
}
|
|
|
|
/* When nonzero, un-premultiply alpha while converting CgBI (iPhone) PNGs. */
static int stbi__unpremultiply_on_load = 0;
/* When nonzero, convert CgBI PNGs from BGR back to RGB on load. */
static int stbi__de_iphone_flag = 0;
|
|
|
|
/* Convert an Apple CgBI image back to normal channel order: swap the
   red/blue channels, optionally undoing premultiplied alpha for RGBA. */
static void stbi__de_iphone(stbi__png *z)
{
   stbi__context *s = z->s;
   uint32_t n, npix = s->img_x * s->img_y;
   uint8_t *pix = z->out;

   if (s->img_out_n == 3) {
      /* BGR -> RGB */
      for (n = 0; n < npix; ++n, pix += 3) {
         uint8_t swap = pix[0];
         pix[0] = pix[2];
         pix[2] = swap;
      }
      return;
   }

   STBI_ASSERT(s->img_out_n == 4);
   if (stbi__unpremultiply_on_load) {
      /* swap and un-premultiply in one pass */
      for (n = 0; n < npix; ++n, pix += 4) {
         uint8_t alpha = pix[3];
         uint8_t swap  = pix[0];
         if (alpha) {
            pix[0] = pix[2] * 255 / alpha;
            pix[1] = pix[1] * 255 / alpha;
            pix[2] = swap * 255 / alpha;
         } else {
            /* fully transparent: just restore channel order */
            pix[0] = pix[2];
            pix[2] = swap;
         }
      }
   } else {
      /* BGRA -> RGBA */
      for (n = 0; n < npix; ++n, pix += 4) {
         uint8_t swap = pix[0];
         pix[0] = pix[2];
         pix[2] = swap;
      }
   }
}
|
|
|
|
/* Pack four chunk-name characters into the big-endian 32-bit type code
   produced by stbi__get_chunk_header. */
#define STBI__PNG_TYPE(a,b,c,d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
|
|
|
/* Drive the PNG chunk loop.  `scan` selects how much work to do:
   STBI__SCAN_type just validates the signature; STBI__SCAN_header stops
   once dimensions/channels are known; STBI__SCAN_load decodes the whole
   image into z->out.  Returns 1 on success, 0 via stbi__err. */
static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp)
{
   uint8_t palette[1024], pal_img_n=0;
   uint8_t has_trans=0, tc[3];
   uint32_t ioff=0, idata_limit=0, i, pal_len=0;
   int first=1,k,interlace=0, color=0, depth=0, is_iphone=0;
   stbi__context *s = z->s;

   z->expanded = NULL;
   z->idata = NULL;
   z->out = NULL;

   if (!stbi__check_png_header(s)) return 0;

   if (scan == STBI__SCAN_type) return 1;

   for (;;) {
      stbi__pngchunk c = stbi__get_chunk_header(s);
      switch (c.type) {
         case STBI__PNG_TYPE('C','g','B','I'):
            /* Apple's proprietary marker: BGR order, premultiplied alpha,
               and a headerless deflate stream in IDAT */
            is_iphone = 1;
            stbi__skip(s, c.length);
            break;
         case STBI__PNG_TYPE('I','H','D','R'): {
            int comp,filter;
            if (!first) return stbi__err("multiple IHDR","Corrupt PNG");
            first = 0;
            if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG");
            s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)");
            s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)");
            /* NOTE(review): 16-bit samples are rejected by this build */
            depth = stbi__get8(s); if (depth != 1 && depth != 2 && depth != 4 && depth != 8) return stbi__err("1/2/4/8-bit only","PNG not supported: 1/2/4/8-bit only");
            color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG");
            if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG");
            comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG");
            filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG");
            interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG");
            if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG");
            if (!pal_img_n) {
               /* color bit 1 => RGB, bit 2 => alpha */
               s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
               if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode");
               if (scan == STBI__SCAN_header) return 1;
            } else {
               /* paletted: channel count settles after PLTE/tRNS; size
                  check assumes the worst case of 4 output channels */
               s->img_n = 1;
               if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG");
            }
            break;
         }

         case STBI__PNG_TYPE('P','L','T','E'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG");
            pal_len = c.length / 3;
            if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG");
            /* store as RGBA; tRNS may later fill in real alpha */
            for (i=0; i < pal_len; ++i) {
               palette[i*4+0] = stbi__get8(s);
               palette[i*4+1] = stbi__get8(s);
               palette[i*4+2] = stbi__get8(s);
               palette[i*4+3] = 255;
            }
            break;
         }

         case STBI__PNG_TYPE('t','R','N','S'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG");
            if (pal_img_n) {
               /* per-palette-entry alpha */
               if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; }
               if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG");
               if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG");
               pal_img_n = 4;
               for (i=0; i < c.length; ++i)
                  palette[i*4+3] = stbi__get8(s);
            } else {
               /* single transparent color key for gray/RGB images */
               if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG");
               if (c.length != (uint32_t) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG");
               has_trans = 1;
               /* scale the key to the 8-bit range used by the output */
               for (k=0; k < s->img_n; ++k)
                  tc[k] = (uint8_t) (stbi__get16be(s) & 255) * stbi__depth_scale_table[depth];
            }
            break;
         }

         case STBI__PNG_TYPE('I','D','A','T'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG");
            if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; }
            /* reject accumulated-size overflow */
            if ((int)(ioff + c.length) < (int)ioff) return 0;
            /* accumulate all IDAT payloads into one buffer, doubling
               the allocation as needed */
            if (ioff + c.length > idata_limit) {
               uint8_t *p;
               if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
               while (ioff + c.length > idata_limit)
                  idata_limit *= 2;
               p = (uint8_t *) realloc(z->idata, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory");
               z->idata = p;
            }
            if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG");
            ioff += c.length;
            break;
         }

         case STBI__PNG_TYPE('I','E','N','D'): {
            uint32_t raw_len, bpl;
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (scan != STBI__SCAN_load) return 1;
            if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG");
            /* initial size guess for the inflated data */
            bpl = (s->img_x * depth + 7) / 8;
            raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */;
            /* CgBI streams omit the zlib header */
            z->expanded = (uint8_t *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone);
            if (z->expanded == NULL) return 0;
            free(z->idata); z->idata = NULL;
            /* decide whether to synthesize an alpha channel now */
            if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
               s->img_out_n = s->img_n+1;
            else
               s->img_out_n = s->img_n;
            if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, depth, color, interlace)) return 0;
            if (has_trans)
               if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0;
            if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2)
               stbi__de_iphone(z);
            if (pal_img_n) {
               /* expand palette indices last, after filtering */
               s->img_n = pal_img_n;
               s->img_out_n = pal_img_n;
               if (req_comp >= 3) s->img_out_n = req_comp;
               if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n))
                  return 0;
            }
            free(z->expanded); z->expanded = NULL;
            return 1;
         }

         default:
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            /* bit 29 of the type is the "ancillary" flag: critical
               chunks we do not understand are an error */
            if ((c.type & (1 << 29)) == 0) {
#ifndef STBI_NO_FAILURE_STRINGS
               static char invalid_chunk[] = "XXXX PNG chunk not known";
               invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
               invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
               invalid_chunk[2] = STBI__BYTECAST(c.type >> 8);
               invalid_chunk[3] = STBI__BYTECAST(c.type >> 0);
#endif
               return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type");
            }
            stbi__skip(s, c.length);
            break;
      }
      /* skip the 4-byte CRC that follows every chunk */
      stbi__get32be(s);
   }
}
|
|
|
|
/* Decode a PNG and hand back the pixel buffer, converting to req_comp
   channels if requested.  All intermediate buffers are released. */
static unsigned char *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp)
{
   unsigned char *result = NULL;
   if (req_comp < 0 || req_comp > 4)
      return stbi__errpuc("bad req_comp", "Internal error");
   if (!stbi__parse_png_file(p, STBI__SCAN_load, req_comp))
      goto cleanup;
   /* take ownership of the decoded pixels */
   result = p->out;
   p->out = NULL;
   if (req_comp && req_comp != p->s->img_out_n) {
      result = stbi__convert_format(result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
      p->s->img_out_n = req_comp;
      if (result == NULL)
         return result;
   }
   *x = p->s->img_x;
   *y = p->s->img_y;
   if (n) *n = p->s->img_out_n;
cleanup:
   free(p->out);      p->out = NULL;
   free(p->expanded); p->expanded = NULL;
   free(p->idata);    p->idata = NULL;
   return result;
}
|
|
|
|
/* Entry point for PNG decoding from a generic stbi context. */
static unsigned char *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
   stbi__png png;
   png.s = s;
   return stbi__do_png(&png, x, y, comp, req_comp);
}
|
|
|
|
static int stbi__png_test(stbi__context *s)
{
   /* Non-destructive PNG probe: check the 8-byte signature, then restore
    * the stream position so other loaders can try the same data. */
   int is_png = stbi__check_png_header(s);
   stbi__rewind(s);
   return is_png;
}
|
|
#endif
|
|
|
|
#ifndef STBI_NO_BMP
|
|
static int stbi__bmp_test_raw(stbi__context *s)
{
   /* Probe the stream for a "BM" signature followed by a known DIB header
    * size.  Consumes bytes; the caller is expected to rewind afterwards. */
   int header_size;
   if (stbi__get8(s) != 'B') return 0;
   if (stbi__get8(s) != 'M') return 0;
   stbi__get32le(s);   /* file size (ignored)            */
   stbi__get16le(s);   /* reserved                        */
   stbi__get16le(s);   /* reserved                        */
   stbi__get32le(s);   /* pixel-data offset (ignored)     */
   header_size = stbi__get32le(s);
   switch (header_size) {
      case 12:    /* BITMAPCOREHEADER   */
      case 40:    /* BITMAPINFOHEADER   */
      case 56:    /* BITMAPV3INFOHEADER */
      case 108:   /* BITMAPV4HEADER     */
      case 124:   /* BITMAPV5HEADER     */
         return 1;
      default:
         return 0;
   }
}
|
|
|
|
static int stbi__bmp_test(stbi__context *s)
{
   /* Non-destructive BMP check: run the raw probe, then restore the
    * stream position for the next format's test. */
   int is_bmp = stbi__bmp_test_raw(s);
   stbi__rewind(s);
   return is_bmp;
}
|
|
|
|
|
|
static int stbi__high_bit(unsigned int z)
{
   /* Return the index (0..31) of the highest set bit of z, or -1 when
    * z == 0.  Binary-search style: test progressively smaller halves. */
   int pos = 0;
   if (z == 0)
      return -1;
   if (z >= 0x10000) { pos += 16; z >>= 16; }
   if (z >= 0x00100) { pos +=  8; z >>=  8; }
   if (z >= 0x00010) { pos +=  4; z >>=  4; }
   if (z >= 0x00004) { pos +=  2; z >>=  2; }
   if (z >= 0x00002) { pos +=  1; z >>=  1; }
   return pos;
}
|
|
|
|
static int stbi__bitcount(unsigned int a)
{
   /* Population count of a 32-bit value via SWAR (sum bits in parallel):
    * pairwise sums, then nibble sums, then a multiply to fold the four
    * byte counts into the top byte. */
   a = a - ((a >> 1) & 0x55555555);                 /* 2-bit partial sums  */
   a = (a & 0x33333333) + ((a >> 2) & 0x33333333);  /* 4-bit partial sums  */
   a = (a + (a >> 4)) & 0x0f0f0f0f;                 /* per-byte counts     */
   return (int)((a * 0x01010101u) >> 24);           /* sum of the 4 bytes  */
}
|
|
|
|
static int stbi__shiftsigned(int v, int shift, int bits)
{
   /* Scale a channel value extracted via a BMP bitfield mask to 8 bits.
    *
    * v     - the pixel ANDed with the channel mask (still in mask position)
    * shift - stbi__high_bit(mask) - 7: aligns the field's top bit to bit 7
    * bits  - stbi__bitcount(mask): width of the field in bits
    *
    * After alignment, the field's high bits are replicated into the low
    * bits so that a full-scale input maps to 255 (e.g. 5-bit 31 -> 255).
    *
    * All shifting is done on an unsigned copy: the original shifted the
    * signed `v` directly, which is undefined behavior when `-shift` pushes
    * bits past the sign bit, and implementation-defined when `v` is
    * negative (possible for a 32-bit alpha mask covering bit 31).
    */
   unsigned int uv = (unsigned int) v;
   int result;
   int z;

   if (shift < 0)
      uv <<= -shift;
   else
      uv >>= shift;
   result = (int) uv;

   /* Replicate the top bits downward, `bits` at a time, to fill 8 bits. */
   for (z = bits; z < 8; z += bits)
      result += (int) (uv >> z);
   return result;
}
|
|
|
|
/* Decode a BMP stream into an 8-bit interleaved pixel buffer.
 *
 * Supports the core (12-byte), info (40/56-byte) and V4/V5 (108/124-byte)
 * DIB headers; 4/8-bit palettized, 16/24/32-bit uncompressed, and BI_BITFIELDS
 * (compress == 3) images.  1-bit and RLE-compressed BMPs are rejected.
 *
 * s        - input stream
 * x, y     - receive image width/height on success
 * comp     - if non-NULL, receives the source component count (3, or 4 when
 *            an alpha mask is present)
 * req_comp - 0 for "native", otherwise desired output components (1..4)
 *
 * Returns a malloc'd buffer owned by the caller, or NULL on error
 * (error recorded via stbi__errpuc).
 */
static uint8_t *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
   uint8_t *out;
   /* Channel bitmasks (BI_BITFIELDS or synthesized defaults). */
   unsigned int mr=0,mg=0,mb=0,ma=0, fake_a=0;
   uint8_t pal[256][4];
   int psize=0,i,j,compress=0,width;
   int bpp, flip_vertically, pad, target, offset, hsz;
   /* --- BITMAPFILEHEADER --- */
   if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP");
   stbi__get32le(s);            /* file size (ignored) */
   stbi__get16le(s);            /* reserved */
   stbi__get16le(s);            /* reserved */
   offset = stbi__get32le(s);   /* byte offset of the pixel data */
   hsz = stbi__get32le(s);      /* DIB header size selects the header variant */
   if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown");
   if (hsz == 12) {
      /* BITMAPCOREHEADER stores 16-bit dimensions. */
      s->img_x = stbi__get16le(s);
      s->img_y = stbi__get16le(s);
   } else {
      s->img_x = stbi__get32le(s);
      s->img_y = stbi__get32le(s);
   }
   if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP");  /* planes must be 1 */
   bpp = stbi__get16le(s);
   if (bpp == 1) return stbi__errpuc("monochrome", "BMP type not supported: 1-bit");
   /* Positive height means bottom-up storage; we decode top-down and flip. */
   flip_vertically = ((int) s->img_y) > 0;
   s->img_y = abs((int) s->img_y);
   if (hsz == 12) {
      /* Core header: palette entries are 3 bytes; infer count from offset. */
      if (bpp < 24)
         psize = (offset - 14 - 24) / 3;
   } else {
      compress = stbi__get32le(s);
      if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE");
      stbi__get32le(s);  /* image size (ignored) */
      stbi__get32le(s);  /* x pixels per meter */
      stbi__get32le(s);  /* y pixels per meter */
      stbi__get32le(s);  /* colors used */
      stbi__get32le(s);  /* colors important */
      if (hsz == 40 || hsz == 56) {
         if (hsz == 56) {
            /* V3 header: skip the four extra mask/alpha fields here; masks
             * for bitfields are read below from the stream when compress==3. */
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
         }
         if (bpp == 16 || bpp == 32) {
            mr = mg = mb = 0;
            if (compress == 0) {
               /* No explicit masks: synthesize the standard layouts. */
               if (bpp == 32) {
                  mr = 0xffu << 16;
                  mg = 0xffu << 8;
                  mb = 0xffu << 0;
                  ma = 0xffu << 24;
                  /* marks that alpha was assumed, not stored explicitly */
                  fake_a = 1;
                  STBI_NOTUSED(fake_a);
               } else {
                  /* 16bpp default is 555. */
                  mr = 31u << 10;
                  mg = 31u << 5;
                  mb = 31u << 0;
               }
            } else if (compress == 3) {
               /* BI_BITFIELDS: masks follow the header. */
               mr = stbi__get32le(s);
               mg = stbi__get32le(s);
               mb = stbi__get32le(s);
               /* Identical masks would make a meaningless grayscale hack. */
               if (mr == mg && mg == mb) {
                  return stbi__errpuc("bad BMP", "bad BMP");
               }
            } else
               return stbi__errpuc("bad BMP", "bad BMP");
         }
      } else {
         /* V4/V5 headers carry the masks inline. */
         STBI_ASSERT(hsz == 108 || hsz == 124);
         mr = stbi__get32le(s);
         mg = stbi__get32le(s);
         mb = stbi__get32le(s);
         ma = stbi__get32le(s);
         stbi__get32le(s);  /* color space type (ignored) */
         for (i=0; i < 12; ++i)
            stbi__get32le(s);  /* CIEXYZTRIPLE endpoints + gamma (ignored) */
         if (hsz == 124) {
            /* V5 extras: intent, profile data/size, reserved. */
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
         }
      }
      /* Infer palette entry count (4 bytes each) from the pixel offset. */
      if (bpp < 16)
         psize = (offset - 14 - hsz) >> 2;
   }
   s->img_n = ma ? 4 : 3;
   if (req_comp && req_comp >= 3)
      target = req_comp;
   else
      target = s->img_n;
   /* NOTE(review): target * img_x * img_y can overflow int for hostile
    * dimensions; upstream stb_image later added an overflow-checked
    * allocation here — consider the same hardening. */
   out = (uint8_t *) malloc(target * s->img_x * s->img_y);
   if (!out) return stbi__errpuc("outofmem", "Out of memory");
   if (bpp < 16) {
      /* ---- palettized 4/8-bit path ---- */
      int z=0;
      if (psize == 0 || psize > 256) { free(out); return stbi__errpuc("invalid", "Corrupt BMP"); }
      for (i=0; i < psize; ++i) {
         /* Palette entries are stored BGR(A); we keep RGB order. */
         pal[i][2] = stbi__get8(s);
         pal[i][1] = stbi__get8(s);
         pal[i][0] = stbi__get8(s);
         if (hsz != 12) stbi__get8(s);  /* 4th palette byte (reserved) */
         pal[i][3] = 255;
      }
      /* Skip any gap between the palette and the pixel data. */
      stbi__skip(s, offset - 14 - hsz - psize * (hsz == 12 ? 3 : 4));
      if (bpp == 4) width = (s->img_x + 1) >> 1;  /* bytes per row, 2 px/byte */
      else if (bpp == 8) width = s->img_x;
      else { free(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); }
      pad = (-width)&3;  /* rows are padded to 4-byte boundaries */
      for (j=0; j < (int) s->img_y; ++j) {
         /* Process two pixels per iteration so 4bpp nibbles stay paired. */
         for (i=0; i < (int) s->img_x; i += 2) {
            int v=stbi__get8(s),v2=0;
            if (bpp == 4) {
               v2 = v & 15;   /* low nibble = second pixel */
               v >>= 4;       /* high nibble = first pixel */
            }
            out[z++] = pal[v][0];
            out[z++] = pal[v][1];
            out[z++] = pal[v][2];
            if (target == 4) out[z++] = 255;
            if (i+1 == (int) s->img_x) break;  /* odd width: no second pixel */
            v = (bpp == 8) ? stbi__get8(s) : v2;
            out[z++] = pal[v][0];
            out[z++] = pal[v][1];
            out[z++] = pal[v][2];
            if (target == 4) out[z++] = 255;
         }
         stbi__skip(s, pad);
      }
   } else {
      /* ---- 16/24/32-bit direct-color path ---- */
      int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
      int z = 0;
      int easy=0;  /* 1 = plain BGR24, 2 = plain BGRA32: byte-wise fast path */
      stbi__skip(s, offset - 14 - hsz);
      if (bpp == 24) width = 3 * s->img_x;
      else if (bpp == 16) width = 2*s->img_x;
      else /* bpp = 32 and pad = 0 */ width=0;
      pad = (-width) & 3;
      if (bpp == 24) {
         easy = 1;
      } else if (bpp == 32) {
         if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
            easy = 2;
      }
      if (!easy) {
         /* General bitfield path: derive per-channel shift/width from masks. */
         if (!mr || !mg || !mb) { free(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
         /* high_bit - 7 aligns each field's top bit to bit 7 of a byte. */
         rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr);
         gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg);
         bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb);
         ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma);
      }
      for (j=0; j < (int) s->img_y; ++j) {
         if (easy) {
            for (i=0; i < (int) s->img_x; ++i) {
               unsigned char a;
               /* Stored as BGR; emit RGB. */
               out[z+2] = stbi__get8(s);
               out[z+1] = stbi__get8(s);
               out[z+0] = stbi__get8(s);
               z += 3;
               a = (easy == 2 ? stbi__get8(s) : 255);
               if (target == 4) out[z++] = a;
            }
         } else {
            for (i=0; i < (int) s->img_x; ++i) {
               uint32_t v = (bpp == 16 ? (uint32_t) stbi__get16le(s) : stbi__get32le(s));
               int a;
               /* Extract each field and rescale it to 8 bits. */
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
               a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
               if (target == 4) out[z++] = STBI__BYTECAST(a);
            }
         }
         stbi__skip(s, pad);
      }
   }
   /* Bottom-up files were decoded in file order: swap rows in place. */
   if (flip_vertically) {
      uint8_t t;
      for (j=0; j < (int) s->img_y>>1; ++j) {
         uint8_t *p1 = out + j *s->img_x*target;
         uint8_t *p2 = out + (s->img_y-1-j)*s->img_x*target;
         for (i=0; i < (int) s->img_x*target; ++i) {
            t = p1[i], p1[i] = p2[i], p2[i] = t;
         }
      }
   }

   if (req_comp && req_comp != target) {
      /* convert_format frees its input on failure, so no leak here. */
      out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
      if (out == NULL) return out;
   }

   *x = s->img_x;
   *y = s->img_y;
   if (comp) *comp = s->img_n;
   return out;
}
|
|
#endif
|
|
|
|
#ifndef STBI_NO_TGA
|
|
static int stbi__tga_test(stbi__context *s)
{
   /* Heuristic TGA check (the format has no magic number): validate the
    * header fields one by one.  NOTE: an early mismatch returns 0 WITHOUT
    * rewinding, matching the original behavior; only the full-header path
    * rewinds the stream. */
   int ok;
   int field;
   stbi__get8(s);                            /* image-ID length (any)      */
   field = stbi__get8(s);                    /* color-map type: 0 or 1     */
   if (field > 1) return 0;
   field = stbi__get8(s);                    /* image type                 */
   if (field != 1 && field != 2 && field != 3 &&
       field != 9 && field != 10 && field != 11) return 0;
   stbi__get16be(s);                         /* color-map first entry      */
   stbi__get16be(s);                         /* color-map length           */
   stbi__get8(s);                            /* color-map entry size       */
   stbi__get16be(s);                         /* x origin                   */
   stbi__get16be(s);                         /* y origin                   */
   if (stbi__get16be(s) < 1) return 0;       /* width must be positive     */
   if (stbi__get16be(s) < 1) return 0;       /* height must be positive    */
   field = stbi__get8(s);                    /* bits per pixel             */
   ok = (field == 8 || field == 16 || field == 24 || field == 32);
   stbi__rewind(s);
   return ok;
}
|
|
|
|
/* Decode a TGA stream into an 8-bit interleaved pixel buffer.
 *
 * Supports uncompressed and RLE-compressed, palettized and direct-color
 * images at 8/16/24/32 bits per pixel.
 *
 * s        - input stream
 * x, y     - receive image width/height
 * comp     - if non-NULL, receives the source component count
 * req_comp - 0 for "native", otherwise desired output components
 *
 * Returns a malloc'd buffer owned by the caller, or NULL on error.
 */
static uint8_t *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
   unsigned char *tga_data;
   int i, j;
   /* --- 18-byte TGA header, read field by field --- */
   int tga_offset = stbi__get8(s);          /* image-ID length: bytes to skip */
   int tga_indexed = stbi__get8(s);         /* color-map type (0/1) */
   int tga_image_type = stbi__get8(s);      /* 1/2/3 raw, 9/10/11 RLE */
   int tga_is_RLE = 0;
   int tga_palette_start = stbi__get16le(s);
   int tga_palette_len = stbi__get16le(s);
   int tga_palette_bits = stbi__get8(s);    /* bits per palette entry */
   int tga_x_origin = stbi__get16le(s);
   int tga_y_origin = stbi__get16le(s);
   int tga_width = stbi__get16le(s);
   int tga_height = stbi__get16le(s);
   int tga_bits_per_pixel = stbi__get8(s);
   int tga_comp = tga_bits_per_pixel / 8;
   int tga_inverted = stbi__get8(s);        /* image-descriptor byte */
   unsigned char *tga_palette = NULL;
   unsigned char raw_data[4] = {0};         /* current pixel, up to 4 bytes */
   /* RLE decoder state: pixels left in the current packet, and whether the
    * packet repeats one pixel or carries literal pixels. */
   int RLE_count = 0;
   int RLE_repeating = 0;
   int read_next_pixel = 1;

   /* Image types >= 8 are the RLE variants of types 1..3. */
   if ( tga_image_type >= 8 )
   {
      tga_image_type -= 8;
      tga_is_RLE = 1;
   }

   /* Bit 5 of the descriptor = top-to-bottom; invert the flag so
    * tga_inverted==1 means "rows need flipping" (file is bottom-up). */
   tga_inverted = 1 - ((tga_inverted >> 5) & 1);

   /* Reject anything we cannot decode. */
   if ( (tga_width < 1) || (tga_height < 1) ||
        (tga_image_type < 1) || (tga_image_type > 3) ||
        ((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16) &&
         (tga_bits_per_pixel != 24) && (tga_bits_per_pixel != 32))
      )
      return NULL;

   /* For palettized images the output depth comes from the palette entries,
    * not the 8-bit indices stored per pixel. */
   if ( tga_indexed )
      tga_comp = tga_palette_bits / 8;

   *x = tga_width;
   *y = tga_height;
   if (comp) *comp = tga_comp;

   tga_data = (unsigned char*)malloc( (size_t)tga_width * tga_height * tga_comp );
   if (!tga_data) return stbi__errpuc("outofmem", "Out of memory");

   /* Skip the optional image-ID field. */
   stbi__skip(s, tga_offset );

   if ( !tga_indexed && !tga_is_RLE) {
      /* Fast path: raw direct-color data, read one row at a time directly
       * into its final (possibly flipped) position.
       * NOTE(review): the stbi__getn return value is not checked here, so a
       * truncated file yields uninitialized rows — confirm acceptable. */
      for (i=0; i < tga_height; ++i) {
         int y = tga_inverted ? tga_height -i - 1 : i;
         uint8_t *tga_row = tga_data + y*tga_width*tga_comp;
         stbi__getn(s, tga_row, tga_width * tga_comp);
      }
   }
   else
   {
      /* General path: per-pixel loop handling palettes and/or RLE. */
      if ( tga_indexed)
      {
         stbi__skip(s, tga_palette_start );
         tga_palette = (unsigned char*)malloc( tga_palette_len * tga_palette_bits / 8 );
         if (!tga_palette) {
            free(tga_data);
            return stbi__errpuc("outofmem", "Out of memory");
         }
         if (!stbi__getn(s, tga_palette, tga_palette_len * tga_palette_bits / 8 )) {
            free(tga_data);
            free(tga_palette);
            return stbi__errpuc("bad palette", "Corrupt TGA");
         }
      }
      for (i=0; i < tga_width * tga_height; ++i)
      {
         /* Decide whether this pixel must be fetched from the stream or is
          * a repeat of the previous one (RLE run packet). */
         if ( tga_is_RLE )
         {
            if ( RLE_count == 0 )
            {
               /* Start of a packet: bit 7 = run/raw, low 7 bits = count-1. */
               int RLE_cmd = stbi__get8(s);
               RLE_count = 1 + (RLE_cmd & 127);
               RLE_repeating = RLE_cmd >> 7;
               read_next_pixel = 1;
            } else if ( !RLE_repeating )
            {
               /* Raw packet: every pixel is read from the stream. */
               read_next_pixel = 1;
            }
         } else
         {
            read_next_pixel = 1;
         }
         if ( read_next_pixel )
         {
            if ( tga_indexed )
            {
               /* Look the pixel up in the palette; clamp bad indices to 0. */
               int pal_idx = stbi__get8(s);
               if ( pal_idx >= tga_palette_len )
               {
                  pal_idx = 0;
               }
               /* NOTE(review): entry stride uses tga_bits_per_pixel rather
                * than tga_palette_bits — these presumably match for valid
                * files; confirm against the spec. */
               pal_idx *= tga_bits_per_pixel / 8;
               for (j = 0; j*8 < tga_bits_per_pixel; ++j)
               {
                  raw_data[j] = tga_palette[pal_idx+j];
               }
            } else
            {
               /* Direct color: read the pixel's bytes. */
               for (j = 0; j*8 < tga_bits_per_pixel; ++j)
                  raw_data[j] = stbi__get8(s);
            }
            read_next_pixel = 0;
         }

         /* Emit the current pixel (repeated for run packets). */
         for (j = 0; j < tga_comp; ++j)
            tga_data[i*tga_comp+j] = raw_data[j];

         --RLE_count;
      }
      /* This path decoded in file order; flip bottom-up images in place. */
      if ( tga_inverted )
      {
         for (j = 0; j*2 < tga_height; ++j)
         {
            int index1 = j * tga_width * tga_comp;
            int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
            for (i = tga_width * tga_comp; i > 0; --i)
            {
               unsigned char temp = tga_data[index1];
               tga_data[index1] = tga_data[index2];
               tga_data[index2] = temp;
               ++index1;
               ++index2;
            }
         }
      }
      if ( tga_palette != NULL )
      {
         free( tga_palette );
      }
   }

   /* TGA stores BGR(A); swap red and blue to produce RGB(A). */
   if (tga_comp >= 3)
   {
      unsigned char* tga_pixel = tga_data;
      for (i=0; i < tga_width * tga_height; ++i)
      {
         unsigned char temp = tga_pixel[0];
         tga_pixel[0] = tga_pixel[2];
         tga_pixel[2] = temp;
         tga_pixel += tga_comp;
      }
   }

   if (req_comp && req_comp != tga_comp)
      tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);

   /* Silence unused-variable warnings for header fields read but unused. */
   tga_palette_start = tga_palette_len = tga_palette_bits =
         tga_x_origin = tga_y_origin = 0;
   return tga_data;
}
|
|
#endif
|
|
|
|
#endif
|