FFmpeg 4.4.6
webp.c
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  * ICC profile
37  *
38  * Unimplemented:
39  * - Animation
40  * - XMP metadata
41  */
42 
43 #include "libavutil/imgutils.h"
44 
45 #define BITSTREAM_READER_LE
46 #include "avcodec.h"
47 #include "bytestream.h"
48 #include "exif.h"
49 #include "get_bits.h"
50 #include "internal.h"
51 #include "thread.h"
52 #include "vp8.h"
53 
54 #define VP8X_FLAG_ANIMATION 0x02
55 #define VP8X_FLAG_XMP_METADATA 0x04
56 #define VP8X_FLAG_EXIF_METADATA 0x08
57 #define VP8X_FLAG_ALPHA 0x10
58 #define VP8X_FLAG_ICC 0x20
59 
60 #define MAX_PALETTE_SIZE 256
61 #define MAX_CACHE_BITS 11
62 #define NUM_CODE_LENGTH_CODES 19
63 #define HUFFMAN_CODES_PER_META_CODE 5
64 #define NUM_LITERAL_CODES 256
65 #define NUM_LENGTH_CODES 24
66 #define NUM_DISTANCE_CODES 40
67 #define NUM_SHORT_DISTANCES 120
68 #define MAX_HUFFMAN_CODE_LENGTH 15
69 
70 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
71  NUM_LITERAL_CODES + NUM_LENGTH_CODES,
72  NUM_LITERAL_CODES, NUM_LITERAL_CODES, NUM_LITERAL_CODES,
73  NUM_DISTANCE_CODES
74 };
75 
76 static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES] = {
77  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
78 };
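/* Added note: each Huffman meta code in VP8L carries
 * HUFFMAN_CODES_PER_META_CODE (5) codes, indexed by enum HuffmanIndex below:
 * green/length/color-cache, red, blue, alpha and distance. Their alphabet
 * sizes are the entries of alphabet_sizes[] above; the first alphabet is
 * enlarged by (1 << color_cache_bits) symbols when a color cache is present
 * (see decode_entropy_coded_image()). */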
79 
80 static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
81  { 0, 1 }, { 1, 0 }, { 1, 1 }, { -1, 1 }, { 0, 2 }, { 2, 0 }, { 1, 2 }, { -1, 2 },
82  { 2, 1 }, { -2, 1 }, { 2, 2 }, { -2, 2 }, { 0, 3 }, { 3, 0 }, { 1, 3 }, { -1, 3 },
83  { 3, 1 }, { -3, 1 }, { 2, 3 }, { -2, 3 }, { 3, 2 }, { -3, 2 }, { 0, 4 }, { 4, 0 },
84  { 1, 4 }, { -1, 4 }, { 4, 1 }, { -4, 1 }, { 3, 3 }, { -3, 3 }, { 2, 4 }, { -2, 4 },
85  { 4, 2 }, { -4, 2 }, { 0, 5 }, { 3, 4 }, { -3, 4 }, { 4, 3 }, { -4, 3 }, { 5, 0 },
86  { 1, 5 }, { -1, 5 }, { 5, 1 }, { -5, 1 }, { 2, 5 }, { -2, 5 }, { 5, 2 }, { -5, 2 },
87  { 4, 4 }, { -4, 4 }, { 3, 5 }, { -3, 5 }, { 5, 3 }, { -5, 3 }, { 0, 6 }, { 6, 0 },
88  { 1, 6 }, { -1, 6 }, { 6, 1 }, { -6, 1 }, { 2, 6 }, { -2, 6 }, { 6, 2 }, { -6, 2 },
89  { 4, 5 }, { -4, 5 }, { 5, 4 }, { -5, 4 }, { 3, 6 }, { -3, 6 }, { 6, 3 }, { -6, 3 },
90  { 0, 7 }, { 7, 0 }, { 1, 7 }, { -1, 7 }, { 5, 5 }, { -5, 5 }, { 7, 1 }, { -7, 1 },
91  { 4, 6 }, { -4, 6 }, { 6, 4 }, { -6, 4 }, { 2, 7 }, { -2, 7 }, { 7, 2 }, { -7, 2 },
92  { 3, 7 }, { -3, 7 }, { 7, 3 }, { -7, 3 }, { 5, 6 }, { -5, 6 }, { 6, 5 }, { -6, 5 },
93  { 8, 0 }, { 4, 7 }, { -4, 7 }, { 7, 4 }, { -7, 4 }, { 8, 1 }, { 8, 2 }, { 6, 6 },
94  { -6, 6 }, { 8, 3 }, { 5, 7 }, { -5, 7 }, { 7, 5 }, { -7, 5 }, { 8, 4 }, { 6, 7 },
95  { -6, 7 }, { 7, 6 }, { -7, 6 }, { 8, 5 }, { 7, 7 }, { -7, 7 }, { 8, 6 }, { 8, 7 }
96 };
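/* Added note: distance codes 1..NUM_SHORT_DISTANCES are 2-D offsets taken
 * from this table and folded into a linear pixel distance as
 * FFMAX(1, x + y * width). For example, on a 100-pixel-wide image, short
 * distance 3 maps to { 1, 1 } (one pixel left, one row up), i.e. a linear
 * distance of 1 + 1 * 100 = 101. Larger codes simply have
 * NUM_SHORT_DISTANCES subtracted (see decode_entropy_coded_image()). */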
97 
98 enum AlphaCompression {
99  ALPHA_COMPRESSION_NONE,
100  ALPHA_COMPRESSION_VP8L,
101 };
102 
103 enum AlphaFilter {
104  ALPHA_FILTER_NONE,
105  ALPHA_FILTER_HORIZONTAL,
106  ALPHA_FILTER_VERTICAL,
107  ALPHA_FILTER_GRADIENT,
108 };
109 
110 enum TransformType {
111  PREDICTOR_TRANSFORM = 0,
112  COLOR_TRANSFORM = 1,
113  SUBTRACT_GREEN = 2,
114  COLOR_INDEXING_TRANSFORM = 3,
115 };
116 
117 enum PredictionMode {
118  PRED_MODE_BLACK,
119  PRED_MODE_L,
120  PRED_MODE_T,
121  PRED_MODE_TR,
122  PRED_MODE_TL,
123  PRED_MODE_AVG_T_AVG_L_TR,
124  PRED_MODE_AVG_L_TL,
125  PRED_MODE_AVG_L_T,
126  PRED_MODE_AVG_TL_T,
127  PRED_MODE_AVG_T_TR,
128  PRED_MODE_AVG_AVG_L_TL_AVG_T_TR,
129  PRED_MODE_SELECT,
130  PRED_MODE_ADD_SUBTRACT_FULL,
131  PRED_MODE_ADD_SUBTRACT_HALF,
132 };
133 
134 enum HuffmanIndex {
135  HUFF_IDX_GREEN = 0,
136  HUFF_IDX_RED = 1,
137  HUFF_IDX_BLUE = 2,
138  HUFF_IDX_ALPHA = 3,
139  HUFF_IDX_DIST = 4
140 };
141 
142 /* The structure of WebP lossless is an optional series of transformation data,
143  * followed by the primary image. The primary image also optionally contains
144  * an entropy group mapping if there are multiple entropy groups. There is a
145  * basic image type called an "entropy coded image" that is used for all of
146  * these. The type of each entropy coded image is referred to by the
147  * specification as its role. */
148 enum ImageRole {
149  /* Primary Image: Stores the actual pixels of the image. */
150  IMAGE_ROLE_ARGB,
151 
152  /* Entropy Image: Defines which Huffman group to use for different areas of
153  * the primary image. */
154  IMAGE_ROLE_ENTROPY,
155 
156  /* Predictors: Defines which predictor type to use for different areas of
157  * the primary image. */
158  IMAGE_ROLE_PREDICTOR,
159 
160  /* Color Transform Data: Defines the color transformation for different
161  * areas of the primary image. */
162  IMAGE_ROLE_COLOR_TRANSFORM,
163 
164  /* Color Index: Stored as an image of height == 1. */
165  IMAGE_ROLE_COLOR_INDEXING,
166 
167  IMAGE_ROLE_NB,
168 };
169 
170 typedef struct HuffReader {
171  VLC vlc; /* Huffman decoder context */
172  int simple; /* whether to use simple mode */
173  int nb_symbols; /* number of coded symbols */
174  uint16_t simple_symbols[2]; /* symbols for simple mode */
175 } HuffReader;
176 
177 typedef struct ImageContext {
178  enum ImageRole role; /* role of this image */
179  AVFrame *frame; /* AVFrame for data */
180  int color_cache_bits; /* color cache size, log2 */
181  uint32_t *color_cache; /* color cache data */
182  int nb_huffman_groups; /* number of huffman groups */
183  HuffReader *huffman_groups; /* reader for each huffman group */
184  int size_reduction; /* relative size compared to primary image, log2 */
185  int is_alpha_primary; /* set if this is the primary image of an alpha chunk */
186 } ImageContext;
187 
188 typedef struct WebPContext {
189  VP8Context v; /* VP8 Context used for lossy decoding */
190  GetBitContext gb; /* bitstream reader for main image chunk */
191  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
192  AVPacket *pkt; /* AVPacket to be passed to the underlying VP8 decoder */
193  AVCodecContext *avctx; /* parent AVCodecContext */
194  int initialized; /* set once the VP8 context is initialized */
195  int has_alpha; /* has a separate alpha chunk */
196  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
197  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
198  uint8_t *alpha_data; /* alpha chunk data */
199  int alpha_data_size; /* alpha chunk data size */
200  int has_exif; /* set after an EXIF chunk has been processed */
201  int has_iccp; /* set after an ICCP chunk has been processed */
202  int width; /* image width */
203  int height; /* image height */
204  int lossless; /* indicates lossless or lossy */
205 
206  int nb_transforms; /* number of transforms */
207  enum TransformType transforms[4]; /* transformations used in the image, in order */
208  int reduced_width; /* reduced width for index image, if applicable */
209  int nb_huffman_groups; /* number of huffman groups in the primary image */
210  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
211 } WebPContext;
212 
213 #define GET_PIXEL(frame, x, y) \
214  ((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x))
215 
216 #define GET_PIXEL_COMP(frame, x, y, c) \
217  (*((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x) + c))
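/* Added note: image data is stored as packed AV_PIX_FMT_ARGB, so within the
 * 4 bytes returned by GET_PIXEL(), byte 0 is alpha, byte 1 red, byte 2 green
 * and byte 3 blue. This is why the VP8L "green" channel is read and written
 * at p[2], why the entropy image combines components 1 and 2 into a 16-bit
 * group index, and why the predictor image stores its mode in component 2. */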
218 
219 static void image_ctx_free(ImageContext *img)
220 {
221  int i, j;
222 
223  av_free(img->color_cache);
224  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
225  av_frame_free(&img->frame);
226  if (img->huffman_groups) {
227  for (i = 0; i < img->nb_huffman_groups; i++) {
228  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
229  ff_free_vlc(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
230  }
231  av_free(img->huffman_groups);
232  }
233  memset(img, 0, sizeof(*img));
234 }
235 
236 static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
237 {
238  if (r->simple) {
239  if (r->nb_symbols == 1)
240  return r->simple_symbols[0];
241  else
242  return r->simple_symbols[get_bits1(gb)];
243  } else
244  return get_vlc2(gb, r->vlc.table, 8, 2);
245 }
246 
247 static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths,
248  int alphabet_size)
249 {
250  int len = 0, sym, code = 0, ret;
251  int max_code_length = 0;
252  uint16_t *codes;
253 
254  /* special-case 1 symbol since the vlc reader cannot handle it */
255  for (sym = 0; sym < alphabet_size; sym++) {
256  if (code_lengths[sym] > 0) {
257  len++;
258  code = sym;
259  if (len > 1)
260  break;
261  }
262  }
263  if (len == 1) {
264  r->nb_symbols = 1;
265  r->simple_symbols[0] = code;
266  r->simple = 1;
267  return 0;
268  }
269 
270  for (sym = 0; sym < alphabet_size; sym++)
271  max_code_length = FFMAX(max_code_length, code_lengths[sym]);
272 
273  if (max_code_length == 0 || max_code_length > MAX_HUFFMAN_CODE_LENGTH)
274  return AVERROR(EINVAL);
275 
276  codes = av_malloc_array(alphabet_size, sizeof(*codes));
277  if (!codes)
278  return AVERROR(ENOMEM);
279 
280  code = 0;
281  r->nb_symbols = 0;
282  for (len = 1; len <= max_code_length; len++) {
283  for (sym = 0; sym < alphabet_size; sym++) {
284  if (code_lengths[sym] != len)
285  continue;
286  codes[sym] = code++;
287  r->nb_symbols++;
288  }
289  code <<= 1;
290  }
291  if (!r->nb_symbols) {
292  av_free(codes);
293  return AVERROR_INVALIDDATA;
294  }
295 
296  ret = init_vlc(&r->vlc, 8, alphabet_size,
297  code_lengths, sizeof(*code_lengths), sizeof(*code_lengths),
298  codes, sizeof(*codes), sizeof(*codes), INIT_VLC_OUTPUT_LE);
299  if (ret < 0) {
300  av_free(codes);
301  return ret;
302  }
303  r->simple = 0;
304 
305  av_free(codes);
306  return 0;
307 }
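/* Added note: the loops above assign canonical codes in order of increasing
 * code length, and in symbol order within a length, doubling the running
 * code value after each length. As a small worked example, code lengths
 * {2, 1, 3, 3} for symbols 0..3 produce the codes 10, 0, 110 and 111:
 * symbol 1 gets 0 at length 1, the code doubles to 10 for symbol 0 at
 * length 2, and doubles again to 110/111 for symbols 2 and 3 at length 3. */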
308 
309 static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
310 {
311  hc->nb_symbols = get_bits1(&s->gb) + 1;
312 
313  if (get_bits1(&s->gb))
314  hc->simple_symbols[0] = get_bits(&s->gb, 8);
315  else
316  hc->simple_symbols[0] = get_bits1(&s->gb);
317 
318  if (hc->nb_symbols == 2)
319  hc->simple_symbols[1] = get_bits(&s->gb, 8);
320 
321  hc->simple = 1;
322 }
323 
324 static int read_huffman_code_normal(WebPContext *s, HuffReader *hc,
325  int alphabet_size)
326 {
327  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
328  uint8_t *code_lengths;
329  uint8_t code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
330  int i, symbol, max_symbol, prev_code_len, ret;
331  int num_codes = 4 + get_bits(&s->gb, 4);
332 
333  av_assert1(num_codes <= NUM_CODE_LENGTH_CODES);
334 
335  for (i = 0; i < num_codes; i++)
336  code_length_code_lengths[code_length_code_order[i]] = get_bits(&s->gb, 3);
337 
338  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths,
339  NUM_CODE_LENGTH_CODES);
340  if (ret < 0)
341  return ret;
342 
343  code_lengths = av_mallocz(alphabet_size);
344  if (!code_lengths) {
345  ret = AVERROR(ENOMEM);
346  goto finish;
347  }
348 
349  if (get_bits1(&s->gb)) {
350  int bits = 2 + 2 * get_bits(&s->gb, 3);
351  max_symbol = 2 + get_bits(&s->gb, bits);
352  if (max_symbol > alphabet_size) {
353  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
354  max_symbol, alphabet_size);
355  ret = AVERROR_INVALIDDATA;
356  goto finish;
357  }
358  } else {
359  max_symbol = alphabet_size;
360  }
361 
362  prev_code_len = 8;
363  symbol = 0;
364  while (symbol < alphabet_size) {
365  int code_len;
366 
367  if (!max_symbol--)
368  break;
369  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
370  if (code_len < 16) {
371  /* Code length code [0..15] indicates literal code lengths. */
372  code_lengths[symbol++] = code_len;
373  if (code_len)
374  prev_code_len = code_len;
375  } else {
376  int repeat = 0, length = 0;
377  switch (code_len) {
378  case 16:
379  /* Code 16 repeats the previous non-zero value [3..6] times,
380  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
381  * non-zero value has been emitted, a value of 8 is repeated. */
382  repeat = 3 + get_bits(&s->gb, 2);
383  length = prev_code_len;
384  break;
385  case 17:
386  /* Code 17 emits a streak of zeros [3..10], i.e.,
387  * 3 + ReadBits(3) times. */
388  repeat = 3 + get_bits(&s->gb, 3);
389  break;
390  case 18:
391  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
392  * 11 + ReadBits(7) times. */
393  repeat = 11 + get_bits(&s->gb, 7);
394  break;
395  }
396  if (symbol + repeat > alphabet_size) {
397  av_log(s->avctx, AV_LOG_ERROR,
398  "invalid symbol %d + repeat %d > alphabet size %d\n",
399  symbol, repeat, alphabet_size);
400  ret = AVERROR_INVALIDDATA;
401  goto finish;
402  }
403  while (repeat-- > 0)
404  code_lengths[symbol++] = length;
405  }
406  }
407 
408  ret = huff_reader_build_canonical(hc, code_lengths, alphabet_size);
409 
410 finish:
411  ff_free_vlc(&code_len_hc.vlc);
412  av_free(code_lengths);
413  return ret;
414 }
415 
416 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
417  int w, int h);
418 
419 #define PARSE_BLOCK_SIZE(w, h) do { \
420  block_bits = get_bits(&s->gb, 3) + 2; \
421  blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
422  blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
423 } while (0)
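/* Added note: block_bits is in the range [2, 9], so transform blocks cover
 * 4x4 up to 512x512 pixels, and blocks_w/blocks_h are the image dimensions
 * rounded up to whole blocks. For example, a 100x60 image with
 * block_bits = 4 (16x16 blocks) yields blocks_w = 7 and blocks_h = 4. */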
424 
425 static int decode_entropy_image(WebPContext *s)
426 {
427  ImageContext *img;
428  int ret, block_bits, width, blocks_w, blocks_h, x, y, max;
429 
430  width = s->width;
431  if (s->reduced_width > 0)
432  width = s->reduced_width;
433 
434  PARSE_BLOCK_SIZE(width, s->height);
435 
436  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
437  if (ret < 0)
438  return ret;
439 
440  img = &s->image[IMAGE_ROLE_ENTROPY];
441  img->size_reduction = block_bits;
442 
443  /* the number of huffman groups is determined by the maximum group number
444  * coded in the entropy image */
445  max = 0;
446  for (y = 0; y < img->frame->height; y++) {
447  for (x = 0; x < img->frame->width; x++) {
448  int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
449  int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
450  int p = p0 << 8 | p1;
451  max = FFMAX(max, p);
452  }
453  }
454  s->nb_huffman_groups = max + 1;
455 
456  return 0;
457 }
458 
459 static int parse_transform_predictor(WebPContext *s)
460 {
461  int block_bits, blocks_w, blocks_h, ret;
462 
463  PARSE_BLOCK_SIZE(s->width, s->height);
464 
466  blocks_h);
467  if (ret < 0)
468  return ret;
469 
470  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
471 
472  return 0;
473 }
474 
475 static int parse_transform_color(WebPContext *s)
476 {
477  int block_bits, blocks_w, blocks_h, ret;
478 
479  PARSE_BLOCK_SIZE(s->width, s->height);
480 
481  ret = decode_entropy_coded_image(s, IMAGE_ROLE_COLOR_TRANSFORM, blocks_w,
482  blocks_h);
483  if (ret < 0)
484  return ret;
485 
486  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
487 
488  return 0;
489 }
490 
491 static int parse_transform_color_indexing(WebPContext *s)
492 {
493  ImageContext *img;
494  int width_bits, index_size, ret, x;
495  uint8_t *ct;
496 
497  index_size = get_bits(&s->gb, 8) + 1;
498 
499  if (index_size <= 2)
500  width_bits = 3;
501  else if (index_size <= 4)
502  width_bits = 2;
503  else if (index_size <= 16)
504  width_bits = 1;
505  else
506  width_bits = 0;
507 
508  ret = decode_entropy_coded_image(s, IMAGE_ROLE_COLOR_INDEXING,
509  index_size, 1);
510  if (ret < 0)
511  return ret;
512 
513  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
514  img->size_reduction = width_bits;
515  if (width_bits > 0)
516  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
517 
518  /* color index values are delta-coded */
519  ct = img->frame->data[0] + 4;
520  for (x = 4; x < img->frame->width * 4; x++, ct++)
521  ct[0] += ct[-4];
522 
523  return 0;
524 }
525 
526 static HuffReader *get_huffman_group(WebPContext *s, ImageContext *img,
527  int x, int y)
528 {
529  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
530  int group = 0;
531 
532  if (gimg->size_reduction > 0) {
533  int group_x = x >> gimg->size_reduction;
534  int group_y = y >> gimg->size_reduction;
535  int g0 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
536  int g1 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
537  group = g0 << 8 | g1;
538  }
539 
540  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
541 }
542 
543 static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
544 {
545  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
546  img->color_cache[cache_idx] = c;
547 }
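/* Added note: the color cache is a small hash table of recently decoded ARGB
 * values; the top color_cache_bits bits of the 32-bit product 0x1E35A7BD * c
 * select the slot, so the table holds exactly (1 << color_cache_bits) entries
 * and is indexed directly by the symbols above
 * NUM_LITERAL_CODES + NUM_LENGTH_CODES in decode_entropy_coded_image(). */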
548 
549 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
550  int w, int h)
551 {
552  ImageContext *img;
553  HuffReader *hg;
554  int i, j, ret, x, y, width;
555 
556  img = &s->image[role];
557  img->role = role;
558 
559  if (!img->frame) {
560  img->frame = av_frame_alloc();
561  if (!img->frame)
562  return AVERROR(ENOMEM);
563  }
564 
565  img->frame->format = AV_PIX_FMT_ARGB;
566  img->frame->width = w;
567  img->frame->height = h;
568 
569  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
570  ThreadFrame pt = { .f = img->frame };
571  ret = ff_thread_get_buffer(s->avctx, &pt, 0);
572  } else
573  ret = av_frame_get_buffer(img->frame, 1);
574  if (ret < 0)
575  return ret;
576 
577  if (get_bits1(&s->gb)) {
578  img->color_cache_bits = get_bits(&s->gb, 4);
579  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
580  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
581  img->color_cache_bits);
582  return AVERROR_INVALIDDATA;
583  }
584  img->color_cache = av_mallocz_array(1 << img->color_cache_bits,
585  sizeof(*img->color_cache));
586  if (!img->color_cache)
587  return AVERROR(ENOMEM);
588  } else {
589  img->color_cache_bits = 0;
590  }
591 
592  img->nb_huffman_groups = 1;
593  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
594  ret = decode_entropy_image(s);
595  if (ret < 0)
596  return ret;
597  img->nb_huffman_groups = s->nb_huffman_groups;
598  }
599  img->huffman_groups = av_mallocz_array(img->nb_huffman_groups *
600  HUFFMAN_CODES_PER_META_CODE,
601  sizeof(*img->huffman_groups));
602  if (!img->huffman_groups)
603  return AVERROR(ENOMEM);
604 
605  for (i = 0; i < img->nb_huffman_groups; i++) {
606  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
607  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
608  int alphabet_size = alphabet_sizes[j];
609  if (!j && img->color_cache_bits > 0)
610  alphabet_size += 1 << img->color_cache_bits;
611 
612  if (get_bits1(&s->gb)) {
613  read_huffman_code_simple(s, &hg[j]);
614  } else {
615  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
616  if (ret < 0)
617  return ret;
618  }
619  }
620  }
621 
622  width = img->frame->width;
623  if (role == IMAGE_ROLE_ARGB && s->reduced_width > 0)
624  width = s->reduced_width;
625 
626  x = 0; y = 0;
627  while (y < img->frame->height) {
628  int v;
629 
630  if (get_bits_left(&s->gb) < 0)
631  return AVERROR_INVALIDDATA;
632 
633  hg = get_huffman_group(s, img, x, y);
634  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
635  if (v < NUM_LITERAL_CODES) {
636  /* literal pixel values */
637  uint8_t *p = GET_PIXEL(img->frame, x, y);
638  p[2] = v;
639  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
640  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
641  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
642  if (img->color_cache_bits)
643  color_cache_put(img, AV_RB32(p));
644  x++;
645  if (x == width) {
646  x = 0;
647  y++;
648  }
649  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
650  /* LZ77 backwards mapping */
651  int prefix_code, length, distance, ref_x, ref_y;
652 
653  /* parse length and distance */
654  prefix_code = v - NUM_LITERAL_CODES;
655  if (prefix_code < 4) {
656  length = prefix_code + 1;
657  } else {
658  int extra_bits = (prefix_code - 2) >> 1;
659  int offset = 2 + (prefix_code & 1) << extra_bits;
660  length = offset + get_bits(&s->gb, extra_bits) + 1;
661  }
662  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
663  if (prefix_code > 39U) {
664  av_log(s->avctx, AV_LOG_ERROR,
665  "distance prefix code too large: %d\n", prefix_code);
666  return AVERROR_INVALIDDATA;
667  }
668  if (prefix_code < 4) {
669  distance = prefix_code + 1;
670  } else {
671  int extra_bits = prefix_code - 2 >> 1;
672  int offset = 2 + (prefix_code & 1) << extra_bits;
673  distance = offset + get_bits(&s->gb, extra_bits) + 1;
674  }
675 
676  /* find reference location */
677  if (distance <= NUM_SHORT_DISTANCES) {
678  int xi = lz77_distance_offsets[distance - 1][0];
679  int yi = lz77_distance_offsets[distance - 1][1];
680  distance = FFMAX(1, xi + yi * width);
681  } else {
682  distance -= NUM_SHORT_DISTANCES;
683  }
684  ref_x = x;
685  ref_y = y;
686  if (distance <= x) {
687  ref_x -= distance;
688  distance = 0;
689  } else {
690  ref_x = 0;
691  distance -= x;
692  }
693  while (distance >= width) {
694  ref_y--;
695  distance -= width;
696  }
697  if (distance > 0) {
698  ref_x = width - distance;
699  ref_y--;
700  }
701  ref_x = FFMAX(0, ref_x);
702  ref_y = FFMAX(0, ref_y);
703 
704  if (ref_y == y && ref_x >= x)
705  return AVERROR_INVALIDDATA;
706 
707  /* copy pixels
708  * source and dest regions can overlap and wrap lines, so just
709  * copy per-pixel */
710  for (i = 0; i < length; i++) {
711  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
712  uint8_t *p = GET_PIXEL(img->frame, x, y);
713 
714  AV_COPY32(p, p_ref);
715  if (img->color_cache_bits)
716  color_cache_put(img, AV_RB32(p));
717  x++;
718  ref_x++;
719  if (x == width) {
720  x = 0;
721  y++;
722  }
723  if (ref_x == width) {
724  ref_x = 0;
725  ref_y++;
726  }
727  if (y == img->frame->height || ref_y == img->frame->height)
728  break;
729  }
730  } else {
731  /* read from color cache */
732  uint8_t *p = GET_PIXEL(img->frame, x, y);
733  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
734 
735  if (!img->color_cache_bits) {
736  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
737  return AVERROR_INVALIDDATA;
738  }
739  if (cache_idx >= 1 << img->color_cache_bits) {
740  av_log(s->avctx, AV_LOG_ERROR,
741  "color cache index out-of-bounds\n");
742  return AVERROR_INVALIDDATA;
743  }
744  AV_WB32(p, img->color_cache[cache_idx]);
745  x++;
746  if (x == width) {
747  x = 0;
748  y++;
749  }
750  }
751  }
752 
753  return 0;
754 }
755 
756 /* PRED_MODE_BLACK */
757 static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
758  const uint8_t *p_t, const uint8_t *p_tr)
759 {
760  AV_WB32(p, 0xFF000000);
761 }
762 
763 /* PRED_MODE_L */
764 static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
765  const uint8_t *p_t, const uint8_t *p_tr)
766 {
767  AV_COPY32(p, p_l);
768 }
769 
770 /* PRED_MODE_T */
771 static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
772  const uint8_t *p_t, const uint8_t *p_tr)
773 {
774  AV_COPY32(p, p_t);
775 }
776 
777 /* PRED_MODE_TR */
778 static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
779  const uint8_t *p_t, const uint8_t *p_tr)
780 {
781  AV_COPY32(p, p_tr);
782 }
783 
784 /* PRED_MODE_TL */
785 static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
786  const uint8_t *p_t, const uint8_t *p_tr)
787 {
788  AV_COPY32(p, p_tl);
789 }
790 
791 /* PRED_MODE_AVG_T_AVG_L_TR */
792 static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
793  const uint8_t *p_t, const uint8_t *p_tr)
794 {
795  p[0] = p_t[0] + (p_l[0] + p_tr[0] >> 1) >> 1;
796  p[1] = p_t[1] + (p_l[1] + p_tr[1] >> 1) >> 1;
797  p[2] = p_t[2] + (p_l[2] + p_tr[2] >> 1) >> 1;
798  p[3] = p_t[3] + (p_l[3] + p_tr[3] >> 1) >> 1;
799 }
800 
801 /* PRED_MODE_AVG_L_TL */
802 static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
803  const uint8_t *p_t, const uint8_t *p_tr)
804 {
805  p[0] = p_l[0] + p_tl[0] >> 1;
806  p[1] = p_l[1] + p_tl[1] >> 1;
807  p[2] = p_l[2] + p_tl[2] >> 1;
808  p[3] = p_l[3] + p_tl[3] >> 1;
809 }
810 
811 /* PRED_MODE_AVG_L_T */
812 static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
813  const uint8_t *p_t, const uint8_t *p_tr)
814 {
815  p[0] = p_l[0] + p_t[0] >> 1;
816  p[1] = p_l[1] + p_t[1] >> 1;
817  p[2] = p_l[2] + p_t[2] >> 1;
818  p[3] = p_l[3] + p_t[3] >> 1;
819 }
820 
821 /* PRED_MODE_AVG_TL_T */
822 static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
823  const uint8_t *p_t, const uint8_t *p_tr)
824 {
825  p[0] = p_tl[0] + p_t[0] >> 1;
826  p[1] = p_tl[1] + p_t[1] >> 1;
827  p[2] = p_tl[2] + p_t[2] >> 1;
828  p[3] = p_tl[3] + p_t[3] >> 1;
829 }
830 
831 /* PRED_MODE_AVG_T_TR */
832 static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
833  const uint8_t *p_t, const uint8_t *p_tr)
834 {
835  p[0] = p_t[0] + p_tr[0] >> 1;
836  p[1] = p_t[1] + p_tr[1] >> 1;
837  p[2] = p_t[2] + p_tr[2] >> 1;
838  p[3] = p_t[3] + p_tr[3] >> 1;
839 }
840 
841 /* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR */
842 static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
843  const uint8_t *p_t, const uint8_t *p_tr)
844 {
845  p[0] = (p_l[0] + p_tl[0] >> 1) + (p_t[0] + p_tr[0] >> 1) >> 1;
846  p[1] = (p_l[1] + p_tl[1] >> 1) + (p_t[1] + p_tr[1] >> 1) >> 1;
847  p[2] = (p_l[2] + p_tl[2] >> 1) + (p_t[2] + p_tr[2] >> 1) >> 1;
848  p[3] = (p_l[3] + p_tl[3] >> 1) + (p_t[3] + p_tr[3] >> 1) >> 1;
849 }
850 
851 /* PRED_MODE_SELECT */
852 static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
853  const uint8_t *p_t, const uint8_t *p_tr)
854 {
855  int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
856  (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
857  (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
858  (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
859  if (diff <= 0)
860  AV_COPY32(p, p_t);
861  else
862  AV_COPY32(p, p_l);
863 }
864 
865 /* PRED_MODE_ADD_SUBTRACT_FULL */
866 static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
867  const uint8_t *p_t, const uint8_t *p_tr)
868 {
869  p[0] = av_clip_uint8(p_l[0] + p_t[0] - p_tl[0]);
870  p[1] = av_clip_uint8(p_l[1] + p_t[1] - p_tl[1]);
871  p[2] = av_clip_uint8(p_l[2] + p_t[2] - p_tl[2]);
872  p[3] = av_clip_uint8(p_l[3] + p_t[3] - p_tl[3]);
873 }
874 
875 static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
876 {
877  int d = a + b >> 1;
878  return av_clip_uint8(d + (d - c) / 2);
879 }
880 
881 /* PRED_MODE_ADD_SUBTRACT_HALF */
882 static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
883  const uint8_t *p_t, const uint8_t *p_tr)
884 {
885  p[0] = clamp_add_subtract_half(p_l[0], p_t[0], p_tl[0]);
886  p[1] = clamp_add_subtract_half(p_l[1], p_t[1], p_tl[1]);
887  p[2] = clamp_add_subtract_half(p_l[2], p_t[2], p_tl[2]);
888  p[3] = clamp_add_subtract_half(p_l[3], p_t[3], p_tl[3]);
889 }
890 
891 typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
892  const uint8_t *p_tl, const uint8_t *p_t,
893  const uint8_t *p_tr);
894 
895 static const inv_predict_func inverse_predict[14] = {
896  inv_predict_0, inv_predict_1, inv_predict_2, inv_predict_3,
897  inv_predict_4, inv_predict_5, inv_predict_6, inv_predict_7,
898  inv_predict_8, inv_predict_9, inv_predict_10, inv_predict_11,
899  inv_predict_12, inv_predict_13,
900 };
901 
902 static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
903 {
904  uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
905  uint8_t p[4];
906 
907  dec = GET_PIXEL(frame, x, y);
908  p_l = GET_PIXEL(frame, x - 1, y);
909  p_tl = GET_PIXEL(frame, x - 1, y - 1);
910  p_t = GET_PIXEL(frame, x, y - 1);
911  if (x == frame->width - 1)
912  p_tr = GET_PIXEL(frame, 0, y);
913  else
914  p_tr = GET_PIXEL(frame, x + 1, y - 1);
915 
916  inverse_predict[m](p, p_l, p_tl, p_t, p_tr);
917 
918  dec[0] += p[0];
919  dec[1] += p[1];
920  dec[2] += p[2];
921  dec[3] += p[3];
922 }
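/* Added note: residuals are added to the prediction per channel with the
 * natural uint8_t wrap-around (mod 256), mirroring the encoder-side
 * subtraction. Also note the edge case above: for the last pixel of a row
 * the top-right neighbour wraps to the first pixel of the same row. */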
923 
924 static int apply_predictor_transform(WebPContext *s)
925 {
926  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
927  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
928  int x, y;
929 
930  for (y = 0; y < img->frame->height; y++) {
931  for (x = 0; x < img->frame->width; x++) {
932  int tx = x >> pimg->size_reduction;
933  int ty = y >> pimg->size_reduction;
934  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
935 
936  if (x == 0) {
937  if (y == 0)
938  m = PRED_MODE_BLACK;
939  else
940  m = PRED_MODE_T;
941  } else if (y == 0)
942  m = PRED_MODE_L;
943 
944  if (m > 13) {
945  av_log(s->avctx, AV_LOG_ERROR,
946  "invalid predictor mode: %d\n", m);
947  return AVERROR_INVALIDDATA;
948  }
949  inverse_prediction(img->frame, m, x, y);
950  }
951  }
952  return 0;
953 }
954 
955 static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
956  uint8_t color)
957 {
958  return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
959 }
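/* Added note: the cross-color delta is (signed(pred) * signed(value)) >> 5,
 * i.e. the stored byte acts as a fixed-point multiplier in roughly [-4, 4).
 * For example, a transform byte of 0x20 (signed 32) applied to a green value
 * of 64 contributes 32 * 64 >> 5 = 64. In apply_color_transform() below,
 * cp[3] is the green-to-red element, cp[2] green-to-blue and cp[1]
 * red-to-blue. */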
960 
961 static int apply_color_transform(WebPContext *s)
962 {
963  ImageContext *img, *cimg;
964  int x, y, cx, cy;
965  uint8_t *p, *cp;
966 
967  img = &s->image[IMAGE_ROLE_ARGB];
968  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
969 
970  for (y = 0; y < img->frame->height; y++) {
971  for (x = 0; x < img->frame->width; x++) {
972  cx = x >> cimg->size_reduction;
973  cy = y >> cimg->size_reduction;
974  cp = GET_PIXEL(cimg->frame, cx, cy);
975  p = GET_PIXEL(img->frame, x, y);
976 
977  p[1] += color_transform_delta(cp[3], p[2]);
978  p[3] += color_transform_delta(cp[2], p[2]) +
979  color_transform_delta(cp[1], p[1]);
980  }
981  }
982  return 0;
983 }
984 
985 static int apply_subtract_green_transform(WebPContext *s)
986 {
987  int x, y;
988  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
989 
990  for (y = 0; y < img->frame->height; y++) {
991  for (x = 0; x < img->frame->width; x++) {
992  uint8_t *p = GET_PIXEL(img->frame, x, y);
993  p[1] += p[2];
994  p[3] += p[2];
995  }
996  }
997  return 0;
998 }
999 
1000 static int apply_color_indexing_transform(WebPContext *s)
1001 {
1002  ImageContext *img;
1003  ImageContext *pal;
1004  int i, x, y;
1005  uint8_t *p;
1006 
1007  img = &s->image[IMAGE_ROLE_ARGB];
1008  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1009 
1010  if (pal->size_reduction > 0) {
1011  GetBitContext gb_g;
1012  uint8_t *line;
1013  int pixel_bits = 8 >> pal->size_reduction;
1014 
1015  line = av_malloc(img->frame->linesize[0] + AV_INPUT_BUFFER_PADDING_SIZE);
1016  if (!line)
1017  return AVERROR(ENOMEM);
1018 
1019  for (y = 0; y < img->frame->height; y++) {
1020  p = GET_PIXEL(img->frame, 0, y);
1021  memcpy(line, p, img->frame->linesize[0]);
1022  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1023  skip_bits(&gb_g, 16);
1024  i = 0;
1025  for (x = 0; x < img->frame->width; x++) {
1026  p = GET_PIXEL(img->frame, x, y);
1027  p[2] = get_bits(&gb_g, pixel_bits);
1028  i++;
1029  if (i == 1 << pal->size_reduction) {
1030  skip_bits(&gb_g, 24);
1031  i = 0;
1032  }
1033  }
1034  }
1035  av_free(line);
1036  }
1037 
1038  // switch to local palette if it's worth initializing it
1039  if (img->frame->height * img->frame->width > 300) {
1040  uint8_t palette[256 * 4];
1041  const int size = pal->frame->width * 4;
1042  av_assert0(size <= 1024U);
1043  memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size); // copy palette
1044  // set extra entries to transparent black
1045  memset(palette + size, 0, 256 * 4 - size);
1046  for (y = 0; y < img->frame->height; y++) {
1047  for (x = 0; x < img->frame->width; x++) {
1048  p = GET_PIXEL(img->frame, x, y);
1049  i = p[2];
1050  AV_COPY32(p, &palette[i * 4]);
1051  }
1052  }
1053  } else {
1054  for (y = 0; y < img->frame->height; y++) {
1055  for (x = 0; x < img->frame->width; x++) {
1056  p = GET_PIXEL(img->frame, x, y);
1057  i = p[2];
1058  if (i >= pal->frame->width) {
1059  AV_WB32(p, 0x00000000);
1060  } else {
1061  const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
1062  AV_COPY32(p, pi);
1063  }
1064  }
1065  }
1066  }
1067 
1068  return 0;
1069 }
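/* Added note: when the palette has at most 16 entries, several indices are
 * bit-packed into each green byte; the size_reduction/pixel_bits handling
 * above unpacks them. For example, a 4-color palette uses 2 bits per index,
 * so one green byte carries the indices of 4 consecutive output pixels. */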
1070 
1071 static void update_canvas_size(AVCodecContext *avctx, int w, int h)
1072 {
1073  WebPContext *s = avctx->priv_data;
1074  if (s->width && s->width != w) {
1075  av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
1076  s->width, w);
1077  }
1078  s->width = w;
1079  if (s->height && s->height != h) {
1080  av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
1081  s->height, h);
1082  }
1083  s->height = h;
1084 }
1085 
1086 static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
1087  int *got_frame, uint8_t *data_start,
1088  unsigned int data_size, int is_alpha_chunk)
1089 {
1090  WebPContext *s = avctx->priv_data;
1091  int w, h, ret, i, used;
1092 
1093  if (!is_alpha_chunk) {
1094  s->lossless = 1;
1095  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1096  }
1097 
1098  ret = init_get_bits8(&s->gb, data_start, data_size);
1099  if (ret < 0)
1100  return ret;
1101 
1102  if (!is_alpha_chunk) {
1103  if (get_bits(&s->gb, 8) != 0x2F) {
1104  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1105  return AVERROR_INVALIDDATA;
1106  }
1107 
1108  w = get_bits(&s->gb, 14) + 1;
1109  h = get_bits(&s->gb, 14) + 1;
1110 
1111  update_canvas_size(avctx, w, h);
1112 
1113  ret = ff_set_dimensions(avctx, s->width, s->height);
1114  if (ret < 0)
1115  return ret;
1116 
1117  s->has_alpha = get_bits1(&s->gb);
1118 
1119  if (get_bits(&s->gb, 3) != 0x0) {
1120  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1121  return AVERROR_INVALIDDATA;
1122  }
1123  } else {
1124  if (!s->width || !s->height)
1125  return AVERROR_BUG;
1126  w = s->width;
1127  h = s->height;
1128  }
1129 
1130  /* parse transformations */
1131  s->nb_transforms = 0;
1132  s->reduced_width = 0;
1133  used = 0;
1134  while (get_bits1(&s->gb)) {
1135  enum TransformType transform = get_bits(&s->gb, 2);
1136  if (used & (1 << transform)) {
1137  av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
1138  transform);
1139  ret = AVERROR_INVALIDDATA;
1140  goto free_and_return;
1141  }
1142  used |= (1 << transform);
1143  s->transforms[s->nb_transforms++] = transform;
1144  switch (transform) {
1145  case PREDICTOR_TRANSFORM:
1146  ret = parse_transform_predictor(s);
1147  break;
1148  case COLOR_TRANSFORM:
1149  ret = parse_transform_color(s);
1150  break;
1151  case COLOR_INDEXING_TRANSFORM:
1152  ret = parse_transform_color_indexing(s);
1153  break;
1154  }
1155  if (ret < 0)
1156  goto free_and_return;
1157  }
1158 
1159  /* decode primary image */
1160  s->image[IMAGE_ROLE_ARGB].frame = p;
1161  if (is_alpha_chunk)
1162  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
1163  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ARGB, w, h);
1164  if (ret < 0)
1165  goto free_and_return;
1166 
1167  /* apply transformations */
1168  for (i = s->nb_transforms - 1; i >= 0; i--) {
1169  switch (s->transforms[i]) {
1170  case PREDICTOR_TRANSFORM:
1171  ret = apply_predictor_transform(s);
1172  break;
1173  case COLOR_TRANSFORM:
1174  ret = apply_color_transform(s);
1175  break;
1176  case SUBTRACT_GREEN:
1177  ret = apply_subtract_green_transform(s);
1178  break;
1179  case COLOR_INDEXING_TRANSFORM:
1180  ret = apply_color_indexing_transform(s);
1181  break;
1182  }
1183  if (ret < 0)
1184  goto free_and_return;
1185  }
1186 
1187  *got_frame = 1;
1188  p->pict_type = AV_PICTURE_TYPE_I;
1189  p->key_frame = 1;
1190  ret = data_size;
1191 
1192 free_and_return:
1193  for (i = 0; i < IMAGE_ROLE_NB; i++)
1194  image_ctx_free(&s->image[i]);
1195 
1196  return ret;
1197 }
1198 
1199 static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
1200 {
1201  int x, y, ls;
1202  uint8_t *dec;
1203 
1204  ls = frame->linesize[3];
1205 
1206  /* filter first row using horizontal filter */
1207  dec = frame->data[3] + 1;
1208  for (x = 1; x < frame->width; x++, dec++)
1209  *dec += *(dec - 1);
1210 
1211  /* filter first column using vertical filter */
1212  dec = frame->data[3] + ls;
1213  for (y = 1; y < frame->height; y++, dec += ls)
1214  *dec += *(dec - ls);
1215 
1216  /* filter the rest using the specified filter */
1217  switch (m) {
1218  case ALPHA_FILTER_HORIZONTAL:
1219  for (y = 1; y < frame->height; y++) {
1220  dec = frame->data[3] + y * ls + 1;
1221  for (x = 1; x < frame->width; x++, dec++)
1222  *dec += *(dec - 1);
1223  }
1224  break;
1225  case ALPHA_FILTER_VERTICAL:
1226  for (y = 1; y < frame->height; y++) {
1227  dec = frame->data[3] + y * ls + 1;
1228  for (x = 1; x < frame->width; x++, dec++)
1229  *dec += *(dec - ls);
1230  }
1231  break;
1232  case ALPHA_FILTER_GRADIENT:
1233  for (y = 1; y < frame->height; y++) {
1234  dec = frame->data[3] + y * ls + 1;
1235  for (x = 1; x < frame->width; x++, dec++)
1236  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1237  }
1238  break;
1239  }
1240 }
1241 
1242 static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p,
1243  uint8_t *data_start,
1244  unsigned int data_size)
1245 {
1246  WebPContext *s = avctx->priv_data;
1247  int x, y, ret;
1248 
1249  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1250  GetByteContext gb;
1251 
1252  bytestream2_init(&gb, data_start, data_size);
1253  for (y = 0; y < s->height; y++)
1254  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1255  s->width);
1256  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
1257  uint8_t *ap, *pp;
1258  int alpha_got_frame = 0;
1259 
1260  s->alpha_frame = av_frame_alloc();
1261  if (!s->alpha_frame)
1262  return AVERROR(ENOMEM);
1263 
1264  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1265  data_start, data_size, 1);
1266  if (ret < 0) {
1267  av_frame_free(&s->alpha_frame);
1268  return ret;
1269  }
1270  if (!alpha_got_frame) {
1271  av_frame_free(&s->alpha_frame);
1272  return AVERROR_INVALIDDATA;
1273  }
1274 
1275  /* copy green component of alpha image to alpha plane of primary image */
1276  for (y = 0; y < s->height; y++) {
1277  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1278  pp = p->data[3] + p->linesize[3] * y;
1279  for (x = 0; x < s->width; x++) {
1280  *pp = *ap;
1281  pp++;
1282  ap += 4;
1283  }
1284  }
1285  av_frame_free(&s->alpha_frame);
1286  }
1287 
1288  /* apply alpha filtering */
1289  if (s->alpha_filter)
1290  alpha_inverse_prediction(p, s->alpha_filter);
1291 
1292  return 0;
1293 }
1294 
1295 static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p,
1296  int *got_frame, uint8_t *data_start,
1297  unsigned int data_size)
1298 {
1299  WebPContext *s = avctx->priv_data;
1300  int ret;
1301 
1302  if (!s->initialized) {
1303  ff_vp8_decode_init(avctx);
1304  s->initialized = 1;
1305  s->v.actually_webp = 1;
1306  }
1307  avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
1308  s->lossless = 0;
1309 
1310  if (data_size > INT_MAX) {
1311  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1312  return AVERROR_PATCHWELCOME;
1313  }
1314 
1315  av_packet_unref(s->pkt);
1316  s->pkt->data = data_start;
1317  s->pkt->size = data_size;
1318 
1319  ret = ff_vp8_decode_frame(avctx, p, got_frame, s->pkt);
1320  if (ret < 0)
1321  return ret;
1322 
1323  if (!*got_frame)
1324  return AVERROR_INVALIDDATA;
1325 
1326  update_canvas_size(avctx, avctx->width, avctx->height);
1327 
1328  if (s->has_alpha) {
1329  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1330  s->alpha_data_size);
1331  if (ret < 0)
1332  return ret;
1333  }
1334  return ret;
1335 }
1336 
1337 static int webp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1338  AVPacket *avpkt)
1339 {
1340  AVFrame * const p = data;
1341  WebPContext *s = avctx->priv_data;
1342  GetByteContext gb;
1343  int ret;
1344  uint32_t chunk_type, chunk_size;
1345  int vp8x_flags = 0;
1346 
1347  s->avctx = avctx;
1348  s->width = 0;
1349  s->height = 0;
1350  *got_frame = 0;
1351  s->has_alpha = 0;
1352  s->has_exif = 0;
1353  s->has_iccp = 0;
1354  bytestream2_init(&gb, avpkt->data, avpkt->size);
1355 
1356  if (bytestream2_get_bytes_left(&gb) < 12)
1357  return AVERROR_INVALIDDATA;
1358 
1359  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1360  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1361  return AVERROR_INVALIDDATA;
1362  }
1363 
1364  chunk_size = bytestream2_get_le32(&gb);
1365  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1366  return AVERROR_INVALIDDATA;
1367 
1368  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1369  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1370  return AVERROR_INVALIDDATA;
1371  }
1372 
1373  while (bytestream2_get_bytes_left(&gb) > 8) {
1374  char chunk_str[5] = { 0 };
1375 
1376  chunk_type = bytestream2_get_le32(&gb);
1377  chunk_size = bytestream2_get_le32(&gb);
1378  if (chunk_size == UINT32_MAX)
1379  return AVERROR_INVALIDDATA;
1380  chunk_size += chunk_size & 1;
1381 
1382  if (bytestream2_get_bytes_left(&gb) < chunk_size) {
1383  /* we seem to be running out of data, but it could also be that the
1384  bitstream has trailing junk leading to bogus chunk_size. */
1385  break;
1386  }
1387 
1388  switch (chunk_type) {
1389  case MKTAG('V', 'P', '8', ' '):
1390  if (!*got_frame) {
1391  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1392  avpkt->data + bytestream2_tell(&gb),
1393  chunk_size);
1394  if (ret < 0)
1395  return ret;
1396  }
1397  bytestream2_skip(&gb, chunk_size);
1398  break;
1399  case MKTAG('V', 'P', '8', 'L'):
1400  if (!*got_frame) {
1401  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1402  avpkt->data + bytestream2_tell(&gb),
1403  chunk_size, 0);
1404  if (ret < 0)
1405  return ret;
1406  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
1407  }
1408  bytestream2_skip(&gb, chunk_size);
1409  break;
1410  case MKTAG('V', 'P', '8', 'X'):
1411  if (s->width || s->height || *got_frame) {
1412  av_log(avctx, AV_LOG_ERROR, "Canvas dimensions are already set\n");
1413  return AVERROR_INVALIDDATA;
1414  }
1415  vp8x_flags = bytestream2_get_byte(&gb);
1416  bytestream2_skip(&gb, 3);
1417  s->width = bytestream2_get_le24(&gb) + 1;
1418  s->height = bytestream2_get_le24(&gb) + 1;
1419  ret = av_image_check_size(s->width, s->height, 0, avctx);
1420  if (ret < 0)
1421  return ret;
1422  break;
1423  case MKTAG('A', 'L', 'P', 'H'): {
1424  int alpha_header, filter_m, compression;
1425 
1426  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1427  av_log(avctx, AV_LOG_WARNING,
1428  "ALPHA chunk present, but alpha bit not set in the "
1429  "VP8X header\n");
1430  }
1431  if (chunk_size == 0) {
1432  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1433  return AVERROR_INVALIDDATA;
1434  }
1435  alpha_header = bytestream2_get_byte(&gb);
1436  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1437  s->alpha_data_size = chunk_size - 1;
1438  bytestream2_skip(&gb, s->alpha_data_size);
1439 
1440  filter_m = (alpha_header >> 2) & 0x03;
1441  compression = alpha_header & 0x03;
1442 
1443  if (compression > ALPHA_COMPRESSION_VP8L) {
1444  av_log(avctx, AV_LOG_VERBOSE,
1445  "skipping unsupported ALPHA chunk\n");
1446  } else {
1447  s->has_alpha = 1;
1448  s->alpha_compression = compression;
1449  s->alpha_filter = filter_m;
1450  }
1451 
1452  break;
1453  }
1454  case MKTAG('E', 'X', 'I', 'F'): {
1455  int le, ifd_offset, exif_offset = bytestream2_tell(&gb);
1456  AVDictionary *exif_metadata = NULL;
1457  GetByteContext exif_gb;
1458 
1459  if (s->has_exif) {
1460  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1461  goto exif_end;
1462  }
1463  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1464  av_log(avctx, AV_LOG_WARNING,
1465  "EXIF chunk present, but Exif bit not set in the "
1466  "VP8X header\n");
1467 
1468  s->has_exif = 1;
1469  bytestream2_init(&exif_gb, avpkt->data + exif_offset,
1470  avpkt->size - exif_offset);
1471  if (ff_tdecode_header(&exif_gb, &le, &ifd_offset) < 0) {
1472  av_log(avctx, AV_LOG_ERROR, "invalid TIFF header "
1473  "in Exif data\n");
1474  goto exif_end;
1475  }
1476 
1477  bytestream2_seek(&exif_gb, ifd_offset, SEEK_SET);
1478  if (ff_exif_decode_ifd(avctx, &exif_gb, le, 0, &exif_metadata) < 0) {
1479  av_log(avctx, AV_LOG_ERROR, "error decoding Exif data\n");
1480  goto exif_end;
1481  }
1482 
1483  av_dict_copy(&((AVFrame *) data)->metadata, exif_metadata, 0);
1484 
1485 exif_end:
1486  av_dict_free(&exif_metadata);
1487  bytestream2_skip(&gb, chunk_size);
1488  break;
1489  }
1490  case MKTAG('I', 'C', 'C', 'P'): {
1491  AVFrameSideData *sd;
1492 
1493  if (s->has_iccp) {
1494  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
1495  bytestream2_skip(&gb, chunk_size);
1496  break;
1497  }
1498  if (!(vp8x_flags & VP8X_FLAG_ICC))
1499  av_log(avctx, AV_LOG_WARNING,
1500  "ICCP chunk present, but ICC Profile bit not set in the "
1501  "VP8X header\n");
1502 
1503  s->has_iccp = 1;
1504  sd = av_frame_new_side_data(p, AV_FRAME_DATA_ICC_PROFILE, chunk_size);
1505  if (!sd)
1506  return AVERROR(ENOMEM);
1507 
1508  bytestream2_get_buffer(&gb, sd->data, chunk_size);
1509  break;
1510  }
1511  case MKTAG('A', 'N', 'I', 'M'):
1512  case MKTAG('A', 'N', 'M', 'F'):
1513  case MKTAG('X', 'M', 'P', ' '):
1514  AV_WL32(chunk_str, chunk_type);
1515  av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
1516  chunk_str);
1517  bytestream2_skip(&gb, chunk_size);
1518  break;
1519  default:
1520  AV_WL32(chunk_str, chunk_type);
1521  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1522  chunk_str);
1523  bytestream2_skip(&gb, chunk_size);
1524  break;
1525  }
1526  }
1527 
1528  if (!*got_frame) {
1529  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1530  return AVERROR_INVALIDDATA;
1531  }
1532 
1533  return avpkt->size;
1534 }
1535 
1536 static av_cold int webp_decode_init(AVCodecContext *avctx)
1537 {
1538  WebPContext *s = avctx->priv_data;
1539 
1540  s->pkt = av_packet_alloc();
1541  if (!s->pkt)
1542  return AVERROR(ENOMEM);
1543 
1544  return 0;
1545 }
1546 
1547 static av_cold int webp_decode_close(AVCodecContext *avctx)
1548 {
1549  WebPContext *s = avctx->priv_data;
1550 
1551  av_packet_free(&s->pkt);
1552 
1553  if (s->initialized)
1554  return ff_vp8_decode_free(avctx);
1555 
1556  return 0;
1557 }
1558 
1559 AVCodec ff_webp_decoder = {
1560  .name = "webp",
1561  .long_name = NULL_IF_CONFIG_SMALL("WebP image"),
1562  .type = AVMEDIA_TYPE_VIDEO,
1563  .id = AV_CODEC_ID_WEBP,
1564  .priv_data_size = sizeof(WebPContext),
1565  .init = webp_decode_init,
1566  .decode = webp_decode_frame,
1567  .close = webp_decode_close,
1568  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1569 };