FFmpeg  4.4.6
hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/display.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mastering_display_metadata.h"
31 #include "libavutil/md5.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/stereo3d.h"
35 #include "libavutil/timecode.h"
36 
37 #include "bswapdsp.h"
38 #include "bytestream.h"
39 #include "cabac_functions.h"
40 #include "golomb.h"
41 #include "hevc.h"
42 #include "hevc_data.h"
43 #include "hevc_parse.h"
44 #include "hevcdec.h"
45 #include "hwconfig.h"
46 #include "profiles.h"
47 
48 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
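/*
 * Illustration (not part of the upstream file): the table above maps a
 * prediction block width in luma samples to the index used for the hevcdsp
 * function tables further below, e.g.
 *
 *     int idx = ff_hevc_pel_weight[32];   (idx == 7)
 *
 * so a 32-sample-wide block selects entry 7 of put_hevc_qpel_uni[] and the
 * related put_hevc_* tables; only the listed widths occur in practice.
 */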
49 
50 /**
51  * NOTE: Each function hls_foo corresponds to the function foo in the
52  * specification (HLS stands for High Level Syntax).
53  */
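/*
 * For example, hls_sao_param() below parses the sao( ) syntax structure and
 * hls_transform_tree() parses transform_tree( ) from the specification.
 */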
54 
55 /**
56  * Section 5.7
57  */
58 
59 /* free everything allocated by pic_arrays_init() */
60 static void pic_arrays_free(HEVCContext *s)
61 {
62  av_freep(&s->sao);
63  av_freep(&s->deblock);
64 
65  av_freep(&s->skip_flag);
66  av_freep(&s->tab_ct_depth);
67 
68  av_freep(&s->tab_ipm);
69  av_freep(&s->cbf_luma);
70  av_freep(&s->is_pcm);
71 
72  av_freep(&s->qp_y_tab);
73  av_freep(&s->tab_slice_address);
74  av_freep(&s->filter_slice_edges);
75 
76  av_freep(&s->horizontal_bs);
77  av_freep(&s->vertical_bs);
78 
79  av_freep(&s->sh.entry_point_offset);
80  av_freep(&s->sh.size);
81  av_freep(&s->sh.offset);
82 
83  av_buffer_pool_uninit(&s->tab_mvf_pool);
84  av_buffer_pool_uninit(&s->rpl_tab_pool);
85 }
86 
87 /* allocate arrays that depend on frame dimensions */
88 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
89 {
90  int log2_min_cb_size = sps->log2_min_cb_size;
91  int width = sps->width;
92  int height = sps->height;
93  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
94  ((height >> log2_min_cb_size) + 1);
95  int ctb_count = sps->ctb_width * sps->ctb_height;
96  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
97 
98  s->bs_width = (width >> 2) + 1;
99  s->bs_height = (height >> 2) + 1;
100 
101  s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao));
102  s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock));
103  if (!s->sao || !s->deblock)
104  goto fail;
105 
106  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
107  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
108  if (!s->skip_flag || !s->tab_ct_depth)
109  goto fail;
110 
111  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
112  s->tab_ipm = av_mallocz(min_pu_size);
113  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
114  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
115  goto fail;
116 
117  s->filter_slice_edges = av_mallocz(ctb_count);
118  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
119  sizeof(*s->tab_slice_address));
120  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
121  sizeof(*s->qp_y_tab));
122  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
123  goto fail;
124 
125  s->horizontal_bs = av_mallocz_array(s->bs_width, s->bs_height);
126  s->vertical_bs = av_mallocz_array(s->bs_width, s->bs_height);
127  if (!s->horizontal_bs || !s->vertical_bs)
128  goto fail;
129 
130  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
131  av_buffer_allocz);
132  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
133  av_buffer_allocz);
134  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
135  goto fail;
136 
137  return 0;
138 
139 fail:
140  pic_arrays_free(s);
141  return AVERROR(ENOMEM);
142 }
143 
144 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
145 {
146  int i = 0;
147  int j = 0;
148  uint8_t luma_weight_l0_flag[16];
149  uint8_t chroma_weight_l0_flag[16];
150  uint8_t luma_weight_l1_flag[16];
151  uint8_t chroma_weight_l1_flag[16];
152  int luma_log2_weight_denom;
153 
154  luma_log2_weight_denom = get_ue_golomb_long(gb);
155  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
156  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
157  return AVERROR_INVALIDDATA;
158  }
159  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
160  if (s->ps.sps->chroma_format_idc != 0) {
161  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
162  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
163  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
164  return AVERROR_INVALIDDATA;
165  }
166  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
167  }
168 
169  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
170  luma_weight_l0_flag[i] = get_bits1(gb);
171  if (!luma_weight_l0_flag[i]) {
172  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
173  s->sh.luma_offset_l0[i] = 0;
174  }
175  }
176  if (s->ps.sps->chroma_format_idc != 0) {
177  for (i = 0; i < s->sh.nb_refs[L0]; i++)
178  chroma_weight_l0_flag[i] = get_bits1(gb);
179  } else {
180  for (i = 0; i < s->sh.nb_refs[L0]; i++)
181  chroma_weight_l0_flag[i] = 0;
182  }
183  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
184  if (luma_weight_l0_flag[i]) {
185  int delta_luma_weight_l0 = get_se_golomb(gb);
186  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
187  return AVERROR_INVALIDDATA;
188  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
189  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
190  }
191  if (chroma_weight_l0_flag[i]) {
192  for (j = 0; j < 2; j++) {
193  int delta_chroma_weight_l0 = get_se_golomb(gb);
194  int delta_chroma_offset_l0 = get_se_golomb(gb);
195 
196  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
197  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
198  return AVERROR_INVALIDDATA;
199  }
200 
201  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
202  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
203  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
204  }
205  } else {
206  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
207  s->sh.chroma_offset_l0[i][0] = 0;
208  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
209  s->sh.chroma_offset_l0[i][1] = 0;
210  }
211  }
212  if (s->sh.slice_type == HEVC_SLICE_B) {
213  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
214  luma_weight_l1_flag[i] = get_bits1(gb);
215  if (!luma_weight_l1_flag[i]) {
216  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
217  s->sh.luma_offset_l1[i] = 0;
218  }
219  }
220  if (s->ps.sps->chroma_format_idc != 0) {
221  for (i = 0; i < s->sh.nb_refs[L1]; i++)
222  chroma_weight_l1_flag[i] = get_bits1(gb);
223  } else {
224  for (i = 0; i < s->sh.nb_refs[L1]; i++)
225  chroma_weight_l1_flag[i] = 0;
226  }
227  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
228  if (luma_weight_l1_flag[i]) {
229  int delta_luma_weight_l1 = get_se_golomb(gb);
230  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
231  return AVERROR_INVALIDDATA;
232  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
233  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
234  }
235  if (chroma_weight_l1_flag[i]) {
236  for (j = 0; j < 2; j++) {
237  int delta_chroma_weight_l1 = get_se_golomb(gb);
238  int delta_chroma_offset_l1 = get_se_golomb(gb);
239 
240  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
241  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
242  return AVERROR_INVALIDDATA;
243  }
244 
245  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
246  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
247  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
248  }
249  } else {
250  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
251  s->sh.chroma_offset_l1[i][0] = 0;
252  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
253  s->sh.chroma_offset_l1[i][1] = 0;
254  }
255  }
256  }
257  return 0;
258 }
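/*
 * Worked example (illustration only): with luma_log2_weight_denom == 6 and
 * delta_luma_weight_l0 == -4, luma_weight_l0[i] becomes (1 << 6) - 4 == 60,
 * so a weighted luma prediction sample is scaled by roughly 60/64 before the
 * per-reference offset luma_offset_l0[i] is added (see the *_uni_w/_bi_w
 * hevcdsp calls further below).
 */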
259 
260 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
261 {
262  const HEVCSPS *sps = s->ps.sps;
263  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
264  int prev_delta_msb = 0;
265  unsigned int nb_sps = 0, nb_sh;
266  int i;
267 
268  rps->nb_refs = 0;
269  if (!sps->long_term_ref_pics_present_flag)
270  return 0;
271 
272  if (sps->num_long_term_ref_pics_sps > 0)
273  nb_sps = get_ue_golomb_long(gb);
274  nb_sh = get_ue_golomb_long(gb);
275 
276  if (nb_sps > sps->num_long_term_ref_pics_sps)
277  return AVERROR_INVALIDDATA;
278  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
279  return AVERROR_INVALIDDATA;
280 
281  rps->nb_refs = nb_sh + nb_sps;
282 
283  for (i = 0; i < rps->nb_refs; i++) {
284 
285  if (i < nb_sps) {
286  uint8_t lt_idx_sps = 0;
287 
288  if (sps->num_long_term_ref_pics_sps > 1)
289  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
290 
291  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
292  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
293  } else {
294  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
295  rps->used[i] = get_bits1(gb);
296  }
297 
298  rps->poc_msb_present[i] = get_bits1(gb);
299  if (rps->poc_msb_present[i]) {
300  int64_t delta = get_ue_golomb_long(gb);
301  int64_t poc;
302 
303  if (i && i != nb_sps)
304  delta += prev_delta_msb;
305 
306  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
307  if (poc != (int32_t)poc)
308  return AVERROR_INVALIDDATA;
309  rps->poc[i] = poc;
310  prev_delta_msb = delta;
311  }
312  }
313 
314  return 0;
315 }
316 
317 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
318 {
319  AVCodecContext *avctx = s->avctx;
320  const HEVCParamSets *ps = &s->ps;
321  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
322  const HEVCWindow *ow = &sps->output_window;
323  unsigned int num = 0, den = 0;
324 
325  avctx->pix_fmt = sps->pix_fmt;
326  avctx->coded_width = sps->width;
327  avctx->coded_height = sps->height;
328  avctx->width = sps->width - ow->left_offset - ow->right_offset;
329  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
330  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
331  avctx->profile = sps->ptl.general_ptl.profile_idc;
332  avctx->level = sps->ptl.general_ptl.level_idc;
333 
334  ff_set_sar(avctx, sps->vui.sar);
335 
336  if (sps->vui.video_signal_type_present_flag)
337  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
338  : AVCOL_RANGE_MPEG;
339  else
340  avctx->color_range = AVCOL_RANGE_MPEG;
341 
342  if (sps->vui.colour_description_present_flag) {
343  avctx->color_primaries = sps->vui.colour_primaries;
344  avctx->color_trc = sps->vui.transfer_characteristic;
345  avctx->colorspace = sps->vui.matrix_coeffs;
346  } else {
347  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
348  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
349  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
350  }
351 
352  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
353  if (sps->chroma_format_idc == 1) {
354  if (sps->vui.chroma_loc_info_present_flag) {
355  if (sps->vui.chroma_sample_loc_type_top_field <= 5)
356  avctx->chroma_sample_location = sps->vui.chroma_sample_loc_type_top_field + 1;
357  } else
358  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
359  }
360 
361  if (vps->vps_timing_info_present_flag) {
362  num = vps->vps_num_units_in_tick;
363  den = vps->vps_time_scale;
364  } else if (sps->vui.vui_timing_info_present_flag) {
365  num = sps->vui.vui_num_units_in_tick;
366  den = sps->vui.vui_time_scale;
367  }
368 
369  if (num != 0 && den != 0)
370  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
371  num, den, 1 << 30);
372 }
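/*
 * Note on the av_reduce() call above: the VPS/VUI timing info gives the tick
 * length as num_units_in_tick / time_scale, so the frame rate is its inverse;
 * writing the reduced fraction into (den, num) swapped yields e.g.
 * time_scale 50000 with num_units_in_tick 1000 -> avctx->framerate = 50/1.
 */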
373 
374 static int export_stream_params_from_sei(HEVCContext *s)
375 {
376  AVCodecContext *avctx = s->avctx;
377 
378  if (s->sei.a53_caption.buf_ref)
379  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
380 
381  if (s->sei.alternative_transfer.present &&
382  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
383  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
384  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
385  }
386 
387  return 0;
388 }
389 
390 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
391 {
392 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
393  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
394  CONFIG_HEVC_NVDEC_HWACCEL + \
395  CONFIG_HEVC_VAAPI_HWACCEL + \
396  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
397  CONFIG_HEVC_VDPAU_HWACCEL)
398  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
399 
400  switch (sps->pix_fmt) {
401  case AV_PIX_FMT_YUV420P:
402  case AV_PIX_FMT_YUVJ420P:
403 #if CONFIG_HEVC_DXVA2_HWACCEL
404  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
405 #endif
406 #if CONFIG_HEVC_D3D11VA_HWACCEL
407  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
408  *fmt++ = AV_PIX_FMT_D3D11;
409 #endif
410 #if CONFIG_HEVC_VAAPI_HWACCEL
411  *fmt++ = AV_PIX_FMT_VAAPI;
412 #endif
413 #if CONFIG_HEVC_VDPAU_HWACCEL
414  *fmt++ = AV_PIX_FMT_VDPAU;
415 #endif
416 #if CONFIG_HEVC_NVDEC_HWACCEL
417  *fmt++ = AV_PIX_FMT_CUDA;
418 #endif
419 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
420  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
421 #endif
422  break;
423  case AV_PIX_FMT_YUV420P10:
424 #if CONFIG_HEVC_DXVA2_HWACCEL
425  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
426 #endif
427 #if CONFIG_HEVC_D3D11VA_HWACCEL
428  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
429  *fmt++ = AV_PIX_FMT_D3D11;
430 #endif
431 #if CONFIG_HEVC_VAAPI_HWACCEL
432  *fmt++ = AV_PIX_FMT_VAAPI;
433 #endif
434 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
435  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
436 #endif
437 #if CONFIG_HEVC_VDPAU_HWACCEL
438  *fmt++ = AV_PIX_FMT_VDPAU;
439 #endif
440 #if CONFIG_HEVC_NVDEC_HWACCEL
441  *fmt++ = AV_PIX_FMT_CUDA;
442 #endif
443  break;
444  case AV_PIX_FMT_YUV444P:
445 #if CONFIG_HEVC_VDPAU_HWACCEL
446  *fmt++ = AV_PIX_FMT_VDPAU;
447 #endif
448 #if CONFIG_HEVC_NVDEC_HWACCEL
449  *fmt++ = AV_PIX_FMT_CUDA;
450 #endif
451  break;
452  case AV_PIX_FMT_YUV422P:
453  case AV_PIX_FMT_YUV422P10LE:
454 #if CONFIG_HEVC_VAAPI_HWACCEL
455  *fmt++ = AV_PIX_FMT_VAAPI;
456 #endif
457  break;
461 #if CONFIG_HEVC_VDPAU_HWACCEL
462  *fmt++ = AV_PIX_FMT_VDPAU;
463 #endif
464 #if CONFIG_HEVC_NVDEC_HWACCEL
465  *fmt++ = AV_PIX_FMT_CUDA;
466 #endif
467  break;
468  }
469 
470  *fmt++ = sps->pix_fmt;
471  *fmt = AV_PIX_FMT_NONE;
472 
473  return ff_thread_get_format(s->avctx, pix_fmts);
474 }
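/*
 * A minimal sketch (hypothetical callback name, standard libavcodec API) of a
 * user-side get_format callback that receives the list built above; hwaccel
 * formats come first and sps->pix_fmt is the last entry before AV_PIX_FMT_NONE:
 *
 *     static enum AVPixelFormat pick_fmt(AVCodecContext *avctx,
 *                                        const enum AVPixelFormat *fmts)
 *     {
 *         const enum AVPixelFormat *p;
 *         for (p = fmts; *p != AV_PIX_FMT_NONE; p++)
 *             if (*p == AV_PIX_FMT_VAAPI)
 *                 return *p;
 *         return p[-1];
 *     }
 *
 * Returning p[-1] falls back to the software format when no hwaccel is chosen.
 */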
475 
476 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
477  enum AVPixelFormat pix_fmt)
478 {
479  int ret, i;
480 
481  pic_arrays_free(s);
482  s->ps.sps = NULL;
483  s->ps.vps = NULL;
484 
485  if (!sps)
486  return 0;
487 
488  ret = pic_arrays_init(s, sps);
489  if (ret < 0)
490  goto fail;
491 
492  export_stream_params(s, sps);
493 
494  s->avctx->pix_fmt = pix_fmt;
495 
496  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
497  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
498  ff_videodsp_init (&s->vdsp, sps->bit_depth);
499 
500  for (i = 0; i < 3; i++) {
501  av_freep(&s->sao_pixel_buffer_h[i]);
502  av_freep(&s->sao_pixel_buffer_v[i]);
503  }
504 
505  if (sps->sao_enabled && !s->avctx->hwaccel) {
506  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
507  int c_idx;
508 
509  for(c_idx = 0; c_idx < c_count; c_idx++) {
510  int w = sps->width >> sps->hshift[c_idx];
511  int h = sps->height >> sps->vshift[c_idx];
512  s->sao_pixel_buffer_h[c_idx] =
513  av_malloc((w * 2 * sps->ctb_height) <<
514  sps->pixel_shift);
515  s->sao_pixel_buffer_v[c_idx] =
516  av_malloc((h * 2 * sps->ctb_width) <<
517  sps->pixel_shift);
518  if (!s->sao_pixel_buffer_h[c_idx] ||
519  !s->sao_pixel_buffer_v[c_idx])
520  goto fail;
521  }
522  }
523 
524  s->ps.sps = sps;
525  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
526 
527  return 0;
528 
529 fail:
530  pic_arrays_free(s);
531  for (i = 0; i < 3; i++) {
532  av_freep(&s->sao_pixel_buffer_h[i]);
533  av_freep(&s->sao_pixel_buffer_v[i]);
534  }
535  s->ps.sps = NULL;
536  return ret;
537 }
538 
539 static int hls_slice_header(HEVCContext *s)
540 {
541  GetBitContext *gb = &s->HEVClc->gb;
542  SliceHeader *sh = &s->sh;
543  int i, ret;
544 
545  // Coded parameters
546  sh->first_slice_in_pic_flag = get_bits1(gb);
547  if (s->ref && sh->first_slice_in_pic_flag) {
548  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
549  return 1; // This slice will be skipped later, do not corrupt state
550  }
551 
552  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
553  s->seq_decode = (s->seq_decode + 1) & 0xff;
554  s->max_ra = INT_MAX;
555  if (IS_IDR(s))
556  ff_hevc_clear_refs(s);
557  }
558  sh->no_output_of_prior_pics_flag = 0;
559  if (IS_IRAP(s))
560  sh->no_output_of_prior_pics_flag = get_bits1(gb);
561 
562  sh->pps_id = get_ue_golomb_long(gb);
563  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
564  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
565  return AVERROR_INVALIDDATA;
566  }
567  if (!sh->first_slice_in_pic_flag &&
568  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
569  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
570  return AVERROR_INVALIDDATA;
571  }
572  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
573  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
574  sh->no_output_of_prior_pics_flag = 1;
575 
576  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
577  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
578  const HEVCSPS *last_sps = s->ps.sps;
579  enum AVPixelFormat pix_fmt;
580 
581  if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
582  if (sps->width != last_sps->width || sps->height != last_sps->height ||
583  sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
584  last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
585  sh->no_output_of_prior_pics_flag = 0;
586  }
587  ff_hevc_clear_refs(s);
588 
589  ret = set_sps(s, sps, sps->pix_fmt);
590  if (ret < 0)
591  return ret;
592 
593  pix_fmt = get_format(s, sps);
594  if (pix_fmt < 0)
595  return pix_fmt;
596  s->avctx->pix_fmt = pix_fmt;
597 
598  s->seq_decode = (s->seq_decode + 1) & 0xff;
599  s->max_ra = INT_MAX;
600  }
601 
602  ret = export_stream_params_from_sei(s);
603  if (ret < 0)
604  return ret;
605 
606  sh->dependent_slice_segment_flag = 0;
607  if (!sh->first_slice_in_pic_flag) {
608  int slice_address_length;
609 
610  if (s->ps.pps->dependent_slice_segments_enabled_flag)
611  sh->dependent_slice_segment_flag = get_bits1(gb);
612  if (sh->dependent_slice_segment_flag && !s->slice_initialized) {
613  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
614  return AVERROR_INVALIDDATA;
615  }
616 
617  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
618  s->ps.sps->ctb_height);
619  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
620  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
621  av_log(s->avctx, AV_LOG_ERROR,
622  "Invalid slice segment address: %u.\n",
623  sh->slice_segment_addr);
624  return AVERROR_INVALIDDATA;
625  }
626 
627  if (!sh->dependent_slice_segment_flag) {
628  sh->slice_addr = sh->slice_segment_addr;
629  s->slice_idx++;
630  }
631  } else {
632  sh->slice_segment_addr = sh->slice_addr = 0;
633  s->slice_idx = 0;
634  s->slice_initialized = 0;
635  }
636 
637  if (!sh->dependent_slice_segment_flag) {
638  s->slice_initialized = 0;
639 
640  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
641  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
642 
643  sh->slice_type = get_ue_golomb_long(gb);
644  if (!(sh->slice_type == HEVC_SLICE_I ||
645  sh->slice_type == HEVC_SLICE_P ||
646  sh->slice_type == HEVC_SLICE_B)) {
647  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
648  sh->slice_type);
649  return AVERROR_INVALIDDATA;
650  }
651  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
652  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
653  return AVERROR_INVALIDDATA;
654  }
655 
656  // when flag is not present, picture is inferred to be output
657  sh->pic_output_flag = 1;
658  if (s->ps.pps->output_flag_present_flag)
659  sh->pic_output_flag = get_bits1(gb);
660 
661  if (s->ps.sps->separate_colour_plane_flag)
662  sh->colour_plane_id = get_bits(gb, 2);
663 
664  if (!IS_IDR(s)) {
665  int poc, pos;
666 
667  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
668  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
669  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
670  av_log(s->avctx, AV_LOG_WARNING,
671  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
672  if (s->avctx->err_recognition & AV_EF_EXPLODE)
673  return AVERROR_INVALIDDATA;
674  poc = s->poc;
675  }
676  s->poc = poc;
677 
678  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
679  pos = get_bits_left(gb);
680  if (!sh->short_term_ref_pic_set_sps_flag) {
681  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
682  if (ret < 0)
683  return ret;
684 
685  sh->short_term_rps = &sh->slice_rps;
686  } else {
687  int numbits, rps_idx;
688 
689  if (!s->ps.sps->nb_st_rps) {
690  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
691  return AVERROR_INVALIDDATA;
692  }
693 
694  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
695  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
696  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
697  }
698  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
699 
700  pos = get_bits_left(gb);
701  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
702  if (ret < 0) {
703  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
704  if (s->avctx->err_recognition & AV_EF_EXPLODE)
705  return AVERROR_INVALIDDATA;
706  }
707  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
708 
709  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
710  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
711  else
712  sh->slice_temporal_mvp_enabled_flag = 0;
713  } else {
714  s->sh.short_term_rps = NULL;
715  s->poc = 0;
716  }
717 
718  /* 8.3.1 */
719  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
720  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
721  s->nal_unit_type != HEVC_NAL_TSA_N &&
722  s->nal_unit_type != HEVC_NAL_STSA_N &&
723  s->nal_unit_type != HEVC_NAL_RADL_N &&
724  s->nal_unit_type != HEVC_NAL_RADL_R &&
725  s->nal_unit_type != HEVC_NAL_RASL_N &&
726  s->nal_unit_type != HEVC_NAL_RASL_R)
727  s->pocTid0 = s->poc;
728 
729  if (s->ps.sps->sao_enabled) {
730  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
731  if (s->ps.sps->chroma_format_idc) {
732  sh->slice_sample_adaptive_offset_flag[1] =
733  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
734  }
735  } else {
736  sh->slice_sample_adaptive_offset_flag[0] = 0;
737  sh->slice_sample_adaptive_offset_flag[1] = 0;
738  sh->slice_sample_adaptive_offset_flag[2] = 0;
739  }
740 
741  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
742  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
743  int nb_refs;
744 
745  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
746  if (sh->slice_type == HEVC_SLICE_B)
747  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
748 
749  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
750  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
751  if (sh->slice_type == HEVC_SLICE_B)
752  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
753  }
754  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
755  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
756  sh->nb_refs[L0], sh->nb_refs[L1]);
757  return AVERROR_INVALIDDATA;
758  }
759 
760  sh->rpl_modification_flag[0] = 0;
761  sh->rpl_modification_flag[1] = 0;
762  nb_refs = ff_hevc_frame_nb_refs(s);
763  if (!nb_refs) {
764  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
765  return AVERROR_INVALIDDATA;
766  }
767 
768  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
769  sh->rpl_modification_flag[0] = get_bits1(gb);
770  if (sh->rpl_modification_flag[0]) {
771  for (i = 0; i < sh->nb_refs[L0]; i++)
772  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
773  }
774 
775  if (sh->slice_type == HEVC_SLICE_B) {
776  sh->rpl_modification_flag[1] = get_bits1(gb);
777  if (sh->rpl_modification_flag[1] == 1)
778  for (i = 0; i < sh->nb_refs[L1]; i++)
779  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
780  }
781  }
782 
783  if (sh->slice_type == HEVC_SLICE_B)
784  sh->mvd_l1_zero_flag = get_bits1(gb);
785 
786  if (s->ps.pps->cabac_init_present_flag)
787  sh->cabac_init_flag = get_bits1(gb);
788  else
789  sh->cabac_init_flag = 0;
790 
791  sh->collocated_ref_idx = 0;
792  if (sh->slice_temporal_mvp_enabled_flag) {
793  sh->collocated_list = L0;
794  if (sh->slice_type == HEVC_SLICE_B)
795  sh->collocated_list = !get_bits1(gb);
796 
797  if (sh->nb_refs[sh->collocated_list] > 1) {
798  sh->collocated_ref_idx = get_ue_golomb_long(gb);
799  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
800  av_log(s->avctx, AV_LOG_ERROR,
801  "Invalid collocated_ref_idx: %d.\n",
802  sh->collocated_ref_idx);
803  return AVERROR_INVALIDDATA;
804  }
805  }
806  }
807 
808  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
809  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
810  int ret = pred_weight_table(s, gb);
811  if (ret < 0)
812  return ret;
813  }
814 
815  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
816  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
817  av_log(s->avctx, AV_LOG_ERROR,
818  "Invalid number of merging MVP candidates: %d.\n",
819  sh->max_num_merge_cand);
820  return AVERROR_INVALIDDATA;
821  }
822  }
823 
824  sh->slice_qp_delta = get_se_golomb(gb);
825 
826  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
827  sh->slice_cb_qp_offset = get_se_golomb(gb);
828  sh->slice_cr_qp_offset = get_se_golomb(gb);
829  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
830  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
831  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
832  return AVERROR_INVALIDDATA;
833  }
834  } else {
835  sh->slice_cb_qp_offset = 0;
836  sh->slice_cr_qp_offset = 0;
837  }
838 
839  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
840  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
841  else
842  sh->cu_chroma_qp_offset_enabled_flag = 0;
843 
844  if (s->ps.pps->deblocking_filter_control_present_flag) {
845  int deblocking_filter_override_flag = 0;
846 
847  if (s->ps.pps->deblocking_filter_override_enabled_flag)
848  deblocking_filter_override_flag = get_bits1(gb);
849 
850  if (deblocking_filter_override_flag) {
851  sh->disable_deblocking_filter_flag = get_bits1(gb);
852  if (!sh->disable_deblocking_filter_flag) {
853  int beta_offset_div2 = get_se_golomb(gb);
854  int tc_offset_div2 = get_se_golomb(gb) ;
855  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
856  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
857  av_log(s->avctx, AV_LOG_ERROR,
858  "Invalid deblock filter offsets: %d, %d\n",
859  beta_offset_div2, tc_offset_div2);
860  return AVERROR_INVALIDDATA;
861  }
862  sh->beta_offset = beta_offset_div2 * 2;
863  sh->tc_offset = tc_offset_div2 * 2;
864  }
865  } else {
866  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
867  sh->beta_offset = s->ps.pps->beta_offset;
868  sh->tc_offset = s->ps.pps->tc_offset;
869  }
870  } else {
871  sh->disable_deblocking_filter_flag = 0;
872  sh->beta_offset = 0;
873  sh->tc_offset = 0;
874  }
875 
876  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
877  (sh->slice_sample_adaptive_offset_flag[0] ||
878  sh->slice_sample_adaptive_offset_flag[1] ||
879  !sh->disable_deblocking_filter_flag)) {
880  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
881  } else {
882  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
883  }
884  }
885 
886  sh->num_entry_point_offsets = 0;
887  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
888  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
889  // It would be possible to bound this tighter but this here is simpler
890  if (num_entry_point_offsets > get_bits_left(gb)) {
891  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
892  return AVERROR_INVALIDDATA;
893  }
894 
895  sh->num_entry_point_offsets = num_entry_point_offsets;
896  if (sh->num_entry_point_offsets > 0) {
897  int offset_len = get_ue_golomb_long(gb) + 1;
898 
899  if (offset_len < 1 || offset_len > 32) {
900  sh->num_entry_point_offsets = 0;
901  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
902  return AVERROR_INVALIDDATA;
903  }
904 
905  av_freep(&sh->entry_point_offset);
906  av_freep(&sh->offset);
907  av_freep(&sh->size);
908  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
909  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
910  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
911  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
912  sh->num_entry_point_offsets = 0;
913  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
914  return AVERROR(ENOMEM);
915  }
916  for (i = 0; i < sh->num_entry_point_offsets; i++) {
917  unsigned val = get_bits_long(gb, offset_len);
918  sh->entry_point_offset[i] = val + 1; // +1 to get the size
919  }
920  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
921  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
922  s->threads_number = 1;
923  } else
924  s->enable_parallel_tiles = 0;
925  } else
926  s->enable_parallel_tiles = 0;
927  }
928 
929  if (s->ps.pps->slice_header_extension_present_flag) {
930  unsigned int length = get_ue_golomb_long(gb);
931  if (length*8LL > get_bits_left(gb)) {
932  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
933  return AVERROR_INVALIDDATA;
934  }
935  for (i = 0; i < length; i++)
936  skip_bits(gb, 8); // slice_header_extension_data_byte
937  }
938 
939  // Inferred parameters
940  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
941  if (sh->slice_qp > 51 ||
942  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
943  av_log(s->avctx, AV_LOG_ERROR,
944  "The slice_qp %d is outside the valid range "
945  "[%d, 51].\n",
946  sh->slice_qp,
947  -s->ps.sps->qp_bd_offset);
948  return AVERROR_INVALIDDATA;
949  }
950 
951  sh->slice_ctb_addr_rs = sh->slice_segment_addr;
952 
953  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
954  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
955  return AVERROR_INVALIDDATA;
956  }
957 
958  if (get_bits_left(gb) < 0) {
959  av_log(s->avctx, AV_LOG_ERROR,
960  "Overread slice header by %d bits\n", -get_bits_left(gb));
961  return AVERROR_INVALIDDATA;
962  }
963 
964  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
965 
966  if (!s->ps.pps->cu_qp_delta_enabled_flag)
967  s->HEVClc->qp_y = s->sh.slice_qp;
968 
969  s->slice_initialized = 1;
970  s->HEVClc->tu.cu_qp_offset_cb = 0;
971  s->HEVClc->tu.cu_qp_offset_cr = 0;
972 
973  return 0;
974 }
975 
976 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
977 
978 #define SET_SAO(elem, value) \
979 do { \
980  if (!sao_merge_up_flag && !sao_merge_left_flag) \
981  sao->elem = value; \
982  else if (sao_merge_left_flag) \
983  sao->elem = CTB(s->sao, rx-1, ry).elem; \
984  else if (sao_merge_up_flag) \
985  sao->elem = CTB(s->sao, rx, ry-1).elem; \
986  else \
987  sao->elem = 0; \
988 } while (0)
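/*
 * SET_SAO() implements SAO parameter merging: e.g. with sao_merge_left_flag
 * set, every field of this CTB's SAOParams is copied from the CTB at
 * (rx - 1, ry), so only the merge flags themselves are read from the
 * bitstream for such a CTB.
 */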
989 
990 static void hls_sao_param(HEVCContext *s, int rx, int ry)
991 {
992  HEVCLocalContext *lc = s->HEVClc;
993  int sao_merge_left_flag = 0;
994  int sao_merge_up_flag = 0;
995  SAOParams *sao = &CTB(s->sao, rx, ry);
996  int c_idx, i;
997 
998  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
999  s->sh.slice_sample_adaptive_offset_flag[1]) {
1000  if (rx > 0) {
1001  if (lc->ctb_left_flag)
1002  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
1003  }
1004  if (ry > 0 && !sao_merge_left_flag) {
1005  if (lc->ctb_up_flag)
1006  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
1007  }
1008  }
1009 
1010  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1011  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1012  s->ps.pps->log2_sao_offset_scale_chroma;
1013 
1014  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1015  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1016  continue;
1017  }
1018 
1019  if (c_idx == 2) {
1020  sao->type_idx[2] = sao->type_idx[1];
1021  sao->eo_class[2] = sao->eo_class[1];
1022  } else {
1023  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
1024  }
1025 
1026  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1027  continue;
1028 
1029  for (i = 0; i < 4; i++)
1030  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
1031 
1032  if (sao->type_idx[c_idx] == SAO_BAND) {
1033  for (i = 0; i < 4; i++) {
1034  if (sao->offset_abs[c_idx][i]) {
1035  SET_SAO(offset_sign[c_idx][i],
1036  ff_hevc_sao_offset_sign_decode(s));
1037  } else {
1038  sao->offset_sign[c_idx][i] = 0;
1039  }
1040  }
1041  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
1042  } else if (c_idx != 2) {
1043  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
1044  }
1045 
1046  // Inferred parameters
1047  sao->offset_val[c_idx][0] = 0;
1048  for (i = 0; i < 4; i++) {
1049  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1050  if (sao->type_idx[c_idx] == SAO_EDGE) {
1051  if (i > 1)
1052  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1053  } else if (sao->offset_sign[c_idx][i]) {
1054  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1055  }
1056  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1057  }
1058  }
1059 }
1060 
1061 #undef SET_SAO
1062 #undef CTB
1063 
1064 static int hls_cross_component_pred(HEVCContext *s, int idx) {
1065  HEVCLocalContext *lc = s->HEVClc;
1066  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
1067 
1068  if (log2_res_scale_abs_plus1 != 0) {
1069  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
1070  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1071  (1 - 2 * res_scale_sign_flag);
1072  } else {
1073  lc->tu.res_scale_val = 0;
1074  }
1075 
1076 
1077  return 0;
1078 }
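/*
 * Worked example (illustration only): log2_res_scale_abs_plus1 == 3 with
 * res_scale_sign_flag == 0 gives res_scale_val == 4, so the chroma residual
 * later becomes (4 * coeffs_y[i]) >> 3, i.e. half of the reconstructed luma
 * residual is used as the chroma residual when no chroma coefficients are
 * coded (see the cross_pf paths in hls_transform_unit()).
 */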
1079 
1080 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
1081  int xBase, int yBase, int cb_xBase, int cb_yBase,
1082  int log2_cb_size, int log2_trafo_size,
1083  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1084 {
1085  HEVCLocalContext *lc = s->HEVClc;
1086  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1087  int i;
1088 
1089  if (lc->cu.pred_mode == MODE_INTRA) {
1090  int trafo_size = 1 << log2_trafo_size;
1091  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1092 
1093  s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
1094  }
1095 
1096  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1097  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1098  int scan_idx = SCAN_DIAG;
1099  int scan_idx_c = SCAN_DIAG;
1100  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1101  (s->ps.sps->chroma_format_idc == 2 &&
1102  (cbf_cb[1] || cbf_cr[1]));
1103 
1104  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1105  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
1106  if (lc->tu.cu_qp_delta != 0)
1107  if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
1108  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1109  lc->tu.is_cu_qp_delta_coded = 1;
1110 
1111  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1112  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1113  av_log(s->avctx, AV_LOG_ERROR,
1114  "The cu_qp_delta %d is outside the valid range "
1115  "[%d, %d].\n",
1116  lc->tu.cu_qp_delta,
1117  -(26 + s->ps.sps->qp_bd_offset / 2),
1118  (25 + s->ps.sps->qp_bd_offset / 2));
1119  return AVERROR_INVALIDDATA;
1120  }
1121 
1122  ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
1123  }
1124 
1125  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1126  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1127  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
1128  if (cu_chroma_qp_offset_flag) {
1129  int cu_chroma_qp_offset_idx = 0;
1130  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1131  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
1132  av_log(s->avctx, AV_LOG_ERROR,
1133  "cu_chroma_qp_offset_idx not yet tested.\n");
1134  }
1135  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1136  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1137  } else {
1138  lc->tu.cu_qp_offset_cb = 0;
1139  lc->tu.cu_qp_offset_cr = 0;
1140  }
1141  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1142  }
1143 
1144  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1145  if (lc->tu.intra_pred_mode >= 6 &&
1146  lc->tu.intra_pred_mode <= 14) {
1147  scan_idx = SCAN_VERT;
1148  } else if (lc->tu.intra_pred_mode >= 22 &&
1149  lc->tu.intra_pred_mode <= 30) {
1150  scan_idx = SCAN_HORIZ;
1151  }
1152 
1153  if (lc->tu.intra_pred_mode_c >= 6 &&
1154  lc->tu.intra_pred_mode_c <= 14) {
1155  scan_idx_c = SCAN_VERT;
1156  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1157  lc->tu.intra_pred_mode_c <= 30) {
1158  scan_idx_c = SCAN_HORIZ;
1159  }
1160  }
1161 
1162  lc->tu.cross_pf = 0;
1163 
1164  if (cbf_luma)
1165  ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1166  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1167  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1168  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1169  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1170  (lc->cu.pred_mode == MODE_INTER ||
1171  (lc->tu.chroma_mode_c == 4)));
1172 
1173  if (lc->tu.cross_pf) {
1174  hls_cross_component_pred(s, 0);
1175  }
1176  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1177  if (lc->cu.pred_mode == MODE_INTRA) {
1178  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1179  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1180  }
1181  if (cbf_cb[i])
1182  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1183  log2_trafo_size_c, scan_idx_c, 1);
1184  else
1185  if (lc->tu.cross_pf) {
1186  ptrdiff_t stride = s->frame->linesize[1];
1187  int hshift = s->ps.sps->hshift[1];
1188  int vshift = s->ps.sps->vshift[1];
1189  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1190  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1191  int size = 1 << log2_trafo_size_c;
1192 
1193  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1194  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1195  for (i = 0; i < (size * size); i++) {
1196  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1197  }
1198  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1199  }
1200  }
1201 
1202  if (lc->tu.cross_pf) {
1203  hls_cross_component_pred(s, 1);
1204  }
1205  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1206  if (lc->cu.pred_mode == MODE_INTRA) {
1207  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1208  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1209  }
1210  if (cbf_cr[i])
1211  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1212  log2_trafo_size_c, scan_idx_c, 2);
1213  else
1214  if (lc->tu.cross_pf) {
1215  ptrdiff_t stride = s->frame->linesize[2];
1216  int hshift = s->ps.sps->hshift[2];
1217  int vshift = s->ps.sps->vshift[2];
1218  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1219  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1220  int size = 1 << log2_trafo_size_c;
1221 
1222  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1223  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1224  for (i = 0; i < (size * size); i++) {
1225  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1226  }
1227  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1228  }
1229  }
1230  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1231  int trafo_size_h = 1 << (log2_trafo_size + 1);
1232  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1233  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1234  if (lc->cu.pred_mode == MODE_INTRA) {
1235  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1236  trafo_size_h, trafo_size_v);
1237  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1238  }
1239  if (cbf_cb[i])
1240  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1241  log2_trafo_size, scan_idx_c, 1);
1242  }
1243  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1244  if (lc->cu.pred_mode == MODE_INTRA) {
1245  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1246  trafo_size_h, trafo_size_v);
1247  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1248  }
1249  if (cbf_cr[i])
1250  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1251  log2_trafo_size, scan_idx_c, 2);
1252  }
1253  }
1254  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1255  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1256  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1257  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1258  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1259  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1260  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1261  if (s->ps.sps->chroma_format_idc == 2) {
1262  ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1263  trafo_size_h, trafo_size_v);
1264  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1265  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1266  }
1267  } else if (blk_idx == 3) {
1268  int trafo_size_h = 1 << (log2_trafo_size + 1);
1269  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1270  ff_hevc_set_neighbour_available(s, xBase, yBase,
1271  trafo_size_h, trafo_size_v);
1272  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1273  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1274  if (s->ps.sps->chroma_format_idc == 2) {
1275  ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1276  trafo_size_h, trafo_size_v);
1277  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1278  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1279  }
1280  }
1281  }
1282 
1283  return 0;
1284 }
1285 
1286 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1287 {
1288  int cb_size = 1 << log2_cb_size;
1289  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1290 
1291  int min_pu_width = s->ps.sps->min_pu_width;
1292  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1293  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1294  int i, j;
1295 
1296  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1297  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1298  s->is_pcm[i + j * min_pu_width] = 2;
1299 }
1300 
1301 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1302  int xBase, int yBase, int cb_xBase, int cb_yBase,
1303  int log2_cb_size, int log2_trafo_size,
1304  int trafo_depth, int blk_idx,
1305  const int *base_cbf_cb, const int *base_cbf_cr)
1306 {
1307  HEVCLocalContext *lc = s->HEVClc;
1308  uint8_t split_transform_flag;
1309  int cbf_cb[2];
1310  int cbf_cr[2];
1311  int ret;
1312 
1313  cbf_cb[0] = base_cbf_cb[0];
1314  cbf_cb[1] = base_cbf_cb[1];
1315  cbf_cr[0] = base_cbf_cr[0];
1316  cbf_cr[1] = base_cbf_cr[1];
1317 
1318  if (lc->cu.intra_split_flag) {
1319  if (trafo_depth == 1) {
1320  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1321  if (s->ps.sps->chroma_format_idc == 3) {
1322  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1323  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1324  } else {
1325  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1326  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1327  }
1328  }
1329  } else {
1330  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1331  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1332  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1333  }
1334 
1335  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1336  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1337  trafo_depth < lc->cu.max_trafo_depth &&
1338  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1339  split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
1340  } else {
1341  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1342  lc->cu.pred_mode == MODE_INTER &&
1343  lc->cu.part_mode != PART_2Nx2N &&
1344  trafo_depth == 0;
1345 
1346  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1347  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1348  inter_split;
1349  }
1350 
1351  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1352  if (trafo_depth == 0 || cbf_cb[0]) {
1353  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1354  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1355  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1356  }
1357  }
1358 
1359  if (trafo_depth == 0 || cbf_cr[0]) {
1360  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1361  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1362  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1363  }
1364  }
1365  }
1366 
1367  if (split_transform_flag) {
1368  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1369  const int x1 = x0 + trafo_size_split;
1370  const int y1 = y0 + trafo_size_split;
1371 
1372 #define SUBDIVIDE(x, y, idx) \
1373 do { \
1374  ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1375  log2_trafo_size - 1, trafo_depth + 1, idx, \
1376  cbf_cb, cbf_cr); \
1377  if (ret < 0) \
1378  return ret; \
1379 } while (0)
1380 
1381  SUBDIVIDE(x0, y0, 0);
1382  SUBDIVIDE(x1, y0, 1);
1383  SUBDIVIDE(x0, y1, 2);
1384  SUBDIVIDE(x1, y1, 3);
1385 
1386 #undef SUBDIVIDE
1387  } else {
1388  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1389  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1390  int min_tu_width = s->ps.sps->min_tb_width;
1391  int cbf_luma = 1;
1392 
1393  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1394  cbf_cb[0] || cbf_cr[0] ||
1395  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1396  cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1397  }
1398 
1399  ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1400  log2_cb_size, log2_trafo_size,
1401  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1402  if (ret < 0)
1403  return ret;
1404  // TODO: store cbf_luma somewhere else
1405  if (cbf_luma) {
1406  int i, j;
1407  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1408  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1409  int x_tu = (x0 + j) >> log2_min_tu_size;
1410  int y_tu = (y0 + i) >> log2_min_tu_size;
1411  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1412  }
1413  }
1414  if (!s->sh.disable_deblocking_filter_flag) {
1415  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1416  if (s->ps.pps->transquant_bypass_enable_flag &&
1417  lc->cu.cu_transquant_bypass_flag)
1418  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1419  }
1420  }
1421  return 0;
1422 }
1423 
1424 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1425 {
1426  HEVCLocalContext *lc = s->HEVClc;
1427  GetBitContext gb;
1428  int cb_size = 1 << log2_cb_size;
1429  ptrdiff_t stride0 = s->frame->linesize[0];
1430  ptrdiff_t stride1 = s->frame->linesize[1];
1431  ptrdiff_t stride2 = s->frame->linesize[2];
1432  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1433  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1434  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1435 
1436  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1437  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1438  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1439  s->ps.sps->pcm.bit_depth_chroma;
1440  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1441  int ret;
1442 
1443  if (!s->sh.disable_deblocking_filter_flag)
1444  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1445 
1446  ret = init_get_bits(&gb, pcm, length);
1447  if (ret < 0)
1448  return ret;
1449 
1450  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1451  if (s->ps.sps->chroma_format_idc) {
1452  s->hevcdsp.put_pcm(dst1, stride1,
1453  cb_size >> s->ps.sps->hshift[1],
1454  cb_size >> s->ps.sps->vshift[1],
1455  &gb, s->ps.sps->pcm.bit_depth_chroma);
1456  s->hevcdsp.put_pcm(dst2, stride2,
1457  cb_size >> s->ps.sps->hshift[2],
1458  cb_size >> s->ps.sps->vshift[2],
1459  &gb, s->ps.sps->pcm.bit_depth_chroma);
1460  }
1461 
1462  return 0;
1463 }
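/*
 * Worked example (illustration only): for an 8x8 PCM coding block in 4:2:0
 * with pcm.bit_depth == 8 and pcm.bit_depth_chroma == 8, length is
 * 64*8 + (16 + 16)*8 == 768 bits, so skip_bytes() advances the CABAC reader
 * by (768 + 7) >> 3 == 96 bytes of raw sample data.
 */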
1464 
1465 /**
1466  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1467  *
1468  * @param s HEVC decoding context
1469  * @param dst target buffer for block data at block position
1470  * @param dststride stride of the dst buffer
1471  * @param ref reference picture buffer at origin (0, 0)
1472  * @param mv motion vector (relative to block position) to get pixel data from
1473  * @param x_off horizontal position of block from origin (0, 0)
1474  * @param y_off vertical position of block from origin (0, 0)
1475  * @param block_w width of block
1476  * @param block_h height of block
1477  * @param luma_weight weighting factor applied to the luma prediction
1478  * @param luma_offset additive offset applied to the luma prediction value
1479  */
1480 
1481 static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1482  AVFrame *ref, const Mv *mv, int x_off, int y_off,
1483  int block_w, int block_h, int luma_weight, int luma_offset)
1484 {
1485  HEVCLocalContext *lc = s->HEVClc;
1486  uint8_t *src = ref->data[0];
1487  ptrdiff_t srcstride = ref->linesize[0];
1488  int pic_width = s->ps.sps->width;
1489  int pic_height = s->ps.sps->height;
1490  int mx = mv->x & 3;
1491  int my = mv->y & 3;
1492  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1493  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1494  int idx = ff_hevc_pel_weight[block_w];
1495 
1496  x_off += mv->x >> 2;
1497  y_off += mv->y >> 2;
1498  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1499 
1500  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1501  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1502  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER ||
1503  ref == s->frame) {
1504  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1505  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1506  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1507 
1508  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1509  edge_emu_stride, srcstride,
1510  block_w + QPEL_EXTRA,
1511  block_h + QPEL_EXTRA,
1512  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1513  pic_width, pic_height);
1514  src = lc->edge_emu_buffer + buf_offset;
1515  srcstride = edge_emu_stride;
1516  }
1517 
1518  if (!weight_flag)
1519  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1520  block_h, mx, my, block_w);
1521  else
1522  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1523  block_h, s->sh.luma_log2_weight_denom,
1524  luma_weight, luma_offset, mx, my, block_w);
1525 }
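/*
 * Worked example (illustration only): luma MVs are in quarter-sample units,
 * so mv->x == 13 splits into an integer offset of 13 >> 2 == 3 samples and a
 * fractional part mx == 13 & 3 == 1, which selects the 1/4-sample filter
 * phase in put_hevc_qpel_uni[idx][!!my][!!mx].
 */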
1526 
1527 /**
1528  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1529  *
1530  * @param s HEVC decoding context
1531  * @param dst target buffer for block data at block position
1532  * @param dststride stride of the dst buffer
1533  * @param ref0 reference picture0 buffer at origin (0, 0)
1534  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1535  * @param x_off horizontal position of block from origin (0, 0)
1536  * @param y_off vertical position of block from origin (0, 0)
1537  * @param block_w width of block
1538  * @param block_h height of block
1539  * @param ref1 reference picture1 buffer at origin (0, 0)
1540  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1541  * @param current_mv current motion vector structure
1542  */
1543  static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1544  AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1545  int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1546 {
1547  HEVCLocalContext *lc = s->HEVClc;
1548  ptrdiff_t src0stride = ref0->linesize[0];
1549  ptrdiff_t src1stride = ref1->linesize[0];
1550  int pic_width = s->ps.sps->width;
1551  int pic_height = s->ps.sps->height;
1552  int mx0 = mv0->x & 3;
1553  int my0 = mv0->y & 3;
1554  int mx1 = mv1->x & 3;
1555  int my1 = mv1->y & 3;
1556  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1557  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1558  int x_off0 = x_off + (mv0->x >> 2);
1559  int y_off0 = y_off + (mv0->y >> 2);
1560  int x_off1 = x_off + (mv1->x >> 2);
1561  int y_off1 = y_off + (mv1->y >> 2);
1562  int idx = ff_hevc_pel_weight[block_w];
1563 
1564  uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1565  uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1566 
1567  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1568  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1569  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1570  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1571  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1572  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1573 
1574  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1575  edge_emu_stride, src0stride,
1576  block_w + QPEL_EXTRA,
1577  block_h + QPEL_EXTRA,
1578  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1579  pic_width, pic_height);
1580  src0 = lc->edge_emu_buffer + buf_offset;
1581  src0stride = edge_emu_stride;
1582  }
1583 
1584  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1585  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1586  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1587  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1588  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1589  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1590 
1591  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1592  edge_emu_stride, src1stride,
1593  block_w + QPEL_EXTRA,
1594  block_h + QPEL_EXTRA,
1595  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1596  pic_width, pic_height);
1597  src1 = lc->edge_emu_buffer2 + buf_offset;
1598  src1stride = edge_emu_stride;
1599  }
1600 
1601  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1602  block_h, mx0, my0, block_w);
1603  if (!weight_flag)
1604  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1605  block_h, mx1, my1, block_w);
1606  else
1607  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1608  block_h, s->sh.luma_log2_weight_denom,
1609  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1610  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1611  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1612  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1613  mx1, my1, block_w);
1614 
1615 }
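/*
 * Bi-prediction note: the L0 prediction is first written to the intermediate
 * buffer lc->tmp at higher-than-output precision by put_hevc_qpel[], then the
 * put_hevc_qpel_bi[_w] call combines it with the L1 prediction, either by
 * averaging or, with explicit weighting, using the per-reference weights and
 * offsets parsed in pred_weight_table().
 */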
1616 
1617 /**
1618  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1619  *
1620  * @param s HEVC decoding context
1621  * @param dst1 target buffer for block data at block position (U plane)
1622  * @param dst2 target buffer for block data at block position (V plane)
1623  * @param dststride stride of the dst1 and dst2 buffers
1624  * @param ref reference picture buffer at origin (0, 0)
1625  * @param mv motion vector (relative to block position) to get pixel data from
1626  * @param x_off horizontal position of block from origin (0, 0)
1627  * @param y_off vertical position of block from origin (0, 0)
1628  * @param block_w width of block
1629  * @param block_h height of block
1630  * @param chroma_weight weighting factor applied to the chroma prediction
1631  * @param chroma_offset additive offset applied to the chroma prediction value
1632  */
1633 
1634 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1635  ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1636  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1637 {
1638  HEVCLocalContext *lc = s->HEVClc;
1639  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1640  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1641  const Mv *mv = &current_mv->mv[reflist];
1642  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1643  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1644  int idx = ff_hevc_pel_weight[block_w];
1645  int hshift = s->ps.sps->hshift[1];
1646  int vshift = s->ps.sps->vshift[1];
1647  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1648  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1649  intptr_t _mx = mx << (1 - hshift);
1650  intptr_t _my = my << (1 - vshift);
1651  int emu = src0 == s->frame->data[1] || src0 == s->frame->data[2];
1652 
1653  x_off += mv->x >> (2 + hshift);
1654  y_off += mv->y >> (2 + vshift);
1655  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1656 
1657  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1658  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1659  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER ||
1660  emu) {
1661  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1662  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1663  int buf_offset0 = EPEL_EXTRA_BEFORE *
1664  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1665  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1666  edge_emu_stride, srcstride,
1667  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1668  x_off - EPEL_EXTRA_BEFORE,
1669  y_off - EPEL_EXTRA_BEFORE,
1670  pic_width, pic_height);
1671 
1672  src0 = lc->edge_emu_buffer + buf_offset0;
1673  srcstride = edge_emu_stride;
1674  }
1675  if (!weight_flag)
1676  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1677  block_h, _mx, _my, block_w);
1678  else
1679  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1680  block_h, s->sh.chroma_log2_weight_denom,
1681  chroma_weight, chroma_offset, _mx, _my, block_w);
1682 }
1683 
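/* Illustration (not part of hevcdec.c): chroma motion compensation reuses the
 * luma motion vector, so for 4:2:0 (hshift = vshift = 1) each component carries
 * 1/8-pel precision. chroma_mc_uni() above splits it with av_mod_uintp2() and
 * rescales the fraction to the index range of the epel filters. A small sketch
 * of the 4:2:0 case: */
#include "libavutil/common.h" /* av_mod_uintp2() */

static inline void split_epel_component_420(int mv, int *int_off, int *frac)
{
    const int shift = 1;          /* chroma subsampling shift for 4:2:0 */
    *int_off = mv >> (2 + shift); /* integer chroma-sample offset */
    *frac    = av_mod_uintp2(mv, 2 + shift) << (1 - shift); /* 1/8-pel phase 0..7 */
}
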
1684 /**
1685  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1686  *
1687  * @param s HEVC decoding context
1688  * @param dst0 target buffer for block data at block position
1689  * @param dststride stride of the dst0 buffer
1690  * @param ref0 reference picture0 buffer at origin (0, 0)
1691  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1692  * @param x_off horizontal position of block from origin (0, 0)
1693  * @param y_off vertical position of block from origin (0, 0)
1694  * @param block_w width of block
1695  * @param block_h height of block
1696  * @param ref1 reference picture1 buffer at origin (0, 0)
1697  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1698  * @param current_mv current motion vector structure
1699  * @param cidx chroma component index (0 = Cb, 1 = Cr)
1700  */
1701 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1702  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1703 {
1704  HEVCLocalContext *lc = s->HEVClc;
1705  uint8_t *src1 = ref0->data[cidx+1];
1706  uint8_t *src2 = ref1->data[cidx+1];
1707  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1708  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1709  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1710  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1711  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1712  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1713  Mv *mv0 = &current_mv->mv[0];
1714  Mv *mv1 = &current_mv->mv[1];
1715  int hshift = s->ps.sps->hshift[1];
1716  int vshift = s->ps.sps->vshift[1];
1717 
1718  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1719  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1720  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1721  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1722  intptr_t _mx0 = mx0 << (1 - hshift);
1723  intptr_t _my0 = my0 << (1 - vshift);
1724  intptr_t _mx1 = mx1 << (1 - hshift);
1725  intptr_t _my1 = my1 << (1 - vshift);
1726 
1727  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1728  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1729  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1730  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1731  int idx = ff_hevc_pel_weight[block_w];
1732  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1733  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1734 
1735  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1736  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1737  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1738  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1739  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1740  int buf_offset1 = EPEL_EXTRA_BEFORE *
1741  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1742 
1743  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1744  edge_emu_stride, src1stride,
1745  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1746  x_off0 - EPEL_EXTRA_BEFORE,
1747  y_off0 - EPEL_EXTRA_BEFORE,
1748  pic_width, pic_height);
1749 
1750  src1 = lc->edge_emu_buffer + buf_offset1;
1751  src1stride = edge_emu_stride;
1752  }
1753 
1754  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1755  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1756  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1757  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1758  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1759  int buf_offset1 = EPEL_EXTRA_BEFORE *
1760  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1761 
1762  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1763  edge_emu_stride, src2stride,
1764  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1765  x_off1 - EPEL_EXTRA_BEFORE,
1766  y_off1 - EPEL_EXTRA_BEFORE,
1767  pic_width, pic_height);
1768 
1769  src2 = lc->edge_emu_buffer2 + buf_offset1;
1770  src2stride = edge_emu_stride;
1771  }
1772 
1773  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1774  block_h, _mx0, _my0, block_w);
1775  if (!weight_flag)
1776  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1777  src2, src2stride, lc->tmp,
1778  block_h, _mx1, _my1, block_w);
1779  else
1780  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1781  src2, src2stride, lc->tmp,
1782  block_h,
1783  s->sh.chroma_log2_weight_denom,
1784  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1785  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1786  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1787  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1788  _mx1, _my1, block_w);
1789 }
1790 
1791 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1792  const Mv *mv, int y0, int height)
1793 {
1794  if (s->threads_type == FF_THREAD_FRAME ) {
1795  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1796 
1797  ff_thread_await_progress(&ref->tf, y, 0);
1798  }
1799 }
1800 
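/* Illustration (not part of hevcdec.c): under frame threading the caller above
 * waits until the reference picture has been reconstructed down to the lowest
 * row the interpolation can touch; the +9 covers the downward reach of the
 * luma filters. A sketch of that row computation: */
static inline int required_progress_row(int mv_y, int y0, int height)
{
    int y = (mv_y >> 2) + y0 + height + 9; /* integer MV part + block height + filter margin */
    return y < 0 ? 0 : y;                  /* clamped to 0, like FFMAX(0, ...) */
}
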
1801 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
1802  int nPbH, int log2_cb_size, int part_idx,
1803  int merge_idx, MvField *mv)
1804 {
1805  HEVCLocalContext *lc = s->HEVClc;
1806  enum InterPredIdc inter_pred_idc = PRED_L0;
1807  int mvp_flag;
1808 
1809  ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
1810  mv->pred_flag = 0;
1811  if (s->sh.slice_type == HEVC_SLICE_B)
1812  inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
1813 
1814  if (inter_pred_idc != PRED_L1) {
1815  if (s->sh.nb_refs[L0])
1816  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1817 
1818  mv->pred_flag = PF_L0;
1819  ff_hevc_hls_mvd_coding(s, x0, y0, 0);
1820  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1821  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1822  part_idx, merge_idx, mv, mvp_flag, 0);
1823  mv->mv[0].x += lc->pu.mvd.x;
1824  mv->mv[0].y += lc->pu.mvd.y;
1825  }
1826 
1827  if (inter_pred_idc != PRED_L0) {
1828  if (s->sh.nb_refs[L1])
1829  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
1830 
1831  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1832  AV_ZERO32(&lc->pu.mvd);
1833  } else {
1834  ff_hevc_hls_mvd_coding(s, x0, y0, 1);
1835  }
1836 
1837  mv->pred_flag += PF_L1;
1838  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1839  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1840  part_idx, merge_idx, mv, mvp_flag, 1);
1841  mv->mv[1].x += lc->pu.mvd.x;
1842  mv->mv[1].y += lc->pu.mvd.y;
1843  }
1844 }
1845 
1846 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1847  int nPbW, int nPbH,
1848  int log2_cb_size, int partIdx, int idx)
1849 {
1850 #define POS(c_idx, x, y) \
1851  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1852  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1853  HEVCLocalContext *lc = s->HEVClc;
1854  int merge_idx = 0;
1855  struct MvField current_mv = {{{ 0 }}};
1856 
1857  int min_pu_width = s->ps.sps->min_pu_width;
1858 
1859  MvField *tab_mvf = s->ref->tab_mvf;
1860  RefPicList *refPicList = s->ref->refPicList;
1861  HEVCFrame *ref0 = NULL, *ref1 = NULL;
1862  uint8_t *dst0 = POS(0, x0, y0);
1863  uint8_t *dst1 = POS(1, x0, y0);
1864  uint8_t *dst2 = POS(2, x0, y0);
1865  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1866  int min_cb_width = s->ps.sps->min_cb_width;
1867  int x_cb = x0 >> log2_min_cb_size;
1868  int y_cb = y0 >> log2_min_cb_size;
1869  int x_pu, y_pu;
1870  int i, j;
1871 
1872  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1873 
1874  if (!skip_flag)
1875  lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
1876 
1877  if (skip_flag || lc->pu.merge_flag) {
1878  if (s->sh.max_num_merge_cand > 1)
1879  merge_idx = ff_hevc_merge_idx_decode(s);
1880  else
1881  merge_idx = 0;
1882 
1883  ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1884  partIdx, merge_idx, &current_mv);
1885  } else {
1886  hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1887  partIdx, merge_idx, &current_mv);
1888  }
1889 
1890  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1891  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1892 
1893  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1894  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1895  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1896 
1897  if (current_mv.pred_flag & PF_L0) {
1898  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1899  if (!ref0 || !ref0->frame)
1900  return;
1901  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1902  }
1903  if (current_mv.pred_flag & PF_L1) {
1904  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1905  if (!ref1 || !ref1->frame)
1906  return;
1907  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1908  }
1909 
1910  if (current_mv.pred_flag == PF_L0) {
1911  int x0_c = x0 >> s->ps.sps->hshift[1];
1912  int y0_c = y0 >> s->ps.sps->vshift[1];
1913  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1914  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1915 
1916  luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1917  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1918  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1919  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1920 
1921  if (s->ps.sps->chroma_format_idc) {
1922  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1923  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1924  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1925  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1926  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1927  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1928  }
1929  } else if (current_mv.pred_flag == PF_L1) {
1930  int x0_c = x0 >> s->ps.sps->hshift[1];
1931  int y0_c = y0 >> s->ps.sps->vshift[1];
1932  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1933  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1934 
1935  luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1936  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1937  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1938  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1939 
1940  if (s->ps.sps->chroma_format_idc) {
1941  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1942  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1943  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1944 
1945  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1946  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1947  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1948  }
1949  } else if (current_mv.pred_flag == PF_BI) {
1950  int x0_c = x0 >> s->ps.sps->hshift[1];
1951  int y0_c = y0 >> s->ps.sps->vshift[1];
1952  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1953  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1954 
1955  luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1956  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1957  ref1->frame, &current_mv.mv[1], &current_mv);
1958 
1959  if (s->ps.sps->chroma_format_idc) {
1960  chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1961  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1962 
1963  chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1964  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1965  }
1966  }
1967 }
1968 
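/* Illustration (not part of hevcdec.c): the three motion-compensation branches
 * above are selected by how pred_flag was accumulated. Assuming PF_L0 and PF_L1
 * are single-bit flags (which the & tests above imply) and PF_BI is their union,
 * the dispatch reduces to the following sketch with stand-in constants: */
enum { EX_PF_L0 = 1, EX_PF_L1 = 2, EX_PF_BI = EX_PF_L0 | EX_PF_L1 };

static const char *describe_prediction(int pred_flag)
{
    if (pred_flag == EX_PF_BI) return "bi-prediction: blend list 0 and list 1";
    if (pred_flag &  EX_PF_L0) return "uni-prediction from list 0";
    if (pred_flag &  EX_PF_L1) return "uni-prediction from list 1";
    return "no inter prediction (intra)";
}
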
1969 /**
1970  * 8.4.1
1971  */
1972 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1973  int prev_intra_luma_pred_flag)
1974 {
1975  HEVCLocalContext *lc = s->HEVClc;
1976  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1977  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1978  int min_pu_width = s->ps.sps->min_pu_width;
1979  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1980  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1981  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
1982 
1983  int cand_up = (lc->ctb_up_flag || y0b) ?
1984  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1985  int cand_left = (lc->ctb_left_flag || x0b) ?
1986  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1987 
1988  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1989 
1990  MvField *tab_mvf = s->ref->tab_mvf;
1991  int intra_pred_mode;
1992  int candidate[3];
1993  int i, j;
1994 
1995  // intra_pred_mode prediction does not cross vertical CTB boundaries
1996  if ((y0 - 1) < y_ctb)
1997  cand_up = INTRA_DC;
1998 
1999  if (cand_left == cand_up) {
2000  if (cand_left < 2) {
2001  candidate[0] = INTRA_PLANAR;
2002  candidate[1] = INTRA_DC;
2003  candidate[2] = INTRA_ANGULAR_26;
2004  } else {
2005  candidate[0] = cand_left;
2006  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2007  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2008  }
2009  } else {
2010  candidate[0] = cand_left;
2011  candidate[1] = cand_up;
2012  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2013  candidate[2] = INTRA_PLANAR;
2014  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2015  candidate[2] = INTRA_DC;
2016  } else {
2017  candidate[2] = INTRA_ANGULAR_26;
2018  }
2019  }
2020 
2021  if (prev_intra_luma_pred_flag) {
2022  intra_pred_mode = candidate[lc->pu.mpm_idx];
2023  } else {
2024  if (candidate[0] > candidate[1])
2025  FFSWAP(uint8_t, candidate[0], candidate[1]);
2026  if (candidate[0] > candidate[2])
2027  FFSWAP(uint8_t, candidate[0], candidate[2]);
2028  if (candidate[1] > candidate[2])
2029  FFSWAP(uint8_t, candidate[1], candidate[2]);
2030 
2031  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2032  for (i = 0; i < 3; i++)
2033  if (intra_pred_mode >= candidate[i])
2034  intra_pred_mode++;
2035  }
2036 
2037  /* write the intra prediction units into the mv array */
2038  if (!size_in_pus)
2039  size_in_pus = 1;
2040  for (i = 0; i < size_in_pus; i++) {
2041  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2042  intra_pred_mode, size_in_pus);
2043 
2044  for (j = 0; j < size_in_pus; j++) {
2045  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2046  }
2047  }
2048 
2049  return intra_pred_mode;
2050 }
2051 
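/* Illustration (not part of hevcdec.c): a worked example of the MPM derivation
 * above. If both neighbours use the same angular mode m >= 2, the candidates are
 * m and its two angular neighbours (with wrap-around in 2..33); e.g. m = 10 gives
 * {10, 9, 11}. Sketch of that branch: */
static void mpm_candidates_equal_angular(int m, int cand[3])
{
    cand[0] = m;
    cand[1] = 2 + ((m - 2 - 1 + 32) & 31); /* previous angular direction */
    cand[2] = 2 + ((m - 2 + 1) & 31);      /* next angular direction */
}
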
2052 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
2053  int log2_cb_size, int ct_depth)
2054 {
2055  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2056  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2057  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2058  int y;
2059 
2060  for (y = 0; y < length; y++)
2061  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2062  ct_depth, length);
2063 }
2064 
2065 static const uint8_t tab_mode_idx[] = {
2066  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2067  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2068 
2069 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
2070  int log2_cb_size)
2071 {
2072  HEVCLocalContext *lc = s->HEVClc;
2073  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2074  uint8_t prev_intra_luma_pred_flag[4];
2075  int split = lc->cu.part_mode == PART_NxN;
2076  int pb_size = (1 << log2_cb_size) >> split;
2077  int side = split + 1;
2078  int chroma_mode;
2079  int i, j;
2080 
2081  for (i = 0; i < side; i++)
2082  for (j = 0; j < side; j++)
2083  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
2084 
2085  for (i = 0; i < side; i++) {
2086  for (j = 0; j < side; j++) {
2087  if (prev_intra_luma_pred_flag[2 * i + j])
2088  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
2089  else
2090  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
2091 
2092  lc->pu.intra_pred_mode[2 * i + j] =
2093  luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2094  prev_intra_luma_pred_flag[2 * i + j]);
2095  }
2096  }
2097 
2098  if (s->ps.sps->chroma_format_idc == 3) {
2099  for (i = 0; i < side; i++) {
2100  for (j = 0; j < side; j++) {
2101  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2102  if (chroma_mode != 4) {
2103  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2104  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2105  else
2106  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2107  } else {
2108  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2109  }
2110  }
2111  }
2112  } else if (s->ps.sps->chroma_format_idc == 2) {
2113  int mode_idx;
2114  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2115  if (chroma_mode != 4) {
2116  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2117  mode_idx = 34;
2118  else
2119  mode_idx = intra_chroma_table[chroma_mode];
2120  } else {
2121  mode_idx = lc->pu.intra_pred_mode[0];
2122  }
2123  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2124  } else if (s->ps.sps->chroma_format_idc != 0) {
2125  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2126  if (chroma_mode != 4) {
2127  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2128  lc->pu.intra_pred_mode_c[0] = 34;
2129  else
2130  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2131  } else {
2132  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2133  }
2134  }
2135 }
2136 
2137 static void intra_prediction_unit_default_value(HEVCContext *s,
2138  int x0, int y0,
2139  int log2_cb_size)
2140 {
2141  HEVCLocalContext *lc = s->HEVClc;
2142  int pb_size = 1 << log2_cb_size;
2143  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2144  int min_pu_width = s->ps.sps->min_pu_width;
2145  MvField *tab_mvf = s->ref->tab_mvf;
2146  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2147  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2148  int j, k;
2149 
2150  if (size_in_pus == 0)
2151  size_in_pus = 1;
2152  for (j = 0; j < size_in_pus; j++)
2153  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2154  if (lc->cu.pred_mode == MODE_INTRA)
2155  for (j = 0; j < size_in_pus; j++)
2156  for (k = 0; k < size_in_pus; k++)
2157  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2158 }
2159 
2160 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2161 {
2162  int cb_size = 1 << log2_cb_size;
2163  HEVCLocalContext *lc = s->HEVClc;
2164  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2165  int length = cb_size >> log2_min_cb_size;
2166  int min_cb_width = s->ps.sps->min_cb_width;
2167  int x_cb = x0 >> log2_min_cb_size;
2168  int y_cb = y0 >> log2_min_cb_size;
2169  int idx = log2_cb_size - 2;
2170  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2171  int x, y, ret;
2172 
2173  lc->cu.x = x0;
2174  lc->cu.y = y0;
2175  lc->cu.pred_mode = MODE_INTRA;
2176  lc->cu.part_mode = PART_2Nx2N;
2177  lc->cu.intra_split_flag = 0;
2178 
2179  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2180  for (x = 0; x < 4; x++)
2181  lc->pu.intra_pred_mode[x] = 1;
2182  if (s->ps.pps->transquant_bypass_enable_flag) {
2183  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2184  if (lc->cu.cu_transquant_bypass_flag)
2185  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2186  } else
2187  lc->cu.cu_transquant_bypass_flag = 0;
2188 
2189  if (s->sh.slice_type != HEVC_SLICE_I) {
2190  uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2191 
2192  x = y_cb * min_cb_width + x_cb;
2193  for (y = 0; y < length; y++) {
2194  memset(&s->skip_flag[x], skip_flag, length);
2195  x += min_cb_width;
2196  }
2197  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2198  } else {
2199  x = y_cb * min_cb_width + x_cb;
2200  for (y = 0; y < length; y++) {
2201  memset(&s->skip_flag[x], 0, length);
2202  x += min_cb_width;
2203  }
2204  }
2205 
2206  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2207  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2208  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2209 
2210  if (!s->sh.disable_deblocking_filter_flag)
2211  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2212  } else {
2213  int pcm_flag = 0;
2214 
2215  if (s->sh.slice_type != HEVC_SLICE_I)
2216  lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
2217  if (lc->cu.pred_mode != MODE_INTRA ||
2218  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2219  lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2220  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2221  lc->cu.pred_mode == MODE_INTRA;
2222  }
2223 
2224  if (lc->cu.pred_mode == MODE_INTRA) {
2225  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2226  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2227  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2228  pcm_flag = ff_hevc_pcm_flag_decode(s);
2229  }
2230  if (pcm_flag) {
2231  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2232  ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2233  if (s->ps.sps->pcm.loop_filter_disable_flag)
2234  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2235 
2236  if (ret < 0)
2237  return ret;
2238  } else {
2239  intra_prediction_unit(s, x0, y0, log2_cb_size);
2240  }
2241  } else {
2242  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2243  switch (lc->cu.part_mode) {
2244  case PART_2Nx2N:
2245  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2246  break;
2247  case PART_2NxN:
2248  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2249  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2250  break;
2251  case PART_Nx2N:
2252  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2253  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2254  break;
2255  case PART_2NxnU:
2256  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2257  hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2258  break;
2259  case PART_2NxnD:
2260  hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2261  hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2262  break;
2263  case PART_nLx2N:
2264  hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2265  hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2266  break;
2267  case PART_nRx2N:
2268  hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2269  hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2270  break;
2271  case PART_NxN:
2272  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2273  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2274  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2275  hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2276  break;
2277  }
2278  }
2279 
2280  if (!pcm_flag) {
2281  int rqt_root_cbf = 1;
2282 
2283  if (lc->cu.pred_mode != MODE_INTRA &&
2284  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2285  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2286  }
2287  if (rqt_root_cbf) {
2288  static const int cbf[2] = { 0 };
2289  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2290  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2291  s->ps.sps->max_transform_hierarchy_depth_inter;
2292  ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2293  log2_cb_size,
2294  log2_cb_size, 0, 0, cbf, cbf);
2295  if (ret < 0)
2296  return ret;
2297  } else {
2298  if (!s->sh.disable_deblocking_filter_flag)
2299  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2300  }
2301  }
2302  }
2303 
2304  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2305  ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2306 
2307  x = y_cb * min_cb_width + x_cb;
2308  for (y = 0; y < length; y++) {
2309  memset(&s->qp_y_tab[x], lc->qp_y, length);
2310  x += min_cb_width;
2311  }
2312 
2313  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2314  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2315  lc->qPy_pred = lc->qp_y;
2316  }
2317 
2318  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2319 
2320  return 0;
2321 }
2322 
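/* Illustration (not part of hevcdec.c): the qp_block_mask test near the end of
 * hls_coding_unit() refreshes the QP predictor on quantization-group boundaries.
 * With log2_ctb_size = 6 and diff_cu_qp_delta_depth = 2 the mask is
 * (1 << (6 - 2)) - 1 = 15, i.e. a 16-sample group. Sketch of the check: */
static inline int closes_qp_group(int x0, int y0, int log2_cb_size, int qp_block_mask)
{
    int cb_size = 1 << log2_cb_size;
    return ((x0 + cb_size) & qp_block_mask) == 0 &&
           ((y0 + cb_size) & qp_block_mask) == 0;
}
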
2323 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2324  int log2_cb_size, int cb_depth)
2325 {
2326  HEVCLocalContext *lc = s->HEVClc;
2327  const int cb_size = 1 << log2_cb_size;
2328  int ret;
2329  int split_cu;
2330 
2331  lc->ct_depth = cb_depth;
2332  if (x0 + cb_size <= s->ps.sps->width &&
2333  y0 + cb_size <= s->ps.sps->height &&
2334  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2335  split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2336  } else {
2337  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2338  }
2339  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2340  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2341  lc->tu.is_cu_qp_delta_coded = 0;
2342  lc->tu.cu_qp_delta = 0;
2343  }
2344 
2345  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2346  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2347  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2348  }
2349 
2350  if (split_cu) {
2351  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2352  const int cb_size_split = cb_size >> 1;
2353  const int x1 = x0 + cb_size_split;
2354  const int y1 = y0 + cb_size_split;
2355 
2356  int more_data = 0;
2357 
2358  more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2359  if (more_data < 0)
2360  return more_data;
2361 
2362  if (more_data && x1 < s->ps.sps->width) {
2363  more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2364  if (more_data < 0)
2365  return more_data;
2366  }
2367  if (more_data && y1 < s->ps.sps->height) {
2368  more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2369  if (more_data < 0)
2370  return more_data;
2371  }
2372  if (more_data && x1 < s->ps.sps->width &&
2373  y1 < s->ps.sps->height) {
2374  more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2375  if (more_data < 0)
2376  return more_data;
2377  }
2378 
2379  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2380  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2381  lc->qPy_pred = lc->qp_y;
2382 
2383  if (more_data)
2384  return ((x1 + cb_size_split) < s->ps.sps->width ||
2385  (y1 + cb_size_split) < s->ps.sps->height);
2386  else
2387  return 0;
2388  } else {
2389  ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2390  if (ret < 0)
2391  return ret;
2392  if ((!((x0 + cb_size) %
2393  (1 << (s->ps.sps->log2_ctb_size))) ||
2394  (x0 + cb_size >= s->ps.sps->width)) &&
2395  (!((y0 + cb_size) %
2396  (1 << (s->ps.sps->log2_ctb_size))) ||
2397  (y0 + cb_size >= s->ps.sps->height))) {
2398  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2399  return !end_of_slice_flag;
2400  } else {
2401  return 1;
2402  }
2403  }
2404 
2405  return 0;
2406 }
2407 
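/* Illustration (not part of hevcdec.c): at the right and bottom picture borders
 * split_cu_flag is not coded; hls_coding_quadtree() above implies a split
 * whenever the CU would overhang the picture while still being larger than the
 * minimum CB size. Sketch of that rule: */
static inline int implicit_split(int x0, int y0, int log2_cb_size,
                                 int pic_width, int pic_height, int log2_min_cb_size)
{
    int cb_size   = 1 << log2_cb_size;
    int overhangs = x0 + cb_size > pic_width || y0 + cb_size > pic_height;
    return overhangs && log2_cb_size > log2_min_cb_size;
}
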
2408 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2409  int ctb_addr_ts)
2410 {
2411  HEVCLocalContext *lc = s->HEVClc;
2412  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2413  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2414  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2415 
2416  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2417 
2418  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2419  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2420  lc->first_qp_group = 1;
2421  lc->end_of_tiles_x = s->ps.sps->width;
2422  } else if (s->ps.pps->tiles_enabled_flag) {
2423  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2424  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2425  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2426  lc->first_qp_group = 1;
2427  }
2428  } else {
2429  lc->end_of_tiles_x = s->ps.sps->width;
2430  }
2431 
2432  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2433 
2434  lc->boundary_flags = 0;
2435  if (s->ps.pps->tiles_enabled_flag) {
2436  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2437  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2438  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2439  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2440  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2441  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2442  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2443  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2444  } else {
2445  if (ctb_addr_in_slice <= 0)
2446  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2447  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2448  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2449  }
2450 
2451  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2452  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2453  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2454  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2455 }
2456 
2457 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2458 {
2459  HEVCContext *s = avctxt->priv_data;
2460  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2461  int more_data = 1;
2462  int x_ctb = 0;
2463  int y_ctb = 0;
2464  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2465  int ret;
2466 
2467  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2468  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2469  return AVERROR_INVALIDDATA;
2470  }
2471 
2472  if (s->sh.dependent_slice_segment_flag) {
2473  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2474  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2475  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2476  return AVERROR_INVALIDDATA;
2477  }
2478  }
2479 
2480  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2481  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2482 
2483  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2484  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2485  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2486 
2487  ret = ff_hevc_cabac_init(s, ctb_addr_ts, 0);
2488  if (ret < 0) {
2489  s->tab_slice_address[ctb_addr_rs] = -1;
2490  return ret;
2491  }
2492 
2493  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2494 
2495  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2496  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2497  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2498 
2499  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2500  if (more_data < 0) {
2501  s->tab_slice_address[ctb_addr_rs] = -1;
2502  return more_data;
2503  }
2504 
2505 
2506  ctb_addr_ts++;
2507  ff_hevc_save_states(s, ctb_addr_ts);
2508  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2509  }
2510 
2511  if (x_ctb + ctb_size >= s->ps.sps->width &&
2512  y_ctb + ctb_size >= s->ps.sps->height)
2513  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2514 
2515  return ctb_addr_ts;
2516 }
2517 
2518 static int hls_slice_data(HEVCContext *s)
2519 {
2520  int arg[2];
2521  int ret[2];
2522 
2523  arg[0] = 0;
2524  arg[1] = 1;
2525 
2526  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2527  return ret[0];
2528 }
2529 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2530 {
2531  HEVCContext *s1 = avctxt->priv_data, *s;
2532  HEVCLocalContext *lc;
2533  int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2534  int more_data = 1;
2535  int *ctb_row_p = input_ctb_row;
2536  int ctb_row = ctb_row_p[job];
2537  int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2538  int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2539  int thread = ctb_row % s1->threads_number;
2540  int ret;
2541 
2542  s = s1->sList[self_id];
2543  lc = s->HEVClc;
2544 
2545  if(ctb_row) {
2546  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2547  if (ret < 0)
2548  goto error;
2549  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2550  }
2551 
2552  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2553  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2554  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2555 
2556  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2557 
2558  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2559 
2560  if (atomic_load(&s1->wpp_err)) {
2561  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2562  return 0;
2563  }
2564 
2565  ret = ff_hevc_cabac_init(s, ctb_addr_ts, thread);
2566  if (ret < 0)
2567  goto error;
2568  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2569  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2570 
2571  if (more_data < 0) {
2572  ret = more_data;
2573  goto error;
2574  }
2575 
2576  ctb_addr_ts++;
2577 
2578  ff_hevc_save_states(s, ctb_addr_ts);
2579  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2580  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2581 
2582  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2583  atomic_store(&s1->wpp_err, 1);
2584  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2585  return 0;
2586  }
2587 
2588  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2589  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2590  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2591  return ctb_addr_ts;
2592  }
2593  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2594  x_ctb+=ctb_size;
2595 
2596  if(x_ctb >= s->ps.sps->width) {
2597  break;
2598  }
2599  }
2600  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2601 
2602  return 0;
2603 error:
2604  s->tab_slice_address[ctb_addr_rs] = -1;
2605  atomic_store(&s1->wpp_err, 1);
2606  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2607  return ret;
2608 }
2609 
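/* Illustration (not part of hevcdec.c): with WPP every CTB row after the first
 * is a separate CABAC substream located via the slice header entry points;
 * hls_slice_data_wpp() below converts entry_point_offset[] into per-row
 * (offset, size) pairs. Simplified sketch that ignores the emulation-prevention
 * byte correction done by the real code: */
#include <stdint.h>

static void rows_from_entry_points(int64_t base, const unsigned *entry_point_offset,
                                   int num_offsets, int64_t nal_size,
                                   int64_t *offset, int64_t *size)
{
    int64_t pos = base; /* byte position right after the slice header */
    for (int i = 1; i <= num_offsets; i++) {
        pos += entry_point_offset[i - 1];
        offset[i - 1] = pos;
        size[i - 1]   = i < num_offsets ? entry_point_offset[i] : nal_size - pos;
    }
}
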
2610 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2611 {
2612  const uint8_t *data = nal->data;
2613  int length = nal->size;
2614  HEVCLocalContext *lc = s->HEVClc;
2615  int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2616  int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2617  int64_t offset;
2618  int64_t startheader, cmpt = 0;
2619  int i, j, res = 0;
2620 
2621  if (!ret || !arg) {
2622  av_free(ret);
2623  av_free(arg);
2624  return AVERROR(ENOMEM);
2625  }
2626 
2627  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2628  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2629  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2630  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2631  );
2632  res = AVERROR_INVALIDDATA;
2633  goto error;
2634  }
2635 
2636  ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2637 
2638  for (i = 1; i < s->threads_number; i++) {
2639  if (s->sList[i] && s->HEVClcList[i])
2640  continue;
2641  av_freep(&s->sList[i]);
2642  av_freep(&s->HEVClcList[i]);
2643  s->sList[i] = av_malloc(sizeof(HEVCContext));
2644  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2645  if (!s->sList[i] || !s->HEVClcList[i]) {
2646  res = AVERROR(ENOMEM);
2647  goto error;
2648  }
2649  memcpy(s->sList[i], s, sizeof(HEVCContext));
2650  s->sList[i]->HEVClc = s->HEVClcList[i];
2651  }
2652 
2653  offset = (lc->gb.index >> 3);
2654 
2655  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2656  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2657  startheader--;
2658  cmpt++;
2659  }
2660  }
2661 
2662  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2663  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2664  for (j = 0, cmpt = 0, startheader = offset
2665  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2666  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2667  startheader--;
2668  cmpt++;
2669  }
2670  }
2671  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2672  s->sh.offset[i - 1] = offset;
2673 
2674  }
2675  if (s->sh.num_entry_point_offsets != 0) {
2676  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2677  if (length < offset) {
2678  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2679  res = AVERROR_INVALIDDATA;
2680  goto error;
2681  }
2682  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2683  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2684 
2685  }
2686  s->data = data;
2687 
2688  for (i = 1; i < s->threads_number; i++) {
2689  s->sList[i]->HEVClc->first_qp_group = 1;
2690  s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2691  memcpy(s->sList[i], s, sizeof(HEVCContext));
2692  s->sList[i]->HEVClc = s->HEVClcList[i];
2693  }
2694 
2695  atomic_store(&s->wpp_err, 0);
2696  ff_reset_entries(s->avctx);
2697 
2698  for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2699  arg[i] = i;
2700  ret[i] = 0;
2701  }
2702 
2703  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2704  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2705 
2706  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2707  res += ret[i];
2708 error:
2709  av_free(ret);
2710  av_free(arg);
2711  return res;
2712 }
2713 
2714 static int set_side_data(HEVCContext *s)
2715 {
2716  AVFrame *out = s->ref->frame;
2717 
2718  if (s->sei.frame_packing.present &&
2719  s->sei.frame_packing.arrangement_type >= 3 &&
2720  s->sei.frame_packing.arrangement_type <= 5 &&
2721  s->sei.frame_packing.content_interpretation_type > 0 &&
2722  s->sei.frame_packing.content_interpretation_type < 3) {
2723  AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2724  if (!stereo)
2725  return AVERROR(ENOMEM);
2726 
2727  switch (s->sei.frame_packing.arrangement_type) {
2728  case 3:
2729  if (s->sei.frame_packing.quincunx_subsampling)
2730  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2731  else
2732  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2733  break;
2734  case 4:
2735  stereo->type = AV_STEREO3D_TOPBOTTOM;
2736  break;
2737  case 5:
2738  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2739  break;
2740  }
2741 
2742  if (s->sei.frame_packing.content_interpretation_type == 2)
2743  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2744 
2745  if (s->sei.frame_packing.arrangement_type == 5) {
2746  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2747  stereo->view = AV_STEREO3D_VIEW_LEFT;
2748  else
2749  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2750  }
2751  }
2752 
2753  if (s->sei.display_orientation.present &&
2754  (s->sei.display_orientation.anticlockwise_rotation ||
2755  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2756  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2757  AVFrameSideData *rotation = av_frame_new_side_data(out,
2758  AV_FRAME_DATA_DISPLAYMATRIX,
2759  sizeof(int32_t) * 9);
2760  if (!rotation)
2761  return AVERROR(ENOMEM);
2762 
2763  av_display_rotation_set((int32_t *)rotation->data, angle);
2764  av_display_matrix_flip((int32_t *)rotation->data,
2765  s->sei.display_orientation.hflip,
2766  s->sei.display_orientation.vflip);
2767  }
2768 
2769  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2770  // so the side data persists for the entire coded video sequence.
2771  if (s->sei.mastering_display.present > 0 &&
2772  IS_IRAP(s) && s->no_rasl_output_flag) {
2773  s->sei.mastering_display.present--;
2774  }
2775  if (s->sei.mastering_display.present) {
2776  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2777  const int mapping[3] = {2, 0, 1};
2778  const int chroma_den = 50000;
2779  const int luma_den = 10000;
2780  int i;
2781  AVMasteringDisplayMetadata *metadata =
2782  av_mastering_display_metadata_create_side_data(out);
2783  if (!metadata)
2784  return AVERROR(ENOMEM);
2785 
2786  for (i = 0; i < 3; i++) {
2787  const int j = mapping[i];
2788  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2789  metadata->display_primaries[i][0].den = chroma_den;
2790  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2791  metadata->display_primaries[i][1].den = chroma_den;
2792  }
2793  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2794  metadata->white_point[0].den = chroma_den;
2795  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2796  metadata->white_point[1].den = chroma_den;
2797 
2798  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2799  metadata->max_luminance.den = luma_den;
2800  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2801  metadata->min_luminance.den = luma_den;
2802  metadata->has_luminance = 1;
2803  metadata->has_primaries = 1;
2804 
2805  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2806  av_log(s->avctx, AV_LOG_DEBUG,
2807  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2808  av_q2d(metadata->display_primaries[0][0]),
2809  av_q2d(metadata->display_primaries[0][1]),
2810  av_q2d(metadata->display_primaries[1][0]),
2811  av_q2d(metadata->display_primaries[1][1]),
2812  av_q2d(metadata->display_primaries[2][0]),
2813  av_q2d(metadata->display_primaries[2][1]),
2814  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2815  av_log(s->avctx, AV_LOG_DEBUG,
2816  "min_luminance=%f, max_luminance=%f\n",
2817  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2818  }
2819  // Decrement the content light level flag when IRAP frame has no_rasl_output_flag=1
2820  // so the side data persists for the entire coded video sequence.
2821  if (s->sei.content_light.present > 0 &&
2822  IS_IRAP(s) && s->no_rasl_output_flag) {
2823  s->sei.content_light.present--;
2824  }
2825  if (s->sei.content_light.present) {
2826  AVContentLightMetadata *metadata =
2827  av_content_light_metadata_create_side_data(out);
2828  if (!metadata)
2829  return AVERROR(ENOMEM);
2830  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2831  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2832 
2833  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2834  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2835  metadata->MaxCLL, metadata->MaxFALL);
2836  }
2837 
2838  if (s->sei.a53_caption.buf_ref) {
2839  HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
2840 
2841  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
2842  if (!sd)
2843  av_buffer_unref(&a53->buf_ref);
2844  a53->buf_ref = NULL;
2845  }
2846 
2847  for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
2848  HEVCSEIUnregistered *unreg = &s->sei.unregistered;
2849 
2850  if (unreg->buf_ref[i]) {
2851  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
2852  AV_FRAME_DATA_SEI_UNREGISTERED,
2853  unreg->buf_ref[i]);
2854  if (!sd)
2855  av_buffer_unref(&unreg->buf_ref[i]);
2856  unreg->buf_ref[i] = NULL;
2857  }
2858  }
2859  s->sei.unregistered.nb_buf_ref = 0;
2860 
2861  if (s->sei.timecode.present) {
2862  uint32_t *tc_sd;
2863  char tcbuf[AV_TIMECODE_STR_SIZE];
2864  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2865  sizeof(uint32_t) * 4);
2866  if (!tcside)
2867  return AVERROR(ENOMEM);
2868 
2869  tc_sd = (uint32_t*)tcside->data;
2870  tc_sd[0] = s->sei.timecode.num_clock_ts;
2871 
2872  for (int i = 0; i < tc_sd[0]; i++) {
2873  int drop = s->sei.timecode.cnt_dropped_flag[i];
2874  int hh = s->sei.timecode.hours_value[i];
2875  int mm = s->sei.timecode.minutes_value[i];
2876  int ss = s->sei.timecode.seconds_value[i];
2877  int ff = s->sei.timecode.n_frames[i];
2878 
2879  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2880  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2881  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2882  }
2883 
2884  s->sei.timecode.num_clock_ts = 0;
2885  }
2886 
2887  if (s->sei.dynamic_hdr_plus.info) {
2888  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_plus.info);
2889  if (!info_ref)
2890  return AVERROR(ENOMEM);
2891 
2892  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2893  av_buffer_unref(&info_ref);
2894  return AVERROR(ENOMEM);
2895  }
2896  }
2897 
2898  return 0;
2899 }
2900 
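/* Illustration (not part of hevcdec.c): the S12M timecode side data written
 * above is laid out as: word 0 = number of clock timestamps, words 1..n = one
 * packed SMPTE timecode each. Standalone sketch packing and formatting one
 * timecode (25 fps and the 01:02:03:04 value are arbitrary examples): */
#include <stdio.h>
#include "libavutil/timecode.h"

static void example_pack_timecode(void)
{
    AVRational rate = { 25, 1 };
    char buf[AV_TIMECODE_STR_SIZE];
    uint32_t packed = av_timecode_get_smpte(rate, 0, 1, 2, 3, 4); /* drop=0, hh:mm:ss:ff */
    av_timecode_make_smpte_tc_string2(buf, rate, packed, 0, 0);
    printf("%s\n", buf); /* prints "01:02:03:04" */
}
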
2901 static int hevc_frame_start(HEVCContext *s)
2902 {
2903  HEVCLocalContext *lc = s->HEVClc;
2904  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2905  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2906  int ret;
2907 
2908  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2909  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2910  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2911  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2912  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2913 
2914  s->is_decoded = 0;
2915  s->first_nal_type = s->nal_unit_type;
2916 
2917  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2918 
2919  if (s->ps.pps->tiles_enabled_flag)
2920  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2921 
2922  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2923  if (ret < 0)
2924  goto fail;
2925 
2926  ret = ff_hevc_frame_rps(s);
2927  if (ret < 0) {
2928  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2929  goto fail;
2930  }
2931 
2932  s->ref->frame->key_frame = IS_IRAP(s);
2933 
2934  ret = set_side_data(s);
2935  if (ret < 0)
2936  goto fail;
2937 
2938  s->frame->pict_type = 3 - s->sh.slice_type;
2939 
2940  if (!IS_IRAP(s))
2941  ff_hevc_bump_frame(s);
2942 
2943  av_frame_unref(s->output_frame);
2944  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2945  if (ret < 0)
2946  goto fail;
2947 
2948  if (!s->avctx->hwaccel)
2949  ff_thread_finish_setup(s->avctx);
2950 
2951  return 0;
2952 
2953 fail:
2954  if (s->ref)
2955  ff_hevc_unref_frame(s, s->ref, ~0);
2956  s->ref = NULL;
2957  return ret;
2958 }
2959 
2960 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2961 {
2962  HEVCLocalContext *lc = s->HEVClc;
2963  GetBitContext *gb = &lc->gb;
2964  int ctb_addr_ts, ret;
2965 
2966  *gb = nal->gb;
2967  s->nal_unit_type = nal->type;
2968  s->temporal_id = nal->temporal_id;
2969 
2970  switch (s->nal_unit_type) {
2971  case HEVC_NAL_VPS:
2972  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2973  ret = s->avctx->hwaccel->decode_params(s->avctx,
2974  nal->type,
2975  nal->raw_data,
2976  nal->raw_size);
2977  if (ret < 0)
2978  goto fail;
2979  }
2980  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2981  if (ret < 0)
2982  goto fail;
2983  break;
2984  case HEVC_NAL_SPS:
2985  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2986  ret = s->avctx->hwaccel->decode_params(s->avctx,
2987  nal->type,
2988  nal->raw_data,
2989  nal->raw_size);
2990  if (ret < 0)
2991  goto fail;
2992  }
2993  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2994  s->apply_defdispwin);
2995  if (ret < 0)
2996  goto fail;
2997  break;
2998  case HEVC_NAL_PPS:
2999  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3000  ret = s->avctx->hwaccel->decode_params(s->avctx,
3001  nal->type,
3002  nal->raw_data,
3003  nal->raw_size);
3004  if (ret < 0)
3005  goto fail;
3006  }
3007  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3008  if (ret < 0)
3009  goto fail;
3010  break;
3011  case HEVC_NAL_SEI_PREFIX:
3012  case HEVC_NAL_SEI_SUFFIX:
3013  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3014  ret = s->avctx->hwaccel->decode_params(s->avctx,
3015  nal->type,
3016  nal->raw_data,
3017  nal->raw_size);
3018  if (ret < 0)
3019  goto fail;
3020  }
3021  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3022  if (ret < 0)
3023  goto fail;
3024  break;
3025  case HEVC_NAL_TRAIL_R:
3026  case HEVC_NAL_TRAIL_N:
3027  case HEVC_NAL_TSA_N:
3028  case HEVC_NAL_TSA_R:
3029  case HEVC_NAL_STSA_N:
3030  case HEVC_NAL_STSA_R:
3031  case HEVC_NAL_BLA_W_LP:
3032  case HEVC_NAL_BLA_W_RADL:
3033  case HEVC_NAL_BLA_N_LP:
3034  case HEVC_NAL_IDR_W_RADL:
3035  case HEVC_NAL_IDR_N_LP:
3036  case HEVC_NAL_CRA_NUT:
3037  case HEVC_NAL_RADL_N:
3038  case HEVC_NAL_RADL_R:
3039  case HEVC_NAL_RASL_N:
3040  case HEVC_NAL_RASL_R:
3041  ret = hls_slice_header(s);
3042  if (ret < 0) {
3043  // hls_slice_header() does not clean up on failure, so the state is now inconsistent and cannot be used on dependent slices
3044  s->slice_initialized = 0;
3045  return ret;
3046  }
3047  if (ret == 1) {
3048  ret = AVERROR_INVALIDDATA;
3049  goto fail;
3050  }
3051 
3052 
3053  if (
3054  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3055  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3056  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3057  break;
3058  }
3059 
3060  if (s->sh.first_slice_in_pic_flag) {
3061  if (s->max_ra == INT_MAX) {
3062  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3063  s->max_ra = s->poc;
3064  } else {
3065  if (IS_IDR(s))
3066  s->max_ra = INT_MIN;
3067  }
3068  }
3069 
3070  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3071  s->poc <= s->max_ra) {
3072  s->is_decoded = 0;
3073  break;
3074  } else {
3075  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3076  s->max_ra = INT_MIN;
3077  }
3078 
3079  s->overlap ++;
3080  ret = hevc_frame_start(s);
3081  if (ret < 0)
3082  return ret;
3083  } else if (!s->ref) {
3084  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3085  goto fail;
3086  }
3087 
3088  if (s->nal_unit_type != s->first_nal_type) {
3089  av_log(s->avctx, AV_LOG_ERROR,
3090  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3091  s->first_nal_type, s->nal_unit_type);
3092  return AVERROR_INVALIDDATA;
3093  }
3094 
3095  if (!s->sh.dependent_slice_segment_flag &&
3096  s->sh.slice_type != HEVC_SLICE_I) {
3097  ret = ff_hevc_slice_rpl(s);
3098  if (ret < 0) {
3099  av_log(s->avctx, AV_LOG_WARNING,
3100  "Error constructing the reference lists for the current slice.\n");
3101  goto fail;
3102  }
3103  }
3104 
3105  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3106  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3107  if (ret < 0)
3108  goto fail;
3109  }
3110 
3111  if (s->avctx->hwaccel) {
3112  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3113  if (ret < 0)
3114  goto fail;
3115  } else {
3116  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3117  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3118  else
3119  ctb_addr_ts = hls_slice_data(s);
3120  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3121  s->is_decoded = 1;
3122  }
3123 
3124  if (ctb_addr_ts < 0) {
3125  ret = ctb_addr_ts;
3126  goto fail;
3127  }
3128  }
3129  break;
3130  case HEVC_NAL_EOS_NUT:
3131  case HEVC_NAL_EOB_NUT:
3132  s->seq_decode = (s->seq_decode + 1) & 0xff;
3133  s->max_ra = INT_MAX;
3134  break;
3135  case HEVC_NAL_AUD:
3136  case HEVC_NAL_FD_NUT:
3137  break;
3138  default:
3139  av_log(s->avctx, AV_LOG_INFO,
3140  "Skipping NAL unit %d\n", s->nal_unit_type);
3141  }
3142 
3143  return 0;
3144 fail:
3145  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3146  return ret;
3147  return 0;
3148 }
3149 
3150 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3151 {
3152  int i, ret = 0;
3153  int eos_at_start = 1;
3154 
3155  s->ref = NULL;
3156  s->last_eos = s->eos;
3157  s->eos = 0;
3158  s->overlap = 0;
3159 
3160  /* split the input packet into NAL units, so we know the upper bound on the
3161  * number of slices in the frame */
3162  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3163  s->nal_length_size, s->avctx->codec_id, 1, 0);
3164  if (ret < 0) {
3165  av_log(s->avctx, AV_LOG_ERROR,
3166  "Error splitting the input into NAL units.\n");
3167  return ret;
3168  }
3169 
3170  for (i = 0; i < s->pkt.nb_nals; i++) {
3171  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3172  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3173  if (eos_at_start) {
3174  s->last_eos = 1;
3175  } else {
3176  s->eos = 1;
3177  }
3178  } else {
3179  eos_at_start = 0;
3180  }
3181  }
3182 
3183  /* decode the NAL units */
3184  for (i = 0; i < s->pkt.nb_nals; i++) {
3185  H2645NAL *nal = &s->pkt.nals[i];
3186 
3187  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3188  (s->avctx->skip_frame >= AVDISCARD_NONREF
3189  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3190  continue;
3191 
3192  ret = decode_nal_unit(s, nal);
3193  if (ret >= 0 && s->overlap > 2)
3194  ret = AVERROR_INVALIDDATA;
3195  if (ret < 0) {
3196  av_log(s->avctx, AV_LOG_WARNING,
3197  "Error parsing NAL unit #%d.\n", i);
3198  goto fail;
3199  }
3200  }
3201 
3202 fail:
3203  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3204  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3205 
3206  return ret;
3207 }
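The skip checks at the top of the NAL loop above honour AVCodecContext.skip_frame; a small caller-side sketch (illustrative, not part of this file) of asking the decoder to drop non-reference slices:

#include <libavcodec/avcodec.h>

/* Illustrative: matches the AVDISCARD_NONREF test in decode_nal_units(),
 * which then skips non-reference VCL NAL units entirely. */
static void hevc_drop_nonref(AVCodecContext *ctx)
{
    ctx->skip_frame = AVDISCARD_NONREF;
}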
3208 
3209 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
3210 {
3211  int i;
3212  for (i = 0; i < 16; i++)
3213  av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3214 }
3215 
3216 static int verify_md5(HEVCContext *s, AVFrame *frame)
3217 {
3218  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3219  int pixel_shift;
3220  int i, j;
3221 
3222  if (!desc)
3223  return AVERROR(EINVAL);
3224 
3225  pixel_shift = desc->comp[0].depth > 8;
3226 
3227  av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
3228  s->poc);
3229 
3230  /* the checksums are LE, so we have to byteswap for >8bpp formats
3231  * on BE arches */
3232 #if HAVE_BIGENDIAN
3233  if (pixel_shift && !s->checksum_buf) {
3234  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3235  FFMAX3(frame->linesize[0], frame->linesize[1],
3236  frame->linesize[2]));
3237  if (!s->checksum_buf)
3238  return AVERROR(ENOMEM);
3239  }
3240 #endif
3241 
3242  for (i = 0; frame->data[i]; i++) {
3243  int width = s->avctx->coded_width;
3244  int height = s->avctx->coded_height;
3245  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3246  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3247  uint8_t md5[16];
3248 
3249  av_md5_init(s->md5_ctx);
3250  for (j = 0; j < h; j++) {
3251  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3252 #if HAVE_BIGENDIAN
3253  if (pixel_shift) {
3254  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3255  (const uint16_t *) src, w);
3256  src = s->checksum_buf;
3257  }
3258 #endif
3259  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3260  }
3261  av_md5_final(s->md5_ctx, md5);
3262 
3263  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3264  av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
3265  print_md5(s->avctx, AV_LOG_DEBUG, md5);
3266  av_log (s->avctx, AV_LOG_DEBUG, "; ");
3267  } else {
3268  av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
3269  print_md5(s->avctx, AV_LOG_ERROR, md5);
3270  av_log (s->avctx, AV_LOG_ERROR, " != ");
3271  print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
3272  av_log (s->avctx, AV_LOG_ERROR, "\n");
3273  return AVERROR_INVALIDDATA;
3274  }
3275  }
3276 
3277  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3278 
3279  return 0;
3280 }
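verify_md5() hashes each plane row by row, w << pixel_shift bytes per row, with pixel_shift set for formats stored as 16-bit samples. A standalone sketch of the same scheme (assuming a little-endian host so no byteswap is needed; the helper name is made up):

#include <errno.h>
#include <libavutil/frame.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>

/* Illustrative plane hash: w samples per row, 1 or 2 bytes per sample
 * depending on pixel_shift, exactly as in the loop above. */
static int hash_plane_md5(const AVFrame *frame, int plane, int w, int h,
                          int pixel_shift, uint8_t out[16])
{
    struct AVMD5 *md5 = av_md5_alloc();
    int y;

    if (!md5)
        return AVERROR(ENOMEM);
    av_md5_init(md5);
    for (y = 0; y < h; y++) {
        const uint8_t *row = frame->data[plane] + y * frame->linesize[plane];
        av_md5_update(md5, row, w << pixel_shift);
    }
    av_md5_final(md5, out);
    av_free(md5);
    return 0;
}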
3281 
3282 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3283 {
3284  int ret, i;
3285 
3286  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3287  &s->nal_length_size, s->avctx->err_recognition,
3288  s->apply_defdispwin, s->avctx);
3289  if (ret < 0)
3290  return ret;
3291 
3292  /* export stream parameters from the first SPS */
3293  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3294  if (first && s->ps.sps_list[i]) {
3295  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3296  export_stream_params(s, sps);
3297  break;
3298  }
3299  }
3300 
3301  /* export stream parameters from SEI */
3302  ret = export_stream_params_from_sei(s);
3303  if (ret < 0)
3304  return ret;
3305 
3306  return 0;
3307 }
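The extradata parsed here normally originates from the demuxer's codec parameters (for example an hvcC box from MP4 input); a minimal caller-side sketch (illustrative helper name) of handing it to the decoder context before avcodec_open2():

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Illustrative: copies st->codecpar->extradata into ctx->extradata, which
 * hevc_decode_init() below then passes to hevc_decode_extradata(). */
static int hevc_setup_from_stream(AVCodecContext *ctx, const AVStream *st)
{
    return avcodec_parameters_to_context(ctx, st->codecpar);
}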
3308 
3309 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
3310  AVPacket *avpkt)
3311 {
3312  int ret;
3313  buffer_size_t new_extradata_size;
3314  uint8_t *new_extradata;
3315  HEVCContext *s = avctx->priv_data;
3316 
3317  if (!avpkt->size) {
3318  ret = ff_hevc_output_frame(s, data, 1);
3319  if (ret < 0)
3320  return ret;
3321 
3322  *got_output = ret;
3323  return 0;
3324  }
3325 
3326  new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
3327  &new_extradata_size);
3328  if (new_extradata && new_extradata_size > 0) {
3329  ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0);
3330  if (ret < 0)
3331  return ret;
3332  }
3333 
3334  s->ref = NULL;
3335  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3336  if (ret < 0)
3337  return ret;
3338 
3339  if (avctx->hwaccel) {
3340  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3341  av_log(avctx, AV_LOG_ERROR,
3342  "hardware accelerator failed to decode picture\n");
3343  ff_hevc_unref_frame(s, s->ref, ~0);
3344  return ret;
3345  }
3346  } else {
3347  /* verify the SEI checksum */
3348  if (avctx->err_recognition & AV_EF_CRCCHECK && s->ref && s->is_decoded &&
3349  s->sei.picture_hash.is_md5) {
3350  ret = verify_md5(s, s->ref->frame);
3351  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3352  ff_hevc_unref_frame(s, s->ref, ~0);
3353  return ret;
3354  }
3355  }
3356  }
3357  s->sei.picture_hash.is_md5 = 0;
3358 
3359  if (s->is_decoded) {
3360  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3361  s->is_decoded = 0;
3362  }
3363 
3364  if (s->output_frame->buf[0]) {
3365  av_frame_move_ref(data, s->output_frame);
3366  *got_output = 1;
3367  }
3368 
3369  return avpkt->size;
3370 }
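hevc_decode_frame() is the legacy .decode callback; applications reach it through the generic send/receive API, where one packet in may produce at most one frame out and a NULL packet drains delayed frames. A minimal caller-side sketch (illustrative helper name):

#include <libavcodec/avcodec.h>

/* Illustrative: pkt == NULL flushes the decoder; AVERROR(EAGAIN) from
 * avcodec_receive_frame() simply means no frame is ready yet. */
static int hevc_decode_one(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frm)
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;
    ret = avcodec_receive_frame(ctx, frm);
    if (ret == AVERROR(EAGAIN))
        return 0;
    return ret;
}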
3371 
3372 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3373 {
3374  int ret;
3375 
3376  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3377  if (ret < 0)
3378  return ret;
3379 
3380  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3381  if (!dst->tab_mvf_buf)
3382  goto fail;
3383  dst->tab_mvf = src->tab_mvf;
3384 
3385  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3386  if (!dst->rpl_tab_buf)
3387  goto fail;
3388  dst->rpl_tab = src->rpl_tab;
3389 
3390  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3391  if (!dst->rpl_buf)
3392  goto fail;
3393 
3394  dst->poc = src->poc;
3395  dst->ctb_count = src->ctb_count;
3396  dst->flags = src->flags;
3397  dst->sequence = src->sequence;
3398 
3399  if (src->hwaccel_picture_private) {
3400  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3401  if (!dst->hwaccel_priv_buf)
3402  goto fail;
3403  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3404  }
3405 
3406  return 0;
3407 fail:
3408  ff_hevc_unref_frame(s, dst, ~0);
3409  return AVERROR(ENOMEM);
3410 }
3411 
3412 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3413 {
3414  HEVCContext *s = avctx->priv_data;
3415  int i;
3416 
3417  pic_arrays_free(s);
3418 
3419  av_freep(&s->md5_ctx);
3420 
3421  av_freep(&s->cabac_state);
3422 
3423  for (i = 0; i < 3; i++) {
3424  av_freep(&s->sao_pixel_buffer_h[i]);
3425  av_freep(&s->sao_pixel_buffer_v[i]);
3426  }
3427  av_frame_free(&s->output_frame);
3428 
3429  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3430  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3431  av_frame_free(&s->DPB[i].frame);
3432  }
3433 
3434  ff_hevc_ps_uninit(&s->ps);
3435 
3436  av_freep(&s->sh.entry_point_offset);
3437  av_freep(&s->sh.offset);
3438  av_freep(&s->sh.size);
3439 
3440  if (s->HEVClcList && s->sList) {
3441  for (i = 1; i < s->threads_number; i++) {
3442  av_freep(&s->HEVClcList[i]);
3443  av_freep(&s->sList[i]);
3444  }
3445  }
3446  av_freep(&s->HEVClc);
3447  av_freep(&s->HEVClcList);
3448  av_freep(&s->sList);
3449 
3450  ff_h2645_packet_uninit(&s->pkt);
3451 
3452  ff_hevc_reset_sei(&s->sei);
3453 
3454  return 0;
3455 }
3456 
3457 static av_cold int hevc_init_context(AVCodecContext *avctx)
3458 {
3459  HEVCContext *s = avctx->priv_data;
3460  int i;
3461 
3462  s->avctx = avctx;
3463 
3464  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3465  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3466  s->sList = av_mallocz(sizeof(HEVCContext*) * s->threads_number);
3467  if (!s->HEVClc || !s->HEVClcList || !s->sList)
3468  goto fail;
3469  s->HEVClcList[0] = s->HEVClc;
3470  s->sList[0] = s;
3471 
3472  s->cabac_state = av_malloc(HEVC_CONTEXTS);
3473  if (!s->cabac_state)
3474  goto fail;
3475 
3476  s->output_frame = av_frame_alloc();
3477  if (!s->output_frame)
3478  goto fail;
3479 
3480  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3481  s->DPB[i].frame = av_frame_alloc();
3482  if (!s->DPB[i].frame)
3483  goto fail;
3484  s->DPB[i].tf.f = s->DPB[i].frame;
3485  }
3486 
3487  s->max_ra = INT_MAX;
3488 
3489  s->md5_ctx = av_md5_alloc();
3490  if (!s->md5_ctx)
3491  goto fail;
3492 
3493  ff_bswapdsp_init(&s->bdsp);
3494 
3495  s->context_initialized = 1;
3496  s->eos = 0;
3497 
3498  ff_hevc_reset_sei(&s->sei);
3499 
3500  return 0;
3501 
3502 fail:
3503  hevc_decode_free(avctx);
3504  return AVERROR(ENOMEM);
3505 }
3506 
3507 #if HAVE_THREADS
3508 static int hevc_update_thread_context(AVCodecContext *dst,
3509  const AVCodecContext *src)
3510 {
3511  HEVCContext *s = dst->priv_data;
3512  HEVCContext *s0 = src->priv_data;
3513  int i, ret;
3514 
3515  if (!s->context_initialized) {
3516  ret = hevc_init_context(dst);
3517  if (ret < 0)
3518  return ret;
3519  }
3520 
3521  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3522  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3523  if (s0->DPB[i].frame->buf[0]) {
3524  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3525  if (ret < 0)
3526  return ret;
3527  }
3528  }
3529 
3530  if (s->ps.sps != s0->ps.sps)
3531  s->ps.sps = NULL;
3532  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3533  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3534  if (ret < 0)
3535  return ret;
3536  }
3537 
3538  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3539  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3540  if (ret < 0)
3541  return ret;
3542  }
3543 
3544  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3545  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3546  if (ret < 0)
3547  return ret;
3548  }
3549 
3550  if (s->ps.sps != s0->ps.sps)
3551  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3552  return ret;
3553 
3554  s->seq_decode = s0->seq_decode;
3555  s->seq_output = s0->seq_output;
3556  s->pocTid0 = s0->pocTid0;
3557  s->max_ra = s0->max_ra;
3558  s->eos = s0->eos;
3559  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3560 
3561  s->is_nalff = s0->is_nalff;
3562  s->nal_length_size = s0->nal_length_size;
3563 
3564  s->threads_number = s0->threads_number;
3565  s->threads_type = s0->threads_type;
3566 
3567  if (s0->eos) {
3568  s->seq_decode = (s->seq_decode + 1) & 0xff;
3569  s->max_ra = INT_MAX;
3570  }
3571 
3572  ret = av_buffer_replace(&s->sei.a53_caption.buf_ref, s0->sei.a53_caption.buf_ref);
3573  if (ret < 0)
3574  return ret;
3575 
3576  for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
3577  av_buffer_unref(&s->sei.unregistered.buf_ref[i]);
3578  s->sei.unregistered.nb_buf_ref = 0;
3579 
3580  if (s0->sei.unregistered.nb_buf_ref) {
3581  ret = av_reallocp_array(&s->sei.unregistered.buf_ref,
3582  s0->sei.unregistered.nb_buf_ref,
3583  sizeof(*s->sei.unregistered.buf_ref));
3584  if (ret < 0)
3585  return ret;
3586 
3587  for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
3588  s->sei.unregistered.buf_ref[i] = av_buffer_ref(s0->sei.unregistered.buf_ref[i]);
3589  if (!s->sei.unregistered.buf_ref[i])
3590  return AVERROR(ENOMEM);
3591  s->sei.unregistered.nb_buf_ref++;
3592  }
3593  }
3594 
3595  ret = av_buffer_replace(&s->sei.dynamic_hdr_plus.info, s0->sei.dynamic_hdr_plus.info);
3596  if (ret < 0)
3597  return ret;
3598 
3599  s->sei.frame_packing = s0->sei.frame_packing;
3600  s->sei.display_orientation = s0->sei.display_orientation;
3601  s->sei.mastering_display = s0->sei.mastering_display;
3602  s->sei.content_light = s0->sei.content_light;
3603  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3604 
3605  ret = export_stream_params_from_sei(s);
3606  if (ret < 0)
3607  return ret;
3608 
3609  return 0;
3610 }
3611 #endif
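hevc_update_thread_context() is invoked by libavcodec's frame-threading machinery to carry the DPB, parameter sets and SEI state between per-thread contexts; a caller only has to request frame threads before opening the decoder. A short sketch (illustrative helper name, defaults assumed otherwise):

#include <libavcodec/avcodec.h>

/* Illustrative: with thread_count > 1 and FF_THREAD_FRAME allowed,
 * libavcodec clones the context and calls hevc_update_thread_context(). */
static void hevc_request_frame_threads(AVCodecContext *ctx, int n_threads)
{
    ctx->thread_count = n_threads;       /* 0 = let libavcodec choose */
    ctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
}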
3612 
3613 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3614 {
3615  HEVCContext *s = avctx->priv_data;
3616  int ret;
3617 
3618  if(avctx->active_thread_type & FF_THREAD_SLICE)
3619  s->threads_number = avctx->thread_count;
3620  else
3621  s->threads_number = 1;
3622 
3623  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3624  s->threads_type = FF_THREAD_FRAME;
3625  else
3626  s->threads_type = FF_THREAD_SLICE;
3627 
3628  ret = hevc_init_context(avctx);
3629  if (ret < 0)
3630  return ret;
3631 
3632  s->enable_parallel_tiles = 0;
3633  s->sei.picture_timing.picture_struct = 0;
3634  s->eos = 1;
3635 
3636  atomic_init(&s->wpp_err, 0);
3637 
3638  if (!avctx->internal->is_copy) {
3639  if (avctx->extradata_size > 0 && avctx->extradata) {
3640  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3641  if (ret < 0) {
3642  return ret;
3643  }
3644  }
3645  }
3646 
3647  return 0;
3648 }
3649 
3650 static void hevc_decode_flush(AVCodecContext *avctx)
3651 {
3652  HEVCContext *s = avctx->priv_data;
3653  ff_hevc_flush_dpb(s);
3654  ff_hevc_reset_sei(&s->sei);
3655  s->max_ra = INT_MAX;
3656  s->eos = 1;
3657 }
3658 
3659 #define OFFSET(x) offsetof(HEVCContext, x)
3660 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3661 
3662 static const AVOption options[] = {
3663  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3664  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3665  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3666  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3667  { NULL },
3668 };
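These private options are reachable through the usual AVDictionary argument of avcodec_open2() (or via av_opt_set() with AV_OPT_SEARCH_CHILDREN on the codec context); a short sketch (illustrative helper name):

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Illustrative: enables cropping to the VUI default display window. */
static int hevc_open_with_defdispwin(AVCodecContext *ctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "apply_defdispwin", "1", 0);
    ret = avcodec_open2(ctx, codec, &opts);
    av_dict_free(&opts);   /* entries not consumed by the decoder are freed too */
    return ret;
}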
3669 
3670 static const AVClass hevc_decoder_class = {
3671  .class_name = "HEVC decoder",
3672  .item_name = av_default_item_name,
3673  .option = options,
3674  .version = LIBAVUTIL_VERSION_INT,
3675 };
3676 
3677 AVCodec ff_hevc_decoder = {
3678  .name = "hevc",
3679  .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3680  .type = AVMEDIA_TYPE_VIDEO,
3681  .id = AV_CODEC_ID_HEVC,
3682  .priv_data_size = sizeof(HEVCContext),
3683  .priv_class = &hevc_decoder_class,
3684  .init = hevc_decode_init,
3685  .close = hevc_decode_free,
3686  .decode = hevc_decode_frame,
3687  .flush = hevc_decode_flush,
3688  .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
3689  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3690  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3691  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |
3692  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
3693  .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3694  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3695 #if CONFIG_HEVC_DXVA2_HWACCEL
3696  HWACCEL_DXVA2(hevc),
3697 #endif
3698 #if CONFIG_HEVC_D3D11VA_HWACCEL
3699  HWACCEL_D3D11VA(hevc),
3700 #endif
3701 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3702  HWACCEL_D3D11VA2(hevc),
3703 #endif
3704 #if CONFIG_HEVC_NVDEC_HWACCEL
3705  HWACCEL_NVDEC(hevc),
3706 #endif
3707 #if CONFIG_HEVC_VAAPI_HWACCEL
3708  HWACCEL_VAAPI(hevc),
3709 #endif
3710 #if CONFIG_HEVC_VDPAU_HWACCEL
3711  HWACCEL_VDPAU(hevc),
3712 #endif
3713 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3714  HWACCEL_VIDEOTOOLBOX(hevc),
3715 #endif
3716  NULL
3717  },
3718 };
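The hw_configs table above is what avcodec_get_hw_config() exposes to callers probing for hardware decoding support; a small sketch (illustrative helper name) that lists the device types compiled into this build:

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

/* Illustrative: walks the decoder's hardware configurations and prints the
 * device type of every hwaccel that works through a hardware device context. */
static void hevc_list_hwaccels(void)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("hevc");
    const AVCodecHWConfig *cfg;
    int i;

    for (i = 0; codec && (cfg = avcodec_get_hw_config(codec, i)); i++)
        if (cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)
            printf("hevc hwaccel device type: %s\n",
                   av_hwdevice_get_type_name(cfg->device_type));
}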