const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
int log2_min_cb_size = sps->log2_min_cb_size;
int pic_size_in_ctb  = ((width  >> log2_min_cb_size) + 1) *
                       ((height >> log2_min_cb_size) + 1);
int ctb_count   = sps->ctb_width * sps->ctb_height;
int min_pu_size = sps->min_pu_width * sps->min_pu_height;

s->bs_width  = (width  >> 2) + 1;
s->bs_height = (height >> 2) + 1;
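/* [editor's note] bs_width/bs_height span the picture on a 4-sample grid: the
 * deblocking boundary-strength maps are stored per 4x4 luma block, hence ">> 2". */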
if (!s->sao || !s->deblock)
if (!s->skip_flag || !s->tab_ct_depth)
if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
                                  sizeof(*s->tab_slice_address));
                                  sizeof(*s->qp_y_tab));
if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
if (!s->horizontal_bs || !s->vertical_bs)
if (!s->tab_mvf_pool || !s->rpl_tab_pool)
uint8_t luma_weight_l0_flag[16];
uint8_t chroma_weight_l0_flag[16];
uint8_t luma_weight_l1_flag[16];
uint8_t chroma_weight_l1_flag[16];
int luma_log2_weight_denom;

if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
    av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
if (s->ps.sps->chroma_format_idc != 0) {
    if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
        av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
    s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
for (i = 0; i < s->sh.nb_refs[L0]; i++) {
    if (!luma_weight_l0_flag[i]) {
        s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
        s->sh.luma_offset_l0[i] = 0;
if (s->ps.sps->chroma_format_idc != 0) {
    for (i = 0; i < s->sh.nb_refs[L0]; i++)
for (i = 0; i < s->sh.nb_refs[L0]; i++)
    chroma_weight_l0_flag[i] = 0;
for (i = 0; i < s->sh.nb_refs[L0]; i++) {
    if (luma_weight_l0_flag[i]) {
        if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
        s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
    if (chroma_weight_l0_flag[i]) {
        for (j = 0; j < 2; j++) {
            if ((int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
                || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
            s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
            s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
                                                   >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
        s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
        s->sh.chroma_offset_l0[i][0] = 0;
        s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
        s->sh.chroma_offset_l0[i][1] = 0;
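/* [editor's note] When a per-reference weight flag is 0, the table entry falls back
 * to identity weighting: weight = 1 << log2_weight_denom, offset = 0. */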
for (i = 0; i < s->sh.nb_refs[L1]; i++) {
    if (!luma_weight_l1_flag[i]) {
        s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
        s->sh.luma_offset_l1[i] = 0;
if (s->ps.sps->chroma_format_idc != 0) {
    for (i = 0; i < s->sh.nb_refs[L1]; i++)
for (i = 0; i < s->sh.nb_refs[L1]; i++)
    chroma_weight_l1_flag[i] = 0;
for (i = 0; i < s->sh.nb_refs[L1]; i++) {
    if (luma_weight_l1_flag[i]) {
        if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
        s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
    if (chroma_weight_l1_flag[i]) {
        for (j = 0; j < 2; j++) {
            if ((int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
                || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
            s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
            s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
                                                   >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
        s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
        s->sh.chroma_offset_l1[i][0] = 0;
        s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
        s->sh.chroma_offset_l1[i][1] = 0;
int max_poc_lsb    = 1 << sps->log2_max_poc_lsb;
int prev_delta_msb = 0;
unsigned int nb_sps = 0, nb_sh;

if (!sps->long_term_ref_pics_present_flag)
if (sps->num_long_term_ref_pics_sps > 0)
if (nb_sps > sps->num_long_term_ref_pics_sps)
if (sps->num_long_term_ref_pics_sps > 1)
rps->poc[i]  = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
if (i && i != nb_sps)
    delta += prev_delta_msb;
poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
prev_delta_msb = delta;
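/* [editor's note] The long-term picture POC is rebuilt from its LSBs: an MSB-cycle
 * delta (accumulated in prev_delta_msb) times max_poc_lsb is applied relative to the
 * current picture's POC. */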
unsigned int num = 0, den = 0;

avctx->width   = sps->width  - ow->left_offset - ow->right_offset;
avctx->height  = sps->height - ow->top_offset  - ow->bottom_offset;
avctx->profile = sps->ptl.general_ptl.profile_idc;
avctx->level   = sps->ptl.general_ptl.level_idc;

if (sps->vui.video_signal_type_present_flag)
if (sps->vui.colour_description_present_flag) {
if (sps->chroma_format_idc == 1) {
    if (sps->vui.chroma_loc_info_present_flag) {
        if (sps->vui.chroma_sample_loc_type_top_field <= 5)
if (vps->vps_timing_info_present_flag) {
    num = vps->vps_num_units_in_tick;
    den = vps->vps_time_scale;
} else if (sps->vui.vui_timing_info_present_flag) {
    num = sps->vui.vui_num_units_in_tick;
    den = sps->vui.vui_time_scale;
if (num != 0 && den != 0)
if (s->sei.a53_caption.buf_ref)
if (s->sei.alternative_transfer.present &&
avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
#define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
                     CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
                     CONFIG_HEVC_NVDEC_HWACCEL + \
                     CONFIG_HEVC_VAAPI_HWACCEL + \
                     CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_HEVC_VDPAU_HWACCEL)

switch (sps->pix_fmt) {
#if CONFIG_HEVC_DXVA2_HWACCEL
#if CONFIG_HEVC_D3D11VA_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
#if CONFIG_HEVC_DXVA2_HWACCEL
#if CONFIG_HEVC_D3D11VA_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
*fmt++ = sps->pix_fmt;
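/* [editor's note] Inside the #if blocks above, each enabled hwaccel contributes its
 * pixel format to the candidate list; the software format from the SPS is appended
 * last as the fallback for format negotiation. */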
for (i = 0; i < 3; i++) {
if (sps->sao_enabled && !s->avctx->hwaccel) {
    int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
    for (c_idx = 0; c_idx < c_count; c_idx++) {
        int w = sps->width  >> sps->hshift[c_idx];
        int h = sps->height >> sps->vshift[c_idx];
        s->sao_pixel_buffer_h[c_idx] =
        s->sao_pixel_buffer_v[c_idx] =
        if (!s->sao_pixel_buffer_h[c_idx] ||
            !s->sao_pixel_buffer_v[c_idx])
s->ps.vps = (HEVCVPS*)s->ps.vps_list[s->ps.sps->vps_id]->data;
for (i = 0; i < 3; i++) {
av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
s->seq_decode = (s->seq_decode + 1) & 0xff;
if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
    const HEVCSPS *last_sps = s->ps.sps;
    if (sps->width != last_sps->width || sps->height != last_sps->height ||
        sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
        last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
s->seq_decode = (s->seq_decode + 1) & 0xff;
int slice_address_length;
if (s->ps.pps->dependent_slice_segments_enabled_flag)
                                   s->ps.sps->ctb_height);
       "Invalid slice segment address: %u.\n",
s->slice_initialized = 0;
s->slice_initialized = 0;
for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
if (s->ps.pps->output_flag_present_flag)
if (s->ps.sps->separate_colour_plane_flag)
       "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
int numbits, rps_idx;
if (!s->ps.sps->nb_st_rps) {
rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
if (s->ps.sps->sps_temporal_mvp_enabled_flag)
s->sh.short_term_rps = NULL;
if (s->ps.sps->sao_enabled) {
    if (s->ps.sps->chroma_format_idc) {
sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
if (s->ps.pps->cabac_init_present_flag)
       "Invalid collocated_ref_idx: %d.\n",
       "Invalid number of merging MVP candidates: %d.\n",
if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
if (s->ps.pps->deblocking_filter_control_present_flag) {
    int deblocking_filter_override_flag = 0;
    if (s->ps.pps->deblocking_filter_override_enabled_flag)
        deblocking_filter_override_flag = get_bits1(gb);
    if (deblocking_filter_override_flag) {
        if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
            tc_offset_div2   < -6 || tc_offset_div2   > 6) {
               "Invalid deblock filter offsets: %d, %d\n",
               beta_offset_div2, tc_offset_div2);
if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
    av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
if (offset_len < 1 || offset_len > 32) {
if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
    s->enable_parallel_tiles = 0;
    s->threads_number = 1;
s->enable_parallel_tiles = 0;
s->enable_parallel_tiles = 0;
if (s->ps.pps->slice_header_extension_present_flag) {
for (i = 0; i < length; i++)
    sh->slice_qp < -s->ps.sps->qp_bd_offset) {
       "The slice_qp %d is outside the valid range "
       -s->ps.sps->qp_bd_offset);
if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
if (!s->ps.pps->cu_qp_delta_enabled_flag)
    s->HEVClc->qp_y = s->sh.slice_qp;
s->slice_initialized = 1;
s->HEVClc->tu.cu_qp_offset_cb = 0;
s->HEVClc->tu.cu_qp_offset_cr = 0;
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])

#define SET_SAO(elem, value)                            \
    if (!sao_merge_up_flag && !sao_merge_left_flag)     \
    else if (sao_merge_left_flag)                       \
        sao->elem = CTB(s->sao, rx-1, ry).elem;         \
    else if (sao_merge_up_flag)                         \
        sao->elem = CTB(s->sao, rx, ry-1).elem;         \
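/* [editor's note] SET_SAO either takes the decoded `value` or, when one of the merge
 * flags is set, copies the parameter from the left (rx-1, ry) or above (rx, ry-1)
 * CTB via the CTB() accessor. */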
int sao_merge_left_flag = 0;
int sao_merge_up_flag   = 0;

if (s->sh.slice_sample_adaptive_offset_flag[0] ||
    s->sh.slice_sample_adaptive_offset_flag[1]) {
if (ry > 0 && !sao_merge_left_flag) {
for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
    int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
                                             s->ps.pps->log2_sao_offset_scale_chroma;
    if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
    for (i = 0; i < 4; i++)
    for (i = 0; i < 4; i++) {
    } else if (c_idx != 2) {
    for (i = 0; i < 4; i++) {
        sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
if (log2_res_scale_abs_plus1 != 0) {
    (1 - 2 * res_scale_sign_flag);
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
int trafo_size = 1 << log2_trafo_size;
s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);

if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
    (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
    int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
                     (s->ps.sps->chroma_format_idc == 2 &&
                      (cbf_cb[1] || cbf_cr[1]));
           "The cu_qp_delta %d is outside the valid range "
           -(26 + s->ps.sps->qp_bd_offset / 2),
            (25 + s->ps.sps->qp_bd_offset / 2));
    if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
    if (cu_chroma_qp_offset_flag) {
        int cu_chroma_qp_offset_idx = 0;
        if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
               "cu_chroma_qp_offset_idx not yet tested.\n");
    if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
        int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
        int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
        lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
        for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
            s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
                                        log2_trafo_size_c, scan_idx_c, 1);
            ptrdiff_t stride = s->frame->linesize[1];
            int hshift = s->ps.sps->hshift[1];
            int vshift = s->ps.sps->vshift[1];
            int size = 1 << log2_trafo_size_c;
                                ((x0 >> hshift) << s->ps.sps->pixel_shift)];
            s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
        for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
            s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
                                        log2_trafo_size_c, scan_idx_c, 2);
            ptrdiff_t stride = s->frame->linesize[2];
            int hshift = s->ps.sps->hshift[2];
            int vshift = s->ps.sps->vshift[2];
            int size = 1 << log2_trafo_size_c;
                                ((x0 >> hshift) << s->ps.sps->pixel_shift)];
            s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
    } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
        int trafo_size_h = 1 << (log2_trafo_size + 1);
        int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
        for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                                            trafo_size_h, trafo_size_v);
            s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
                                        log2_trafo_size, scan_idx_c, 1);
        for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                                            trafo_size_h, trafo_size_v);
            s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
                                        log2_trafo_size, scan_idx_c, 2);
if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
    int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
    int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
    s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
    s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
    if (s->ps.sps->chroma_format_idc == 2) {
                                        trafo_size_h, trafo_size_v);
        s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
        s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
} else if (blk_idx == 3) {
    int trafo_size_h = 1 << (log2_trafo_size + 1);
    int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
                                    trafo_size_h, trafo_size_v);
    s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
    s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
    if (s->ps.sps->chroma_format_idc == 2) {
                                        trafo_size_h, trafo_size_v);
        s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
        s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
int cb_size          = 1 << log2_cb_size;
int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
int min_pu_width     = s->ps.sps->min_pu_width;
int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);

for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
    for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
        s->is_pcm[i + j * min_pu_width] = 2;
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int trafo_depth, int blk_idx,
                              const int *base_cbf_cb, const int *base_cbf_cr)
cbf_cb[0] = base_cbf_cb[0];
cbf_cb[1] = base_cbf_cb[1];
cbf_cr[0] = base_cbf_cr[0];
cbf_cr[1] = base_cbf_cr[1];

if (trafo_depth == 1) {
    if (s->ps.sps->chroma_format_idc == 3) {
if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
    log2_trafo_size >  s->ps.sps->log2_min_tb_size    &&
    trafo_depth     < lc->cu.max_trafo_depth           &&
    int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
    split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
    if (trafo_depth == 0 || cbf_cb[0]) {
        if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
    if (trafo_depth == 0 || cbf_cr[0]) {
        if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
if (split_transform_flag) {
    const int trafo_size_split = 1 << (log2_trafo_size - 1);
    const int x1 = x0 + trafo_size_split;
    const int y1 = y0 + trafo_size_split;

#define SUBDIVIDE(x, y, idx)                                                    \
    ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
                             log2_trafo_size - 1, trafo_depth + 1, idx,         \
    int min_tu_size      = 1 << s->ps.sps->log2_min_tb_size;
    int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
    int min_tu_width     = s->ps.sps->min_tb_width;
        cbf_cb[0] || cbf_cr[0] ||
        (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
                             log2_cb_size, log2_trafo_size,
                             blk_idx, cbf_luma, cbf_cb, cbf_cr);
    for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
        for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
            int x_tu = (x0 + j) >> log2_min_tu_size;
            int y_tu = (y0 + i) >> log2_min_tu_size;
            s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
    if (!s->sh.disable_deblocking_filter_flag) {
        if (s->ps.pps->transquant_bypass_enable_flag &&
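/* [editor's note] SUBDIVIDE recurses into the four child transform blocks when
 * split_transform_flag is set; otherwise cbf_luma is recorded at min-TU granularity
 * so the deblocking filter can later locate the coded luma blocks. */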
int cb_size = 1 << log2_cb_size;
ptrdiff_t stride0 = s->frame->linesize[0];
ptrdiff_t stride1 = s->frame->linesize[1];
ptrdiff_t stride2 = s->frame->linesize[2];
uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];

int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
             (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
              ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
             s->ps.sps->pcm.bit_depth_chroma;

if (!s->sh.disable_deblocking_filter_flag)
s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
if (s->ps.sps->chroma_format_idc) {
    s->hevcdsp.put_pcm(dst1, stride1,
                       cb_size >> s->ps.sps->hshift[1],
                       cb_size >> s->ps.sps->vshift[1],
                       &gb, s->ps.sps->pcm.bit_depth_chroma);
    s->hevcdsp.put_pcm(dst2, stride2,
                       cb_size >> s->ps.sps->hshift[2],
                       cb_size >> s->ps.sps->vshift[2],
                       &gb, s->ps.sps->pcm.bit_depth_chroma);
                        int block_w, int block_h, int luma_weight, int luma_offset)
ptrdiff_t srcstride = ref->linesize[0];
int pic_width  = s->ps.sps->width;
int pic_height = s->ps.sps->height;
int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);

x_off += mv->x >> 2;
y_off += mv->y >> 2;
src   += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
                             edge_emu_stride, srcstride,
                             pic_width, pic_height);
    srcstride = edge_emu_stride;
s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                              block_h, mx, my, block_w);
s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                                block_h, s->sh.luma_log2_weight_denom,
                                                luma_weight, luma_offset, mx, my, block_w);
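/* [editor's note] The elided branch above pads blocks that reach outside the picture
 * into an edge-emulation buffer and redirects src/srcstride to it before the qpel MC
 * call; the weighted put_hevc_qpel_uni_w variant is used only when weight_flag is set. */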
                       AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
                       int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
ptrdiff_t src0stride = ref0->linesize[0];
ptrdiff_t src1stride = ref1->linesize[0];
int pic_width  = s->ps.sps->width;
int pic_height = s->ps.sps->height;
int mx0 = mv0->x & 3;
int my0 = mv0->y & 3;
int mx1 = mv1->x & 3;
int my1 = mv1->y & 3;
int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
int x_off0 = x_off + (mv0->x >> 2);
int y_off0 = y_off + (mv0->y >> 2);
int x_off1 = x_off + (mv1->x >> 2);
int y_off1 = y_off + (mv1->y >> 2);

uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
                             edge_emu_stride, src0stride,
                             pic_width, pic_height);
    src0stride = edge_emu_stride;
                             edge_emu_stride, src1stride,
                             pic_width, pic_height);
    src1stride = edge_emu_stride;

s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
                                            block_h, mx0, my0, block_w);
s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                               block_h, mx1, my1, block_w);
s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                                 block_h, s->sh.luma_log2_weight_denom,
                                                 s->sh.luma_weight_l0[current_mv->ref_idx[0]],
                                                 s->sh.luma_weight_l1[current_mv->ref_idx[1]],
                                                 s->sh.luma_offset_l0[current_mv->ref_idx[0]],
                                                 s->sh.luma_offset_l1[current_mv->ref_idx[1]],
                          ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
                          int x_off, int y_off, int block_w, int block_h,
                          struct MvField *current_mv, int chroma_weight, int chroma_offset)
int pic_width  = s->ps.sps->width  >> s->ps.sps->hshift[1];
int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
const Mv *mv = &current_mv->mv[reflist];
int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
int hshift = s->ps.sps->hshift[1];
int vshift = s->ps.sps->vshift[1];
intptr_t _mx = mx << (1 - hshift);
intptr_t _my = my << (1 - vshift);
int emu = src0 == s->frame->data[1] || src0 == s->frame->data[2];

x_off += mv->x >> (2 + hshift);
y_off += mv->y >> (2 + vshift);
src0  += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
                  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
                             edge_emu_stride, srcstride,
                             pic_width, pic_height);
    srcstride = edge_emu_stride;
s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
                                              block_h, _mx, _my, block_w);
s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
                                                block_h, s->sh.chroma_log2_weight_denom,
                                                chroma_weight, chroma_offset, _mx, _my, block_w);
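/* [editor's note] Chroma MVs carry extra fractional precision from the chroma
 * subsampling: the integer part is ">> (2 + hshift/vshift)" while the fractional
 * part passed to the EPEL filters is rescaled with "<< (1 - hshift/vshift)". */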
                         int x_off, int y_off, int block_w, int block_h,
                         struct MvField *current_mv, int cidx)
ptrdiff_t src1stride = ref0->linesize[cidx+1];
ptrdiff_t src2stride = ref1->linesize[cidx+1];
int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
int pic_width  = s->ps.sps->width  >> s->ps.sps->hshift[1];
int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
Mv *mv0 = &current_mv->mv[0];
Mv *mv1 = &current_mv->mv[1];
int hshift = s->ps.sps->hshift[1];
int vshift = s->ps.sps->vshift[1];
intptr_t _mx0 = mx0 << (1 - hshift);
intptr_t _my0 = my0 << (1 - vshift);
intptr_t _mx1 = mx1 << (1 - hshift);
intptr_t _my1 = my1 << (1 - vshift);

int x_off0 = x_off + (mv0->x >> (2 + hshift));
int y_off0 = y_off + (mv0->y >> (2 + vshift));
int x_off1 = x_off + (mv1->x >> (2 + hshift));
int y_off1 = y_off + (mv1->y >> (2 + vshift));

src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
                  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
                             edge_emu_stride, src1stride,
                             pic_width, pic_height);
    src1stride = edge_emu_stride;
                  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
                             edge_emu_stride, src2stride,
                             pic_width, pic_height);
    src2stride = edge_emu_stride;

s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
                                            block_h, _mx0, _my0, block_w);
s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
                                               src2, src2stride, lc->tmp,
                                               block_h, _mx1, _my1, block_w);
s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
                                                 src2, src2stride, lc->tmp,
                                                 s->sh.chroma_log2_weight_denom,
                                                 s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
                                                 s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
                                                 s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
                                                 s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
                                                 _mx1, _my1, block_w);
                              int nPbH, int log2_cb_size, int part_idx,
if (inter_pred_idc != PRED_L1) {
    if (s->sh.nb_refs[L0])
                             part_idx, merge_idx, mv, mvp_flag, 0);
if (inter_pred_idc != PRED_L0) {
    if (s->sh.nb_refs[L1])
    if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
                             part_idx, merge_idx, mv, mvp_flag, 1);
                                int log2_cb_size, int partIdx, int idx)
#define POS(c_idx, x, y)                                                              \
    &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
                           (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
struct MvField current_mv = {{{ 0 }}};
int min_pu_width = s->ps.sps->min_pu_width;
MvField *tab_mvf = s->ref->tab_mvf;
int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
int min_cb_width     = s->ps.sps->min_cb_width;
int x_cb = x0 >> log2_min_cb_size;
int y_cb = y0 >> log2_min_cb_size;

int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
if (s->sh.max_num_merge_cand > 1)
                         partIdx, merge_idx, &current_mv);
                         partIdx, merge_idx, &current_mv);
x_pu = x0 >> s->ps.sps->log2_min_pu_size;
y_pu = y0 >> s->ps.sps->log2_min_pu_size;
for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
    for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
        tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;

ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
if (!ref0 || !ref0->frame)
ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
if (!ref1 || !ref1->frame)
    int x0_c = x0 >> s->ps.sps->hshift[1];
    int y0_c = y0 >> s->ps.sps->vshift[1];
    int nPbW_c = nPbW >> s->ps.sps->hshift[1];
    int nPbH_c = nPbH >> s->ps.sps->vshift[1];
                &current_mv.mv[0], x0, y0, nPbW, nPbH,
                s->sh.luma_weight_l0[current_mv.ref_idx[0]],
                s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
    if (s->ps.sps->chroma_format_idc) {
                      0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                      s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
                      0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                      s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
    int x0_c = x0 >> s->ps.sps->hshift[1];
    int y0_c = y0 >> s->ps.sps->vshift[1];
    int nPbW_c = nPbW >> s->ps.sps->hshift[1];
    int nPbH_c = nPbH >> s->ps.sps->vshift[1];
                &current_mv.mv[1], x0, y0, nPbW, nPbH,
                s->sh.luma_weight_l1[current_mv.ref_idx[1]],
                s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
    if (s->ps.sps->chroma_format_idc) {
        chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
                      1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                      s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
        chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
                      1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                      s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
    int x0_c = x0 >> s->ps.sps->hshift[1];
    int y0_c = y0 >> s->ps.sps->vshift[1];
    int nPbW_c = nPbW >> s->ps.sps->hshift[1];
    int nPbH_c = nPbH >> s->ps.sps->vshift[1];
               &current_mv.mv[0], x0, y0, nPbW, nPbH,
               ref1->frame, &current_mv.mv[1], &current_mv);
    if (s->ps.sps->chroma_format_idc) {
                     x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
                     x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
                               int prev_intra_luma_pred_flag)
int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
int min_pu_width = s->ps.sps->min_pu_width;
int size_in_pus  = pu_size >> s->ps.sps->log2_min_pu_size;
               s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
               s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);

MvField *tab_mvf = s->ref->tab_mvf;
int intra_pred_mode;

if ((y0 - 1) < y_ctb)
if (cand_left == cand_up) {
    if (cand_left < 2) {
        candidate[0] = cand_left;
        candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
        candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
    candidate[0] = cand_left;
    candidate[1] = cand_up;
if (prev_intra_luma_pred_flag) {
    intra_pred_mode = candidate[lc->pu.mpm_idx];
    if (candidate[0] > candidate[1])
    if (candidate[0] > candidate[2])
    if (candidate[1] > candidate[2])
    for (i = 0; i < 3; i++)
        if (intra_pred_mode >= candidate[i])
for (i = 0; i < size_in_pus; i++) {
    memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
           intra_pred_mode, size_in_pus);
    for (j = 0; j < size_in_pus; j++) {
return intra_pred_mode;
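/* [editor's note] MPM derivation: equal angular neighbours give that mode plus its two
 * adjacent angular modes; otherwise the first two candidates are {left, up} and the
 * third is filled from planar/DC/vertical in the elided branch. A non-MPM mode is
 * decoded and then shifted past each sorted candidate it matches or exceeds. */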
                         int log2_cb_size, int ct_depth)
int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
int y_cb = y0 >> s->ps.sps->log2_min_cb_size;

for (y = 0; y < length; y++)
    memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
     0,  1,  2,  2,  2,  2,  3,  5,  7,  8, 10, 12, 13, 15, 17, 18, 19, 20,
    21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
uint8_t prev_intra_luma_pred_flag[4];
int pb_size = (1 << log2_cb_size) >> split;
int side    = split + 1;

for (i = 0; i < side; i++)
    for (j = 0; j < side; j++)
for (i = 0; i < side; i++) {
    for (j = 0; j < side; j++) {
        if (prev_intra_luma_pred_flag[2 * i + j])
                                     prev_intra_luma_pred_flag[2 * i + j]);
if (s->ps.sps->chroma_format_idc == 3) {
    for (i = 0; i < side; i++) {
        for (j = 0; j < side; j++) {
            if (chroma_mode != 4) {
} else if (s->ps.sps->chroma_format_idc == 2) {
    if (chroma_mode != 4) {
        mode_idx = intra_chroma_table[chroma_mode];
} else if (s->ps.sps->chroma_format_idc != 0) {
    if (chroma_mode != 4) {
int pb_size = 1 << log2_cb_size;
int size_in_pus  = pb_size >> s->ps.sps->log2_min_pu_size;
int min_pu_width = s->ps.sps->min_pu_width;
MvField *tab_mvf = s->ref->tab_mvf;
int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
int y_pu = y0 >> s->ps.sps->log2_min_pu_size;

if (size_in_pus == 0)
for (j = 0; j < size_in_pus; j++)
    memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
for (j = 0; j < size_in_pus; j++)
    for (k = 0; k < size_in_pus; k++)
int cb_size = 1 << log2_cb_size;
int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
int length = cb_size >> log2_min_cb_size;
int min_cb_width = s->ps.sps->min_cb_width;
int x_cb = x0 >> log2_min_cb_size;
int y_cb = y0 >> log2_min_cb_size;
int idx  = log2_cb_size - 2;
int qp_block_mask = (1 << (s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;

for (x = 0; x < 4; x++)
if (s->ps.pps->transquant_bypass_enable_flag) {
x = y_cb * min_cb_width + x_cb;
for (y = 0; y < length; y++) {
    memset(&s->skip_flag[x], skip_flag, length);
x = y_cb * min_cb_width + x_cb;
for (y = 0; y < length; y++) {
    memset(&s->skip_flag[x], 0, length);
if (!s->sh.disable_deblocking_filter_flag)
    log2_cb_size == s->ps.sps->log2_min_cb_size) {
    log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
    log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
    if (s->ps.sps->pcm.loop_filter_disable_flag)
hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
int rqt_root_cbf = 1;
const static int cbf[2] = { 0 };
    s->ps.sps->max_transform_hierarchy_depth_inter;
                         log2_cb_size, 0, 0, cbf, cbf);
if (!s->sh.disable_deblocking_filter_flag)
x = y_cb * min_cb_width + x_cb;
for (y = 0; y < length; y++) {
    memset(&s->qp_y_tab[x], lc->qp_y, length);
if (((x0 + (1 << log2_cb_size)) & qp_block_mask) == 0 &&
    ((y0 + (1 << log2_cb_size)) & qp_block_mask) == 0) {
                               int log2_cb_size, int cb_depth)
const int cb_size = 1 << log2_cb_size;

if (x0 + cb_size <= s->ps.sps->width  &&
    y0 + cb_size <= s->ps.sps->height &&
    log2_cb_size > s->ps.sps->log2_min_cb_size) {
    split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
if (s->ps.pps->cu_qp_delta_enabled_flag &&
    log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
if (s->sh.cu_chroma_qp_offset_enabled_flag &&
    log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
    int qp_block_mask = (1 << (s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
    const int cb_size_split = cb_size >> 1;
    const int x1 = x0 + cb_size_split;
    const int y1 = y0 + cb_size_split;

    if (more_data && x1 < s->ps.sps->width) {
    if (more_data && y1 < s->ps.sps->height) {
    if (more_data && x1 < s->ps.sps->width &&
        y1 < s->ps.sps->height) {
    if (((x0 + (1 << log2_cb_size)) & qp_block_mask) == 0 &&
        ((y0 + (1 << log2_cb_size)) & qp_block_mask) == 0)
    return ((x1 + cb_size_split) < s->ps.sps->width ||
            (y1 + cb_size_split) < s->ps.sps->height);
    if ((!((x0 + cb_size) %
           (1 << (s->ps.sps->log2_ctb_size))) ||
         (x0 + cb_size >= s->ps.sps->width)) &&
           (1 << (s->ps.sps->log2_ctb_size))) ||
         (y0 + cb_size >= s->ps.sps->height))) {
        return !end_of_slice_flag;
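/* [editor's note] The quadtree keeps splitting while the CU crosses the picture border
 * or is larger than the minimum CB size; end_of_slice_flag is only evaluated at
 * CTB-aligned or picture-edge positions, which is what the modulo tests above check. */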
int ctb_size = 1 << s->ps.sps->log2_ctb_size;
int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;

s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;

if (s->ps.pps->entropy_coding_sync_enabled_flag) {
    if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
} else if (s->ps.pps->tiles_enabled_flag) {
    if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
        int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
        lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
if (s->ps.pps->tiles_enabled_flag) {
    if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
    if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
    if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
    if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
if (ctb_addr_in_slice <= 0)
if (ctb_addr_in_slice < s->ps.sps->ctb_width)
lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) &&
                         (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) &&
                        (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
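/* [editor's note] The ctb_left/up/up_right/up_left availability flags are cleared
 * whenever the neighbouring CTB lies in a different tile or slice, so intra
 * prediction and CABAC context derivation never cross those boundaries. */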
int ctb_size = 1 << s->ps.sps->log2_ctb_size;
int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];

if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
if (s->sh.dependent_slice_segment_flag) {
    int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
    if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
    int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];

    x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
    y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
        s->tab_slice_address[ctb_addr_rs] = -1;
    hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);

    s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
    s->deblock[ctb_addr_rs].tc_offset   = s->sh.tc_offset;
    s->filter_slice_edges[ctb_addr_rs]  = s->sh.slice_loop_filter_across_slices_enabled_flag;

    if (more_data < 0) {
        s->tab_slice_address[ctb_addr_rs] = -1;
if (x_ctb + ctb_size >= s->ps.sps->width &&
    y_ctb + ctb_size >= s->ps.sps->height)
int ctb_size = 1 << s1->ps.sps->log2_ctb_size;
int *ctb_row_p = input_ctb_row;
int ctb_row = ctb_row_p[job];
int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
int thread = ctb_row % s1->threads_number;

s = s1->sList[self_id];
ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);

while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
    int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
    int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;

    hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);

    if (more_data < 0) {
    if (!more_data && (x_ctb + ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
    if ((x_ctb + ctb_size) >= s->ps.sps->width && (y_ctb + ctb_size) >= s->ps.sps->height) {
    ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];

    if (x_ctb >= s->ps.sps->width) {
        s->tab_slice_address[ctb_addr_rs] = -1;
int length = nal->size;
int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
int64_t startheader, cmpt = 0;

if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >=
    s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
           s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
           s->ps.sps->ctb_width, s->ps.sps->ctb_height
for (i = 1; i < s->threads_number; i++) {
    if (s->sList[i] && s->HEVClcList[i])
    if (!s->sList[i] || !s->HEVClcList[i]) {
    s->sList[i]->HEVClc = s->HEVClcList[i];
for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
    offset += (s->sh.entry_point_offset[i - 1] - cmpt);
    for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
    s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
if (s->sh.num_entry_point_offsets != 0) {
    offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
s->sh.size[s->sh.num_entry_point_offsets - 1]   = length - offset;
s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
for (i = 1; i < s->threads_number; i++) {
    s->sList[i]->HEVClc->first_qp_group = 1;
    s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
    s->sList[i]->HEVClc = s->HEVClcList[i];
for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
if (s->ps.pps->entropy_coding_sync_enabled_flag)
for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
if (s->sei.frame_packing.present &&
    s->sei.frame_packing.arrangement_type >= 3 &&
    s->sei.frame_packing.arrangement_type <= 5 &&
    s->sei.frame_packing.content_interpretation_type > 0 &&
    s->sei.frame_packing.content_interpretation_type < 3) {
    switch (s->sei.frame_packing.arrangement_type) {
        if (s->sei.frame_packing.quincunx_subsampling)
    if (s->sei.frame_packing.content_interpretation_type == 2)
    if (s->sei.frame_packing.arrangement_type == 5) {
        if (s->sei.frame_packing.current_frame_is_frame0_flag)
if (s->sei.display_orientation.present &&
    (s->sei.display_orientation.anticlockwise_rotation ||
     s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
    double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
                              s->sei.display_orientation.hflip,
                              s->sei.display_orientation.vflip);
if (s->sei.mastering_display.present > 0 &&
    s->sei.mastering_display.present--;
if (s->sei.mastering_display.present) {
    const int mapping[3] = {2, 0, 1};
    const int chroma_den = 50000;
    const int luma_den   = 10000;
    for (i = 0; i < 3; i++) {
        const int j = mapping[i];
    metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
    metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
           "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
           "min_luminance=%f, max_luminance=%f\n",
if (s->sei.content_light.present > 0 &&
    s->sei.content_light.present--;
if (s->sei.content_light.present) {
    metadata->MaxCLL  = s->sei.content_light.max_content_light_level;
    metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
if (s->sei.a53_caption.buf_ref) {
for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
s->sei.unregistered.nb_buf_ref = 0;
if (s->sei.timecode.present) {
                                     sizeof(uint32_t) * 4);
    tc_sd = (uint32_t*)tcside->data;
    tc_sd[0] = s->sei.timecode.num_clock_ts;
    for (int i = 0; i < tc_sd[0]; i++) {
        int drop = s->sei.timecode.cnt_dropped_flag[i];
        int hh = s->sei.timecode.hours_value[i];
        int mm = s->sei.timecode.minutes_value[i];
        int ss = s->sei.timecode.seconds_value[i];
        int ff = s->sei.timecode.n_frames[i];
    s->sei.timecode.num_clock_ts = 0;
if (s->sei.dynamic_hdr_plus.info) {
int pic_size_in_ctb = ((s->ps.sps->width  >> s->ps.sps->log2_min_cb_size) + 1) *
                      ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);

memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
memset(s->vertical_bs,   0, s->bs_width * s->bs_height);
memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));

s->first_nal_type = s->nal_unit_type;
if (s->ps.pps->tiles_enabled_flag)
    lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
s->frame->pict_type = 3 - s->sh.slice_type;
if (!s->avctx->hwaccel)
int ctb_addr_ts, ret;

s->nal_unit_type = nal->type;
switch (s->nal_unit_type) {
    if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
        ret = s->avctx->hwaccel->decode_params(s->avctx,
    if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
        ret = s->avctx->hwaccel->decode_params(s->avctx,
                                    s->apply_defdispwin);
    if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
        ret = s->avctx->hwaccel->decode_params(s->avctx,
    if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
        ret = s->avctx->hwaccel->decode_params(s->avctx,
    s->slice_initialized = 0;
    if (s->sh.first_slice_in_pic_flag) {
        if (s->max_ra == INT_MAX) {
            s->max_ra = INT_MIN;
        s->poc <= s->max_ra) {
        s->max_ra = INT_MIN;
    } else if (!s->ref) {
    if (s->nal_unit_type != s->first_nal_type) {
           "Non-matching NAL types of the VCL NALUs: %d %d\n",
           s->first_nal_type, s->nal_unit_type);
    if (!s->sh.dependent_slice_segment_flag &&
           "Error constructing the reference lists for the current slice.\n");
    if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
        ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
    if (s->avctx->hwaccel) {
        if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
        if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
        if (ctb_addr_ts < 0) {
    s->seq_decode = (s->seq_decode + 1) & 0xff;
    s->max_ra = INT_MAX;
           "Skipping NAL unit %d\n", s->nal_unit_type);
int eos_at_start = 1;

s->last_eos = s->eos;
                            s->nal_length_size, s->avctx->codec_id, 1, 0);
           "Error splitting the input into NAL units.\n");
for (i = 0; i < s->pkt.nb_nals; i++) {
for (i = 0; i < s->pkt.nb_nals; i++) {
    if (ret >= 0 && s->overlap > 2)
           "Error parsing NAL unit #%d.\n", i);
for (i = 0; i < 16; i++)
pixel_shift = desc->comp[0].depth > 8;
if (pixel_shift && !s->checksum_buf) {
    if (!s->checksum_buf)
int width  = s->avctx->coded_width;
int height = s->avctx->coded_height;
for (j = 0; j < h; j++) {
    s->bdsp.bswap16_buf((uint16_t *)s->checksum_buf,
                        (const uint16_t *)src, w);
    src = s->checksum_buf;
if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
                                 &s->nal_length_size, s->avctx->err_recognition,
                                 s->apply_defdispwin, s->avctx);
if (first && s->ps.sps_list[i]) {
                                            &new_extradata_size);
if (new_extradata && new_extradata_size > 0) {
       "hardware accelerator failed to decode picture\n");
    s->sei.picture_hash.is_md5) {
s->sei.picture_hash.is_md5 = 0;
if (s->is_decoded) {
if (s->output_frame->buf[0]) {
if (src->hwaccel_picture_private) {
for (i = 0; i < 3; i++) {
if (s->HEVClcList && s->sList) {
    for (i = 1; i < s->threads_number; i++) {
if (!s->HEVClc || !s->HEVClcList || !s->sList)
s->HEVClcList[0] = s->HEVClc;
if (!s->cabac_state)
if (!s->output_frame)
    if (!s->DPB[i].frame)
    s->DPB[i].tf.f = s->DPB[i].frame;
s->max_ra = INT_MAX;
s->context_initialized = 1;
if (!s->context_initialized) {
    if (s0->DPB[i].frame->buf[0]) {
if (s->ps.sps != s0->ps.sps)
if (s->ps.sps != s0->ps.sps)
s->seq_decode = s0->seq_decode;
s->seq_output = s0->seq_output;
s->pocTid0    = s0->pocTid0;
s->max_ra     = s0->max_ra;
s->no_rasl_output_flag = s0->no_rasl_output_flag;
s->is_nalff        = s0->is_nalff;
s->nal_length_size = s0->nal_length_size;
s->threads_number  = s0->threads_number;
s->threads_type    = s0->threads_type;
    s->seq_decode = (s->seq_decode + 1) & 0xff;
    s->max_ra = INT_MAX;
for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
s->sei.unregistered.nb_buf_ref = 0;
if (s0->sei.unregistered.nb_buf_ref) {
                               s0->sei.unregistered.nb_buf_ref,
                               sizeof(*s->sei.unregistered.buf_ref));
    for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
        if (!s->sei.unregistered.buf_ref[i])
        s->sei.unregistered.nb_buf_ref++;
s->sei.frame_packing        = s0->sei.frame_packing;
s->sei.display_orientation  = s0->sei.display_orientation;
s->sei.mastering_display    = s0->sei.mastering_display;
s->sei.content_light        = s0->sei.content_light;
s->sei.alternative_transfer = s0->sei.alternative_transfer;
s->threads_number = 1;
s->enable_parallel_tiles = 0;
s->sei.picture_timing.picture_struct = 0;
s->max_ra = INT_MAX;
#define OFFSET(x) offsetof(HEVCContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)

{ "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
{ "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
#if CONFIG_HEVC_DXVA2_HWACCEL
#if CONFIG_HEVC_D3D11VA_HWACCEL
#if CONFIG_HEVC_D3D11VA2_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
static void flush(AVCodecContext *avctx)
static double val(void *priv, double ch)
static char * split(char *message, char delim)
Macro definitions for various function/variable attributes.
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_THREAD_FRAME
Decode more than one frame at once.
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
#define AV_EF_EXPLODE
abort decoding on minor error detection
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t *size)
static av_cold int init(AVCodecContext *avctx)
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Context Adaptive Binary Arithmetic Coder inline functions.
static av_unused const uint8_t * skip_bytes(CABACContext *c, int n)
Skip n bytes and reset the decoder.
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
#define ss(width, name, subs,...)
common internal and external API header
#define FFSWAP(type, a, b)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static enum AVPixelFormat pix_fmt
#define atomic_store(object, desired)
#define atomic_load(object)
#define atomic_init(obj, value)
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static int get_bits_left(GetBitContext *gb)
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits(GetBitContext *s, int n)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
@ AVDISCARD_NONKEY
discard all frames except keyframes
@ AVDISCARD_BIDIR
discard all bidirectional frames
@ AVDISCARD_NONINTRA
discard all non intra frames
@ AVDISCARD_NONREF
discard all non reference
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
AVBufferRef * av_buffer_allocz(buffer_size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
int av_buffer_replace(AVBufferRef **pdst, AVBufferRef *src)
Ensure dst refers to the same data as src.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
AVBufferPool * av_buffer_pool_init(buffer_size_t size, AVBufferRef *(*alloc)(buffer_size_t size))
Allocate and initialize a buffer pool.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_INFO
Standard information.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const char * av_default_item_name(void *ptr)
Return the context name.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
static double av_q2d(AVRational a)
Convert an AVRational to a double.
void av_md5_init(AVMD5 *ctx)
Initialize MD5 hashing.
struct AVMD5 * av_md5_alloc(void)
Allocate an AVMD5 context.
void av_md5_final(AVMD5 *ctx, uint8_t *dst)
Finish hashing and output digest value.
void av_md5_update(AVMD5 *ctx, const uint8_t *src, int len)
Update hash value.
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
#define LIBAVUTIL_VERSION_INT
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
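A short illustrative sketch of the stereo 3D side-data helper and the enums listed above; the side-by-side packing with inverted views is just an example configuration.

#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>

static void stereo3d_example(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return;
    stereo->type  = AV_STEREO3D_SIDEBYSIDE;
    stereo->flags = AV_STEREO3D_FLAG_INVERT;   /* the right half carries the left view */
}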
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx)
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
int ff_hevc_pred_mode_decode(HEVCContext *s)
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx)
int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
int ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts, int thread)
int ff_hevc_sao_band_position_decode(HEVCContext *s)
int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
int ff_hevc_sao_type_idx_decode(HEVCContext *s)
int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
int ff_hevc_merge_idx_decode(HEVCContext *s)
int ff_hevc_merge_flag_decode(HEVCContext *s)
int ff_hevc_sao_eo_class_decode(HEVCContext *s)
int ff_hevc_pcm_flag_decode(HEVCContext *s)
int ff_hevc_mpm_idx_decode(HEVCContext *s)
void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0, int nPbW, int nPbH)
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps, HEVCSEI *sei, int *is_nalff, int *nal_length_size, int err_recognition, int apply_defdispwin, void *logctx)
int ff_hevc_decode_nal_pps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
void ff_hevc_ps_uninit(HEVCParamSets *ps)
int ff_hevc_decode_nal_vps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
int ff_hevc_decode_short_term_rps(GetBitContext *gb, AVCodecContext *avctx, ShortTermRPS *rps, const HEVCSPS *sps, int is_slice_header)
int ff_hevc_decode_nal_sps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps, int apply_defdispwin)
int ff_hevc_compute_poc(const HEVCSPS *sps, int pocTid0, int poc_lsb, int nal_unit_type)
Compute POC of the current frame and return it.
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
void ff_hevc_bump_frame(HEVCContext *s)
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, int type)
void ff_hevc_reset_sei(HEVCSEI *s)
Reset SEI values that are stored on the Context.
#define BOUNDARY_UPPER_SLICE
#define BOUNDARY_LEFT_TILE
#define QPEL_EXTRA_BEFORE
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
#define EPEL_EXTRA_BEFORE
#define SAMPLE_CTB(tab, x, y)
#define BOUNDARY_UPPER_TILE
#define EDGE_EMU_BUFFER_STRIDE
#define BOUNDARY_LEFT_SLICE
void ff_hevc_dsp_init(HEVCDSPContext *hevcdsp, int bit_depth)
void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
#define HWACCEL_DXVA2(codec)
#define HWACCEL_VDPAU(codec)
#define HWACCEL_NVDEC(codec)
#define HWACCEL_VAAPI(codec)
#define HWACCEL_D3D11VA(codec)
#define HWACCEL_D3D11VA2(codec)
static const int8_t mv[256][2]
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
static av_cold int hevc_init_context(AVCodecContext *avctx)
static int hls_transform_unit(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
8.5.3.2.2.2 Chroma sample bidirectional interpolation process
static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
static av_cold int hevc_decode_init(AVCodecContext *avctx)
static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
8.5.3.2.2.1 Luma sample bidirectional interpolation process
static const AVClass hevc_decoder_class
static const AVOption options[]
static int hevc_frame_start(HEVCContext *s)
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
static av_cold int hevc_decode_free(AVCodecContext *avctx)
static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt)
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
static int hls_slice_header(HEVCContext *s)
static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset)
8.5.3.2.2.1 Luma sample unidirectional interpolation process
static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
static void pic_arrays_free(HEVCContext *s)
NOTE: Each function hls_foo corresponds to the function foo in the specification (HLS stands for High Level Syntax).
static int export_stream_params_from_sei(HEVCContext *s)
const uint8_t ff_hevc_pel_weight[65]
static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
8.5.3.2.2.2 Chroma sample uniprediction interpolation process
#define SET_SAO(elem, value)
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
static int hls_coding_quadtree(HEVCContext *s, int x0, int y0, int log2_cb_size, int cb_depth)
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
static int verify_md5(HEVCContext *s, AVFrame *frame)
static void print_md5(void *log_ctx, int level, uint8_t md5[16])
static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag)
8.4.1
static int hls_slice_data(HEVCContext *s)
static void intra_prediction_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
static int set_side_data(HEVCContext *s)
static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx)
#define SUBDIVIDE(x, y, idx)
static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth)
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb, int ctb_addr_ts)
static int hls_cross_component_pred(HEVCContext *s, int idx)
static void hevc_decode_flush(AVCodecContext *avctx)
static void hls_sao_param(HEVCContext *s, int rx, int ry)
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
static const uint8_t tab_mode_idx[]
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
static int hls_transform_tree(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx, const int *base_cbf_cb, const int *base_cbf_cr)
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref, const Mv *mv, int y0, int height)
static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
static void intra_prediction_unit_default_value(HEVCContext *s, int x0, int y0, int log2_cb_size)
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing the init function to be called without locking any global mutexes.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
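An illustrative use of NULL_IF_CONFIG_SMALL(): long descriptive strings are compiled out when CONFIG_SMALL is enabled. The header path and the string below are assumptions, not taken from this file.

#include "libavutil/internal.h"   /* assumed location of NULL_IF_CONFIG_SMALL() */

static const char *codec_long_name(void)
{
    /* Expands to the string normally, and to NULL when CONFIG_SMALL is set. */
    return NULL_IF_CONFIG_SMALL("Example long codec description");
}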
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
static enum AVPixelFormat pix_fmts[]
Public header for MD5 hash function implementation.
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AV_PIX_FMT_YUV444P12
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
@ AVCHROMA_LOC_UNSPECIFIED
#define AV_PIX_FMT_YUV420P10
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
@ AVCOL_RANGE_JPEG
Full range content.
#define AV_PIX_FMT_YUV420P12
AVPixelFormat
Pixel format.
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains an ID3D11VideoDecoderOutputView pointer.
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range.
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
#define AV_PIX_FMT_YUV444P10
const AVProfile ff_hevc_profiles[]
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when the current thread is ready for the next thread to start decoding the next frame.
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
void ff_reset_entries(AVCodecContext *avctx)
int ff_alloc_entries(AVCodecContext *avctx, int count)
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
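A hedged sketch of the frame-threading progress protocol using the two ThreadFrame helpers listed above; the header name, function names, and row semantics here are assumptions for illustration, not taken from this file.

#include "thread.h"   /* assumed libavcodec internal header declaring the ThreadFrame helpers */

/* Decoding thread: announce that rows 0..row of the progressive frame are finished. */
static void report_rows_done(ThreadFrame *f, int row)
{
    ff_thread_report_progress(f, row, 0);
}

/* Consuming thread: block until the reference frame has been decoded up to 'row'. */
static void wait_for_rows(ThreadFrame *ref, int row)
{
    ff_thread_await_progress(ref, row, 0);
}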
#define FF_ARRAY_ELEMS(a)
A reference to a data buffer.
uint8_t * data
The data buffer.
Describe the class of an AVClass context structure.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
main external API structure.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int width
picture width / height.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
int active_thread_type
Which multithreading methods are in use by the codec.
int has_b_frames
Size of the frame reordering buffer in the decoder.
enum AVColorSpace colorspace
YUV colorspace type.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
struct AVCodecInternal * internal
Private context used for internal data.
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
const char * name
Name of the codec implementation.
Content light level needed to transmit HDR over HDMI (CTA-861.3).
unsigned MaxFALL
Max average light level per frame (cd/m^2).
unsigned MaxCLL
Max content light level (cd/m^2).
Structure to hold side data for an AVFrame.
This structure describes decoded (raw) audio or video data.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio frames.
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
This structure stores compressed data.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
enum AVStereo3DType type
How views are packed within the video.
int flags
Additional information about the frame packing.
enum AVStereo3DView view
Determines which views are packed.
enum PredMode pred_mode
PredMode.
uint8_t intra_split_flag
IntraSplitFlag.
uint8_t max_trafo_depth
MaxTrafoDepth.
enum PartMode part_mode
PartMode.
uint8_t cu_transquant_bypass_flag
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
void * hwaccel_picture_private
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
AVBufferRef * rpl_tab_buf
AVBufferRef * tab_mvf_buf
AVBufferRef * hwaccel_priv_buf
uint8_t edge_emu_buffer[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
int16_t tmp[MAX_PB_SIZE *MAX_PB_SIZE]
uint8_t ctb_up_right_flag
uint8_t edge_emu_buffer2[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
AVBufferRef * vps_list[HEVC_MAX_VPS_COUNT]
uint8_t poc_msb_present[32]
int16_t x
horizontal component of motion vector
int16_t y
vertical component of motion vector
uint8_t intra_pred_mode_c[4]
uint8_t intra_pred_mode[4]
int rem_intra_luma_pred_mode
struct HEVCFrame * ref[HEVC_MAX_REFS]
int offset_abs[3][4]
sao_offset_abs
int eo_class[3]
sao_eo_class
uint8_t type_idx[3]
sao_type_idx
int16_t offset_val[3][5]
SaoOffsetVal.
int offset_sign[3][4]
sao_offset_sign
#define av_malloc_array(a, b)
static void error(const char *err)
static int ref[MAX_W *MAX_W]
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert SEI info to SMPTE 12M binary representation.
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
#define AV_TIMECODE_STR_SIZE
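A hedged sketch of the two timecode helpers above; the 29.97 fps drop-frame rate and the 00:10:00;00 starting point are example values only.

#include <stdint.h>
#include <stdio.h>
#include <libavutil/timecode.h>

static void timecode_example(void)
{
    AVRational rate = { 30000, 1001 };                           /* 29.97 fps */
    uint32_t tc = av_timecode_get_smpte(rate, 1, 0, 10, 0, 0);   /* drop-frame, hh=0 mm=10 ss=0 ff=0 */
    char buf[AV_TIMECODE_STR_SIZE];

    av_timecode_make_smpte_tc_string2(buf, rate, tc, 0, 0);
    printf("%s\n", buf);
}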
static const uint8_t offset[127][2]