static av_always_inline int check_intra_mode(VP9TileData *td, int mode, uint8_t **a,
                                             uint8_t *dst_edge, ptrdiff_t stride_edge,
                                             uint8_t *dst_inner, ptrdiff_t stride_inner,
                                             uint8_t *l, int col, int x, int w, int row, int y,
                                             enum TxfmMode tx, int p, int ss_h, int ss_v,
                                             int bytesperpixel)
{
    const VP9Context *s = td->s;
    int have_top = row > 0 || y > 0;
    int have_left = col > td->tile_col_start || x > 0;
    int have_right = x < w - 1;
    int bpp = s->s.h.bpp;
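    /* Availability of neighbouring samples for this transform block:
     * have_top   - reconstructed pixels exist above (a previous block row in the
     *              frame, or a previous 4x4 row within this block),
     * have_left  - pixels exist to the left within the current tile,
     * have_right - this is not the rightmost sub-block, so top-right samples
     *              may be usable. */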
    static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = { /* ... */ };
    static const struct {
        uint8_t needs_left:1, needs_top:1, needs_topleft:1, needs_topright:1, invert_left:1;
    } edges[N_INTRA_PRED_MODES] = {
        [DC_PRED]     = { .needs_top  = 1, .needs_left = 1 },
        /* ... */
        [HOR_UP_PRED] = { .needs_left = 1, .invert_left = 1 },
        /* ... */
    };
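    /* Remap the prediction mode according to which neighbours actually exist
     * (a mode that requires a missing edge is typically replaced by one of the
     * DC fill modes), then gather the edge pixels the remapped mode requires
     * into the caller-provided buffers: *a for the top edge, l for the left. */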
    mode = mode_conv[mode][have_left][have_top];
    if (edges[mode].needs_top) {
        uint8_t *top, *topleft;
        int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !ss_h) - x) * 4;
        int n_px_need_tr = 0;

        if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
            n_px_need_tr = 4;

        if (have_top) {
            top = !(row & 7) && !y ?
                s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
            if (have_left)
                topleft = !(row & 7) && !y ?
                    s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                    y == 0 || x == 0 ? &dst_edge[-stride_edge] :
                    &dst_inner[-stride_inner];
        }

        if (have_top &&
            (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
            (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
            n_px_need + n_px_need_tr <= n_px_have) {
            *a = top;
        } else {
            if (have_top) {
                if (n_px_need <= n_px_have) {
                    memcpy(*a, top, n_px_need * bytesperpixel);
                } else {
#define memset_bpp(c, i1, v, i2, num) do { \
    if (bytesperpixel == 1) { \
        memset(&(c)[(i1)], (v)[(i2)], (num)); \
    } else { \
        int n, val = AV_RN16A(&(v)[(i2) * 2]); \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[((i1) + n) * 2], val); \
        } \
    } \
} while (0)
                    memcpy(*a, top, n_px_have * bytesperpixel);
                    memset_bpp(*a, n_px_have, (*a), n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
#define memset_val(c, val, num) do { \
    if (bytesperpixel == 1) { \
        memset((c), (val), (num)); \
    } else { \
        int n; \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[n * 2], (val)); \
        } \
    } \
} while (0)
                memset_val(*a, (128 << (bpp - 8)) - 1, n_px_need);
            }
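            /* memset_bpp() pads an edge buffer by replicating one existing
             * pixel and memset_val() fills it with a constant; both handle 1
             * and 2 bytes per pixel so the same code serves 8-bit and
             * high-bitdepth content. The constants used here are centred on
             * 128 << (bpp - 8), the mid-grey default assumed for missing
             * edges. */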
            if (edges[mode].needs_topleft) {
                if (have_left && have_top) {
#define assign_bpp(c, i1, v, i2) do { \
    if (bytesperpixel == 1) { \
        (c)[(i1)] = (v)[(i2)]; \
    } else { \
        AV_COPY16(&(c)[(i1) * 2], &(v)[(i2) * 2]); \
    } \
} while (0)
                    assign_bpp(*a, -1, topleft, -1);
                } else {
#define assign_val(c, i, v) do { \
    if (bytesperpixel == 1) { \
        (c)[(i)] = (v); \
    } else { \
        AV_WN16A(&(c)[(i) * 2], (v)); \
    } \
} while (0)
                    assign_val((*a), -1, (128 << (bpp - 8)) + (have_top ? +1 : -1));
                }
            }
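            /* For 4x4 transforms whose mode also needs the top-right samples,
             * copy them when the block to the right is available; otherwise
             * the last top pixel is replicated into the top-right positions. */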
            if (tx == TX_4X4 && edges[mode].needs_topright) {
                if (have_top && have_right &&
                    n_px_need + n_px_need_tr <= n_px_have) {
                    memcpy(&(*a)[4 * bytesperpixel], &top[4 * bytesperpixel], 4 * bytesperpixel);
                } else {
                    memset_bpp(*a, 4, *a, 3, 4);
                }
            }
        }
    }
    if (edges[mode].needs_left) {
        if (have_left) {
            int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !ss_v) - y) * 4;
            uint8_t *dst = x == 0 ? dst_edge : dst_inner;
            ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;

            if (edges[mode].invert_left) {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                    memset_bpp(l, n_px_have, l, n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                    memset_bpp(l, 0, l, n_px_need - n_px_have, n_px_need - n_px_have);
                }
            }
        } else {
            memset_val(l, (128 << (bpp - 8)) + 1, 4 << tx);
        }
    }

    return mode;
}
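/*
 * intra_recon(): reconstruct an intra-coded block. For every transform-sized
 * sub-block it gathers the prediction edges via check_intra_mode(), runs the
 * selected spatial predictor from s->dsp.intra_pred[], and, when the sub-block
 * has coded coefficients, adds the inverse transform via s->dsp.itxfm_add[].
 * Luma is handled first, then both chroma planes.
 */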
static av_always_inline void intra_recon(VP9TileData *td, ptrdiff_t y_off,
                                         ptrdiff_t uv_off, int bytesperpixel)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    int row = td->row, col = td->col;
    int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
    int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
    int end_x = FFMIN(2 * (s->cols - col), w4);
    int end_y = FFMIN(2 * (s->rows - row), h4);
    int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
    int uvstep1d = 1 << b->uvtx, p;
    uint8_t *dst = td->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off;
    /* ... (aligned local buffers backing the top (a) and left (l) edge arrays
     * are declared here; omitted in this excerpt) ... */
    for (n = 0, y = 0; y < end_y; y += step1d) {
        uint8_t *ptr = dst, *ptr_r = dst_r;
        for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d * bytesperpixel,
                               ptr_r += 4 * step1d * bytesperpixel, n += step) {
            /* ... (mode, a, txtp and eob are set up here; omitted in this excerpt) ... */
            mode = check_intra_mode(td, mode, &a, ptr_r,
                                    s->s.frames[CUR_FRAME].tf.f->linesize[0],
                                    ptr, td->y_stride, l,
                                    col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel);
            s->dsp.intra_pred[b->tx][mode](ptr, td->y_stride, l, a);
            if (eob)
                s->dsp.itxfm_add[tx][txtp](ptr, td->y_stride,
                                           td->block + 16 * n * bytesperpixel, eob);
        }
        dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0];
        dst   += 4 * step1d * td->y_stride;
    }
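    /* Chroma pass: the loop bounds are reduced by the horizontal/vertical
     * subsampling factors and the same predict-then-add-residual sequence is
     * applied to the U and V planes, always with a DCT_DCT inverse transform. */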
    w4    >>= s->ss_h;
    end_x >>= s->ss_h;
    end_y >>= s->ss_v;
    step = 1 << (b->uvtx * 2);
    for (p = 0; p < 2; p++) {
        dst   = td->dst[1 + p];
        dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
        for (n = 0, y = 0; y < end_y; y += uvstep1d) {
            uint8_t *ptr = dst, *ptr_r = dst_r;
            for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel,
                                   ptr_r += 4 * uvstep1d * bytesperpixel, n += step) {
                int mode = b->uvmode;
                /* ... (a and eob are set up here; omitted in this excerpt) ... */
                mode = check_intra_mode(td, mode, &a, ptr_r,
                                        s->s.frames[CUR_FRAME].tf.f->linesize[1],
                                        ptr, td->uv_stride, l, col, x, w4, row, y,
                                        b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel);
                s->dsp.intra_pred[b->uvtx][mode](ptr, td->uv_stride, l, a);
                if (eob)
                    s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
                                                    td->uvblock[p] + 16 * n * bytesperpixel, eob);
            }
            dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1];
            dst   += 4 * uvstep1d * td->uv_stride;
        }
    }
}
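/* The exported entry points (ff_vp9_intra_recon_8bpp / ff_vp9_intra_recon_16bpp)
 * are presumably thin wrappers that instantiate the inline template once per
 * bit depth; their bodies are not part of this excerpt, so the following is a
 * sketch only: */
void ff_vp9_intra_recon_8bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(td, y_off, uv_off, 1); /* assumed: 1 byte per pixel */
}

void ff_vp9_intra_recon_16bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(td, y_off, uv_off, 2); /* assumed: 2 bytes per pixel */
}

/*
 * Inter prediction helpers. mc_luma_unscaled() fetches a luma block from a
 * reference frame of the same size: it applies the 1/8-pel motion vector,
 * waits for the needed reference rows to be decoded, and falls back to
 * emulated_edge_mc() when the subpel filter footprint would read outside the
 * frame.
 */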
static av_always_inline void mc_luma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
                                              uint8_t *dst, ptrdiff_t dst_stride,
                                              const uint8_t *ref, ptrdiff_t ref_stride,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                              int bw, int bh, int w, int h, int bytesperpixel)
{
    const VP9Context *s = td->s;
    int mx = mv->x, my = mv->y, th;

    y   += my >> 3;
    x   += mx >> 3;
    ref += y * ref_stride + x * bytesperpixel;
    mx  &= 7;
    my  &= 7;
    // we use +7 because the last 7 pixels of each sbrow can be changed in
    // the longest loopfilter of the next sbrow
    th = (y + bh + 4 * !!my + 7) >> 6;
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                 ref - !!my * 3 * ref_stride - !!mx * 3 * bytesperpixel,
                                 160, ref_stride,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref        = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        ref_stride = 160;
    }
    mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
}
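/*
 * Chroma counterpart: the motion vector is scaled up for subsampled planes
 * (1/16-pel precision after the (1 << !ss_h) / (1 << !ss_v) multiply), and U
 * and V are fetched with the same edge-emulation fallback, sharing
 * td->edge_emu_buffer.
 */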
static av_always_inline void mc_chroma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
                                                uint8_t *dst_u, uint8_t *dst_v,
                                                ptrdiff_t dst_stride,
                                                const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                                const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                                ThreadFrame *ref_frame,
                                                ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                                int bw, int bh, int w, int h, int bytesperpixel)
{
    const VP9Context *s = td->s;
    int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th;

    y     += my >> 4;
    x     += mx >> 4;
    ref_u += y * src_stride_u + x * bytesperpixel;
    ref_v += y * src_stride_v + x * bytesperpixel;
    mx    &= 15;
    my    &= 15;
    th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                 ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel,
                                 160, src_stride_u,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_u = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my);

        s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                 ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel,
                                 160, src_stride_v,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_v = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my);
    } else {
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
    }
}
#define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_unscaled(td, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                     mv, bw, bh, w, h, bytesperpixel)
#define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_unscaled(td, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                       row, col, mv, bw, bh, w, h, bytesperpixel)
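/* mc_luma_dir()/mc_chroma_dir() adapt the call sites in the shared MC template
 * to the unscaled helpers above. Each FN()/BYTES_PER_PIXEL pair below
 * presumably brackets an #include of that shared template (vp9_mc_template.c
 * in FFmpeg), instantiating the block-level inter prediction functions once
 * for 8 bpp and once for 16 bpp; the #include lines themselves are not shown
 * in this excerpt. */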
#define FN(x) x##_8bpp
#define BYTES_PER_PIXEL 1
/* ... */
#undef FN
#undef BYTES_PER_PIXEL

#define FN(x) x##_16bpp
#define BYTES_PER_PIXEL 2
/* ... */
#undef FN
#undef BYTES_PER_PIXEL
static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
                                            vp9_mc_func (*mc)[2],
                                            uint8_t *dst, ptrdiff_t dst_stride,
                                            const uint8_t *ref, ptrdiff_t ref_stride,
                                            ThreadFrame *ref_frame,
                                            ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                            int px, int py, int pw, int ph,
                                            int bw, int bh, int w, int h, int bytesperpixel,
                                            const uint16_t *scale, const uint8_t *step)
{
    const VP9Context *s = td->s;

    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_luma_unscaled(td, mc, dst, dst_stride, ref, ref_stride, ref_frame,
                         y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
#define scale_mv(n, dim) (((int64_t)(n) * scale[dim]) >> 14)
        int mx, my;
        int refbw_m1, refbh_m1;
        int th;
        VP56mv mv;

        mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
        mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
        mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);

        y    = my >> 4;
        x    = mx >> 4;
        ref += y * ref_stride + x * bytesperpixel;
        mx  &= 15;
        my  &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        th = (y + refbh_m1 + 4 + 7) >> 6;
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                     ref - 3 * ref_stride - 3 * bytesperpixel,
                                     288, ref_stride,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref        = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            ref_stride = 288;
        }
        smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
    }
}
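/*
 * mc_chroma_scaled() repeats the same pattern for the two chroma planes: fall
 * back to the unscaled path when the reference has the frame's dimensions,
 * otherwise clip the motion vector with subsampling-dependent bounds, step
 * through the reference at the scaled rate, and use the 288-byte-stride
 * edge_emu_buffer when the filter would read outside the reference.
 */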
static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
                                              vp9_mc_func (*mc)[2],
                                              uint8_t *dst_u, uint8_t *dst_v,
                                              ptrdiff_t dst_stride,
                                              const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                              const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                              int px, int py, int pw, int ph,
                                              int bw, int bh, int w, int h, int bytesperpixel,
                                              const uint16_t *scale, const uint8_t *step)
{
    const VP9Context *s = td->s;

    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_chroma_unscaled(td, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
                           ref_v, src_stride_v, ref_frame,
                           y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
        int mx, my;
        int refbw_m1, refbh_m1;
        int th;
        VP56mv mv;

        if (s->ss_h) {
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 16, (s->cols * 4 - x + px + 3) * 16);
            /* ... (mx derived from mv.x and x via scale_mv(); omitted in this excerpt) ... */
        } else {
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
            mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        }
        if (s->ss_v) {
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 16, (s->rows * 4 - y + py + 3) * 16);
            /* ... (my derived from mv.y and y via scale_mv(); omitted in this excerpt) ... */
        } else {
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
            my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
        }

        y      = my >> 4;
        x      = mx >> 4;
        ref_u += y * src_stride_u + x * bytesperpixel;
        ref_v += y * src_stride_v + x * bytesperpixel;
        mx    &= 15;
        my    &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                     ref_u - 3 * src_stride_u - 3 * bytesperpixel,
                                     288, src_stride_u,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_u = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_u, dst_stride, ref_u, 288, bh, mx, my, step[0], step[1]);

            s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                     ref_v - 3 * src_stride_v - 3 * bytesperpixel,
                                     288, src_stride_v,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_v = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_v, dst_stride, ref_v, 288, bh, mx, my, step[0], step[1]);
        } else {
            smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
            smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
        }
    }
}
#define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_scaled(td, s->dsp.s##mc, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                   mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                   s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
#define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_scaled(td, s->dsp.s##mc, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                     row, col, mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                     s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
#define FN(x) x##_scaled_8bpp
#define BYTES_PER_PIXEL 1
/* ... */
#undef FN
#undef BYTES_PER_PIXEL

#define FN(x) x##_scaled_16bpp
#define BYTES_PER_PIXEL 2
/* ... */
#undef FN
#undef BYTES_PER_PIXEL
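/*
 * inter_recon(): entry point for reconstructing an inter-coded block. It
 * rejects references with invalid dimensions, dispatches to the scaled or
 * unscaled prediction template depending on s->mvscale[] for the block's
 * reference(s), and then, for non-skipped blocks, adds the coded residuals
 * with the DCT_DCT inverse transform for luma and both chroma planes.
 */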
static av_always_inline void inter_recon(VP9TileData *td, int bytesperpixel)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    int row = td->row, col = td->col;

    if (s->mvscale[b->ref[0]][0] == REF_INVALID_SCALE ||
        (b->comp && s->mvscale[b->ref[1]][0] == REF_INVALID_SCALE)) {
        if (!s->td->error_info) {
            s->td->error_info = AVERROR_INVALIDDATA;
            av_log(s->avctx, AV_LOG_ERROR, "Bitstream not supported, "
                   "reference frame has invalid dimensions\n");
        }
        return;
    }

    if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
        if (bytesperpixel == 1) {
            inter_pred_scaled_8bpp(td);
        } else {
            inter_pred_scaled_16bpp(td);
        }
    } else {
        if (bytesperpixel == 1) {
            inter_pred_8bpp(td);
        } else {
            inter_pred_16bpp(td);
        }
    }
    if (!b->skip) {
        int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
        int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
        int end_x = FFMIN(2 * (s->cols - col), w4);
        int end_y = FFMIN(2 * (s->rows - row), h4);
        int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
        int uvstep1d = 1 << b->uvtx, p;
        uint8_t *dst = td->dst[0];

        // y itxfm add
        for (n = 0, y = 0; y < end_y; y += step1d) {
            uint8_t *ptr = dst;
            for (x = 0; x < end_x; x += step1d,
                 ptr += 4 * step1d * bytesperpixel, n += step) {
                /* ... (eob for this sub-block is set up here; omitted in this excerpt) ... */
                if (eob)
                    s->dsp.itxfm_add[tx][DCT_DCT](ptr, td->y_stride,
                                                  td->block + 16 * n * bytesperpixel, eob);
            }
            dst += 4 * td->y_stride * step1d;
        }

        // uv itxfm add
        end_x >>= s->ss_h;
        end_y >>= s->ss_v;
        step = 1 << (b->uvtx * 2);
        for (p = 0; p < 2; p++) {
            dst = td->dst[p + 1];
            for (n = 0, y = 0; y < end_y; y += uvstep1d) {
                uint8_t *ptr = dst;
                for (x = 0; x < end_x; x += uvstep1d,
                     ptr += 4 * uvstep1d * bytesperpixel, n += step) {
                    /* ... (chroma eob is set up here; omitted in this excerpt) ... */
                    if (eob)
                        s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
                                                        td->uvblock[p] + 16 * n * bytesperpixel, eob);
                }
                dst += 4 * uvstep1d * td->uv_stride;
            }
        }
    }
}
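/* As with the intra path, the public wrappers (ff_vp9_inter_recon_8bpp /
 * ff_vp9_inter_recon_16bpp) are presumably one-line shims around the inline
 * template; their bodies are not shown here, so this is a sketch only: */
void ff_vp9_inter_recon_8bpp(VP9TileData *td)
{
    inter_recon(td, 1); /* assumed: 1 byte per pixel */
}

void ff_vp9_inter_recon_16bpp(VP9TileData *td)
{
    inter_recon(td, 2); /* assumed: 2 bytes per pixel */
}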