/*
 * Fill the reference (y0/u0/v0) and test (y1/u1/v1) planes with identical
 * random samples, masked to the valid range for the tested bit depth, and
 * seed both destination buffers with the same random bytes so that the
 * comparison below only reflects what the pack functions actually wrote.
 * Each AV_WN32A stores 4 bytes, i.e. four 8-bit or two 16-bit samples,
 * hence the 4 / size loop increment.
 */
#define randomize_buffers(mask)                        \
    {                                                  \
        int i, size = sizeof(*y0);                     \
        for (i = 0; i < BUF_SIZE; i += 4 / size) {     \
            uint32_t r = rnd() & mask;                 \
            AV_WN32A(y0 + i, r);                       \
            AV_WN32A(y1 + i, r);                       \
        }                                              \
        for (i = 0; i < BUF_SIZE / 2; i += 4 / size) { \
            uint32_t r = rnd() & mask;                 \
            AV_WN32A(u0 + i, r);                       \
            AV_WN32A(u1 + i, r);                       \
            r = rnd() & mask;                          \
            AV_WN32A(v0 + i, r);                       \
            AV_WN32A(v1 + i, r);                       \
        }                                              \
        for (i = 0; i < width * 8 / 3; i += 4) {       \
            uint32_t r = rnd();                        \
            AV_WN32A(dst0 + i, r);                     \
            AV_WN32A(dst1 + i, r);                     \
        }                                              \
    }
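/*
 * Worked example of the masking (values hypothetical): in the 10-bit case
 * one masked 32-bit write constrains both 16-bit samples at once, e.g.
 *
 *     rnd()              -> 0xdeadbeef
 *     rnd() & 0x03ff03ff -> 0x02ad02ef   (two in-range 10-bit samples)
 */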
/*
 * Exercise pack_line across a range of widths, with a random alignment
 * offset applied to the source planes each iteration, comparing the C
 * reference against the accelerated version and benchmarking the latter.
 * The input-plane memcmps also catch implementations that clobber their
 * source buffers.
 */
#define check_pack_line(type, mask)                                           \
    do {                                                                      \
        LOCAL_ALIGNED_16(type, y0, [BUF_SIZE]);                               \
        LOCAL_ALIGNED_16(type, y1, [BUF_SIZE]);                               \
        LOCAL_ALIGNED_16(type, u0, [BUF_SIZE / 2]);                           \
        LOCAL_ALIGNED_16(type, u1, [BUF_SIZE / 2]);                           \
        LOCAL_ALIGNED_16(type, v0, [BUF_SIZE / 2]);                           \
        LOCAL_ALIGNED_16(type, v1, [BUF_SIZE / 2]);                           \
        LOCAL_ALIGNED_16(uint8_t, dst0, [BUF_SIZE * 8 / 3]);                  \
        LOCAL_ALIGNED_16(uint8_t, dst1, [BUF_SIZE * 8 / 3]);                  \
                                                                              \
        declare_func(void, const type *y, const type *u, const type *v,      \
                     uint8_t *dst, ptrdiff_t width);                          \
        ptrdiff_t width, step = 12 / sizeof(type);                            \
                                                                              \
        for (width = step; width < BUF_SIZE - 15; width += step) {            \
            int y_offset  = rnd() & 15;                                       \
            int uv_offset = y_offset / 2;                                     \
            randomize_buffers(mask);                                          \
            call_ref(y0 + y_offset, u0 + uv_offset, v0 + uv_offset, dst0, width); \
            call_new(y1 + y_offset, u1 + uv_offset, v1 + uv_offset, dst1, width); \
            if (memcmp(y0, y1, BUF_SIZE) || memcmp(u0, u1, BUF_SIZE / 2) ||   \
                memcmp(v0, v1, BUF_SIZE / 2) || memcmp(dst0, dst1, width * 8 / 3)) \
                fail();                                                       \
            bench_new(y1 + y_offset, u1 + uv_offset, v1 + uv_offset, dst1, width); \
        }                                                                     \
    } while (0)
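/*
 * For context, the width * 8 / 3 sizing above follows from the v210 layout:
 * each group of 6 pixels (6 Y + 3 U + 3 V samples) packs into four 32-bit
 * little-endian words, i.e. 16 bytes, so 16/6 = 8/3 bytes per pixel. The
 * scalar sketch below illustrates that packing for one 10-bit group; it is
 * an illustration, not FFmpeg's implementation, and the helper name
 * pack_group_10 is hypothetical.
 */
static inline void pack_group_10(const uint16_t *y, const uint16_t *u,
                                 const uint16_t *v, uint8_t *dst)
{
    /* Each word holds three 10-bit samples, interleaved in v210 order. */
    AV_WL32(dst +  0, u[0] | y[0] << 10 | v[0] << 20); /* U0 Y0 V0 */
    AV_WL32(dst +  4, y[1] | u[1] << 10 | y[2] << 20); /* Y1 U1 Y2 */
    AV_WL32(dst +  8, v[1] | y[3] << 10 | u[2] << 20); /* V1 Y3 U2 */
    AV_WL32(dst + 12, y[4] | v[2] << 10 | y[5] << 20); /* Y4 V2 Y5 */
}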
void checkasm_check_v210enc(void)
{
    V210EncContext h;

    ff_v210enc_init(&h);

    /* 0xffffffff: 8-bit samples use the full byte range;
     * 0x03ff03ff: each pair of 16-bit samples is masked to 10 bits. */
    if (check_func(h.pack_line_8, "v210_planar_pack_8"))
        check_pack_line(uint8_t, 0xffffffff);

    if (check_func(h.pack_line_10, "v210_planar_pack_10"))
        check_pack_line(uint16_t, 0x03ff03ff);

    report("planar_pack");
}