DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
    {  36, 68,  60, 92,  34, 66,  58, 90, },
    { 100,  4, 124, 28,  98,  2, 122, 26, },
    {  52, 84,  44, 76,  50, 82,  42, 74, },
    { 116, 20, 108, 12, 114, 18, 106, 10, },
    {  32, 64,  56, 88,  38, 70,  62, 94, },
    {  96,  0, 120, 24, 102,  6, 126, 30, },
    {  48, 80,  40, 72,  54, 86,  46, 78, },
    { 112, 16, 104,  8, 118, 22, 110, 14, },
    {  36, 68,  60, 92,  34, 66,  58, 90, },
};

static const uint8_t sws_pb_64[8] = {
    64, 64, 64, 64, 64, 64, 64, 64
};
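/* Illustration (added; an assumption about usage, not code from this file):
 * an 8x8 ordered-dither table like the one above is typically indexed per
 * output line, so each destination row reuses one row of thresholds; the
 * 9th row duplicates the first so a row pointer may safely be taken at an
 * offset.  A minimal sketch:
 *
 *     const uint8_t *d128 = ff_dither_8x8_128[dstY & 7];
 *     // pass d128 as the per-line dither row to an output function
 */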
static void hScale16To19_c(SwsContext *c, int16_t *_dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    int i;
    int32_t *dst        = (int32_t *) _dst;
    const uint16_t *src = (const uint16_t *) _src;
    /* ... shift amount sh is derived from the source bit depth ... */

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val    = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
    }
}
static void hScale16To15_c(SwsContext *c, int16_t *dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
    int i;
    const uint16_t *src = (const uint16_t *) _src;
    int sh              = desc->comp[0].depth - 1;
    /* ... */

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val    = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
    }
}
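/* Worked example (added, with assumed values, for illustration only): with
 * a 10-bit source, sh = depth - 1 = 9.  A 2-tap filter { 8192, 8192 } (each
 * 0.5 in 14-bit fixed point) applied to samples 512 and 1024 accumulates
 * val = 512*8192 + 1024*8192 = 12582912, and val >> 9 = 24576, which is the
 * average 768 rescaled to the 15-bit internal range (768 << 5); the FFMIN()
 * clamp keeps overshooting filters inside that range. */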
static void hScale8To15_c(SwsContext *c, int16_t *dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    int i;
    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val    = 0;
        for (j = 0; j < filterSize; j++) {
            val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
    }
}
static void hScale8To19_c(SwsContext *c, int16_t *_dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val    = 0;
        for (j = 0; j < filterSize; j++) {
            val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> 3, (1 << 19) - 1);
    }
}
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775) * 4663 - 9289992) >> 12;
        dstV[i] = (FFMIN(dstV[i], 30775) * 4663 - 9289992) >> 12;
    }
}

static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + 4081085) >> 11;
        dstV[i] = (dstV[i] * 1799 + 4081085) >> 11;
    }
}

static void lumRangeToJpeg_c(int16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (FFMIN(dst[i], 30189) * 19077 - 39057361) >> 14;
}

static void lumRangeFromJpeg_c(int16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (dst[i] * 14071 + 33561947) >> 14;
}
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
{
    int i;
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;
        dstV[i] = (FFMIN(dstV[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;
    }
}

static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
{
    int i;
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + (4081085 << 4)) >> 11;
        dstV[i] = (dstV[i] * 1799 + (4081085 << 4)) >> 11;
    }
}

static void lumRangeToJpeg16_c(int16_t *_dst, int width)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < width; i++)
        dst[i] = ((int)(FFMIN(dst[i], 30189 << 4) * 4769U - (39057361 << 2))) >> 12;
}

static void lumRangeFromJpeg16_c(int16_t *_dst, int width)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < width; i++)
        dst[i] = ((int)(dst[i] * (14071U / 4) + (33561947 << 4) / 4)) >> 12;
}
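/* Background note (added derivation, not text from this file): these are
 * fixed-point versions of the MPEG<->JPEG range mappings applied to the
 * 15-bit internal sample representation:
 *   luma:   full = (limited - (16 << 7)) * 255/219, and the inverse
 *   chroma: full = (limited - (128 << 7)) * 255/224 + (128 << 7), and back.
 * The multipliers are those scale factors in fixed point, e.g.
 *   19077 ~= (255/219) * 2^14,   14071 ~= (219/255) * 2^14,
 *    4663 ~= (255/224) * 2^12,    1799 ~= (224/255) * 2^11,
 * with the additive bias folded into a single rounded constant, and FFMIN()
 * clamping inputs whose scaled result would overflow the 15-bit range. */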
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...)                      \
    if (DEBUG_SWSCALE_BUFFERS)                  \
        av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
static int swscale(SwsContext *c, const uint8_t *src[],
                   int srcStride[], int srcSliceY,
                   int srcSliceH, uint8_t *dst[], int dstStride[])
{
    /* load a few things into local vars to make the code more readable */
    const int dstW = c->dstW;
    const int dstH = c->dstH;

    const enum AVPixelFormat dstFormat = c->dstFormat;
    const int flags          = c->flags;
    int32_t *vLumFilterPos   = c->vLumFilterPos;
    int32_t *vChrFilterPos   = c->vChrFilterPos;

    const int vLumFilterSize = c->vLumFilterSize;
    const int vChrFilterSize = c->vChrFilterSize;

    yuv2planar1_fn yuv2plane1      = c->yuv2plane1;
    yuv2planarX_fn yuv2planeX      = c->yuv2planeX;
    yuv2interleavedX_fn yuv2nv12cX = c->yuv2nv12cX;
    yuv2packed1_fn yuv2packed1     = c->yuv2packed1;
    yuv2packed2_fn yuv2packed2     = c->yuv2packed2;
    yuv2packedX_fn yuv2packedX     = c->yuv2packedX;
    yuv2anyX_fn yuv2anyX           = c->yuv2anyX;
    const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
    const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
    int should_dither = isNBPS(c->srcFormat) ||
                        is16BPS(c->srcFormat);
    int lastDstY;

    /* vars which will change and which we need to store back in the context */
    int dstY         = c->dstY;
    int lastInLumBuf = c->lastInLumBuf;
    int lastInChrBuf = c->lastInChrBuf;

    int lumStart = 0;
    int lumEnd   = c->descIndex[0];
    int chrStart = lumEnd;
    int chrEnd   = c->descIndex[1];
    int vStart   = chrEnd;
    int vEnd     = c->numDesc;
    SwsSlice *src_slice  = &c->slice[lumStart];
    SwsSlice *hout_slice = &c->slice[c->numSlice - 2];
    SwsSlice *vout_slice = &c->slice[c->numSlice - 1];
    SwsFilterDescriptor *desc = c->desc;

    int needAlpha = c->needAlpha;

    int hasLumHoles = 1;
    int hasChrHoles = 1;
    if (isPacked(c->srcFormat)) {
        src[1] =
        src[2] =
        src[3] = src[0];
        srcStride[1] =
        srcStride[2] =
        srcStride[3] = srcStride[0];
    }
    srcStride[1] *= 1 << c->vChrDrop;
    srcStride[2] *= 1 << c->vChrDrop;

    DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
                  src[0], srcStride[0], src[1], srcStride[1],
                  src[2], srcStride[2], src[3], srcStride[3],
                  dst[0], dstStride[0], dst[1], dstStride[1],
                  dst[2], dstStride[2], dst[3], dstStride[3]);
    DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
                  srcSliceY, srcSliceH, dstY, dstH);
    DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
                  vLumFilterSize, vChrFilterSize);
    if (dstStride[0]&15 || dstStride[1]&15 ||
        dstStride[2]&15 || dstStride[3]&15) {
        static int warnedAlready = 0;
        if (flags & SWS_PRINT_INFO && !warnedAlready) {
            av_log(c, AV_LOG_WARNING,
                   "Warning: dstStride is not aligned!\n"
                   "         ->cannot do aligned memory accesses anymore\n");
            warnedAlready = 1;
        }
    }

    if (   (uintptr_t)dst[0]&15 || (uintptr_t)dst[1]&15 || (uintptr_t)dst[2]&15
        || (uintptr_t)src[0]&15 || (uintptr_t)src[1]&15 || (uintptr_t)src[2]&15
        || dstStride[0]&15 || dstStride[1]&15 || dstStride[2]&15 || dstStride[3]&15
        || srcStride[0]&15 || srcStride[1]&15 || srcStride[2]&15 || srcStride[3]&15
    ) {
        static int warnedAlready = 0;
        int cpu_flags = av_get_cpu_flags();
        if (HAVE_MMXEXT && (cpu_flags & AV_CPU_FLAG_SSE2) && !warnedAlready) {
            av_log(c, AV_LOG_WARNING,
                   "Warning: data is not aligned! This can lead to a speed loss\n");
            warnedAlready = 1;
        }
    }
    /* Note the user might start scaling the picture in the middle so this
     * will not get executed. This is not really intended but works
     * currently, so people might do it. */
    if (srcSliceY == 0) {
        dstY         = 0;
        lastInLumBuf = -1;
        lastInChrBuf = -1;
    }

    if (!should_dither) {
        c->chrDither8 = c->lumDither8 = sws_pb_64;
    }
    lastDstY = dstY;

    ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
                       yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX,
                       c->use_mmx_vfilter);

    ff_init_slice_from_src(src_slice, (uint8_t **)src, srcStride, c->srcW,
                           srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);

    ff_init_slice_from_src(vout_slice, (uint8_t **)dst, dstStride, c->dstW,
                           dstY, dstH, dstY >> c->chrDstVSubSample,
                           AV_CEIL_RSHIFT(dstH, c->chrDstVSubSample), 0);
    if (srcSliceY == 0) {
        /* ... reset the sliceY/sliceH bookkeeping of the horizontal
         * output slice ... */
        hout_slice->width = dstW;
    }
    for (; dstY < dstH; dstY++) {
        const int chrDstY = dstY >> c->chrDstVSubSample;
        int use_mmx_vfilter = c->use_mmx_vfilter;

        /* First line needed as input */
        const int firstLumSrcY  = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
        const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize,
                                        vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1),
                                                            dstH - 1)]);
        const int firstChrSrcY  = FFMAX(1 - vChrFilterSize,
                                        vChrFilterPos[chrDstY]);

        /* Last line needed as input */
        int lastLumSrcY  = FFMIN(c->srcH,    firstLumSrcY  + vLumFilterSize) - 1;
        int lastLumSrcY2 = FFMIN(c->srcH,    firstLumSrcY2 + vLumFilterSize) - 1;
        int lastChrSrcY  = FFMIN(c->chrSrcH, firstChrSrcY  + vChrFilterSize) - 1;
        int enough_lines;
        int i;
        int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
        // handle holes (FAST_BILINEAR & weird filters)
        if (firstLumSrcY > lastInLumBuf) {
            hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
            if (hasLumHoles) {
                /* ... restart the luma part of the horizontal output
                 * slice at firstLumSrcY ... */
            }
            lastInLumBuf = firstLumSrcY - 1;
        }
        if (firstChrSrcY > lastInChrBuf) {
            hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
            if (hasChrHoles) {
                /* ... restart the chroma part of the horizontal output
                 * slice at firstChrSrcY ... */
            }
            lastInChrBuf = firstChrSrcY - 1;
        }

        DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
                      firstLumSrcY, lastLumSrcY, lastInLumBuf);
        DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
                      firstChrSrcY, lastChrSrcY, lastInChrBuf);
        // Do we have enough lines in this slice to output the dstY line
        enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
                       lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);

        if (!enough_lines) {
            lastLumSrcY = srcSliceY + srcSliceH - 1;
            lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
            DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
                          lastLumSrcY, lastChrSrcY);
        }
        posY = hout_slice->plane[0].sliceY + hout_slice->plane[0].sliceH;
        if (posY <= lastLumSrcY && !hasLumHoles) {
            firstPosY = FFMAX(firstLumSrcY, posY);
            lastPosY  = FFMIN(firstLumSrcY + hout_slice->plane[0].available_lines - 1,
                              srcSliceY + srcSliceH - 1);
        } else {
            firstPosY = posY;
            lastPosY  = lastLumSrcY;
        }

        cPosY = hout_slice->plane[1].sliceY + hout_slice->plane[1].sliceH;
        if (cPosY <= lastChrSrcY && !hasChrHoles) {
            firstCPosY = FFMAX(firstChrSrcY, cPosY);
            lastCPosY  = FFMIN(firstChrSrcY + hout_slice->plane[1].available_lines - 1,
                               AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample) - 1);
        } else {
            firstCPosY = cPosY;
            lastCPosY  = lastChrSrcY;
        }

        ff_rotate_slice(hout_slice, lastPosY, lastCPosY);

        if (posY < lastLumSrcY + 1) {
            for (i = lumStart; i < lumEnd; ++i)
                desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
        }

        lastInLumBuf = lastLumSrcY;

        if (cPosY < lastChrSrcY + 1) {
            for (i = chrStart; i < chrEnd; ++i)
                desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
        }

        lastInChrBuf = lastChrSrcY;
        if (!enough_lines)
            break;  // we can't output a dstY line so let's try with the next slice

        if (dstY >= dstH - 2) {
            /* hmm looks like we can't use MMX here without overwriting
             * this array's tail */
            ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
                                     &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
            use_mmx_vfilter = 0;
            ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
                               yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX,
                               use_mmx_vfilter);
        }

        for (i = vStart; i < vEnd; ++i)
            desc[i].process(c, &desc[i], dstY, 1);
    }
    if (isPlanar(dstFormat) && isALPHA(dstFormat) && !needAlpha) {
        int length = dstW;
        int height = dstY - lastDstY;

        if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
            fillPlane16(dst[3], dstStride[3], length, height, lastDstY,
                        1, desc->comp[3].depth,
                        isBE(dstFormat));
        } else if (is32BPS(dstFormat)) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
            fillPlane32(dst[3], dstStride[3], length, height, lastDstY,
                        1, desc->comp[3].depth,
                        isBE(dstFormat), desc->flags & AV_PIX_FMT_FLAG_FLOAT);
        } else
            fillPlane(dst[3], dstStride[3], length, height, lastDstY, 255);
    }
#if HAVE_MMXEXT_INLINE
    if (av_get_cpu_flags() & AV_CPU_FLAG_MMXEXT)
        __asm__ volatile ("sfence" ::: "memory");
#endif

    /* store changed local vars back in the context */
    c->dstY         = dstY;
    c->lastInLumBuf = lastInLumBuf;
    c->lastInChrBuf = lastInChrBuf;

    return dstY - lastDstY;
}
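/* Note (added for clarity): swscale() returns the number of destination
 * lines produced for this slice; the sws_scale() wrapper below relies on
 * that count when chaining cascaded contexts and when post-converting
 * XYZ output. */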
av_cold void ff_sws_init_range_convert(SwsContext *c)
{
    c->lumConvertRange = NULL;
    c->chrConvertRange = NULL;
    if (c->srcRange != c->dstRange && !isAnyRGB(c->dstFormat)) {
        if (c->dstBpc <= 14) {
            if (c->srcRange) {
                c->lumConvertRange = lumRangeFromJpeg_c;
                c->chrConvertRange = chrRangeFromJpeg_c;
            } else {
                c->lumConvertRange = lumRangeToJpeg_c;
                c->chrConvertRange = chrRangeToJpeg_c;
            }
        } else {
            if (c->srcRange) {
                c->lumConvertRange = lumRangeFromJpeg16_c;
                c->chrConvertRange = chrRangeFromJpeg16_c;
            } else {
                c->lumConvertRange = lumRangeToJpeg16_c;
                c->chrConvertRange = chrRangeToJpeg16_c;
            }
        }
    }
}

static av_cold void sws_init_swscale(SwsContext *c)
{
    enum AVPixelFormat srcFormat = c->srcFormat;

    ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
                             &c->yuv2nv12cX, &c->yuv2packed1,
                             &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);

    ff_sws_init_input_funcs(c);

    if (c->srcBpc == 8) {
        if (c->dstBpc <= 14) {
            c->hyScale = c->hcScale = hScale8To15_c;
            if (c->flags & SWS_FAST_BILINEAR) {
                c->hyscale_fast = ff_hyscale_fast_c;
                c->hcscale_fast = ff_hcscale_fast_c;
            }
        } else {
            c->hyScale = c->hcScale = hScale8To19_c;
        }
    } else {
        c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_c
                                                 : hScale16To15_c;
    }

    ff_sws_init_range_convert(c);

    if (!(isGray(srcFormat) || isGray(c->dstFormat) ||
          srcFormat == AV_PIX_FMT_MONOBLACK || srcFormat == AV_PIX_FMT_MONOWHITE))
        c->needs_hcscale = 1;
}
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt,
                                const int linesizes[4])
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int i;

    for (i = 0; i < 4; i++) {
        int plane = desc->comp[i].plane;
        if (!data[plane] || !linesizes[plane])
            return 0;
    }

    return 1;
}
static void xyz12Torgb48(struct SwsContext *c, uint16_t *dst,
                         const uint16_t *src, int stride, int h)
{
    int xp, yp;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);

    for (yp = 0; yp < h; yp++) {
        for (xp = 0; xp + 2 < stride; xp += 3) {
            int x, y, z, r, g, b;

            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                x = AV_RB16(src + xp + 0);
                y = AV_RB16(src + xp + 1);
                z = AV_RB16(src + xp + 2);
            } else {
                x = AV_RL16(src + xp + 0);
                y = AV_RL16(src + xp + 1);
                z = AV_RL16(src + xp + 2);
            }

            x = c->xyzgamma[x >> 4];
            y = c->xyzgamma[y >> 4];
            z = c->xyzgamma[z >> 4];

            // convert from XYZlinear to sRGBlinear
            r = c->xyz2rgb_matrix[0][0] * x +
                c->xyz2rgb_matrix[0][1] * y +
                c->xyz2rgb_matrix[0][2] * z >> 12;
            g = c->xyz2rgb_matrix[1][0] * x +
                c->xyz2rgb_matrix[1][1] * y +
                c->xyz2rgb_matrix[1][2] * z >> 12;
            b = c->xyz2rgb_matrix[2][0] * x +
                c->xyz2rgb_matrix[2][1] * y +
                c->xyz2rgb_matrix[2][2] * z >> 12;

            // limit values to 12-bit depth
            r = av_clip_uintp2(r, 12);
            g = av_clip_uintp2(g, 12);
            b = av_clip_uintp2(b, 12);

            // convert from sRGBlinear to RGB and scale from 12 bit to 16 bit
            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                AV_WB16(dst + xp + 0, c->rgbgamma[r] << 4);
                AV_WB16(dst + xp + 1, c->rgbgamma[g] << 4);
                AV_WB16(dst + xp + 2, c->rgbgamma[b] << 4);
            } else {
                AV_WL16(dst + xp + 0, c->rgbgamma[r] << 4);
                AV_WL16(dst + xp + 1, c->rgbgamma[g] << 4);
                AV_WL16(dst + xp + 2, c->rgbgamma[b] << 4);
            }
        }
        src += stride;
        dst += stride;
    }
}
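/* Background note (added; the matrix values themselves live in the
 * SwsContext setup, not in this file): the 3x3 multiply above is the usual
 * fixed-point linear transform
 *     [r g b]^T = (M_xyz2rgb * [x y z]^T) >> 12
 * with M stored in Q12, so an identity mapping would put 4096 on the
 * diagonal.  rgb48Toxyz12 below applies the inverse matrix the same way,
 * with the gamma lookup tables swapped for their inverses. */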
static void rgb48Toxyz12(struct SwsContext *c, uint16_t *dst,
                         const uint16_t *src, int stride, int h)
{
    int xp, yp;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);

    for (yp = 0; yp < h; yp++) {
        for (xp = 0; xp + 2 < stride; xp += 3) {
            int x, y, z, r, g, b;

            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                r = AV_RB16(src + xp + 0);
                g = AV_RB16(src + xp + 1);
                b = AV_RB16(src + xp + 2);
            } else {
                r = AV_RL16(src + xp + 0);
                g = AV_RL16(src + xp + 1);
                b = AV_RL16(src + xp + 2);
            }

            r = c->rgbgammainv[r >> 4];
            g = c->rgbgammainv[g >> 4];
            b = c->rgbgammainv[b >> 4];

            // convert from sRGBlinear to XYZlinear
            x = c->rgb2xyz_matrix[0][0] * r +
                c->rgb2xyz_matrix[0][1] * g +
                c->rgb2xyz_matrix[0][2] * b >> 12;
            y = c->rgb2xyz_matrix[1][0] * r +
                c->rgb2xyz_matrix[1][1] * g +
                c->rgb2xyz_matrix[1][2] * b >> 12;
            z = c->rgb2xyz_matrix[2][0] * r +
                c->rgb2xyz_matrix[2][1] * g +
                c->rgb2xyz_matrix[2][2] * b >> 12;

            // limit values to 12-bit depth
            x = av_clip_uintp2(x, 12);
            y = av_clip_uintp2(y, 12);
            z = av_clip_uintp2(z, 12);

            // convert from XYZlinear to X'Y'Z' and scale from 12 bit to 16 bit
            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                AV_WB16(dst + xp + 0, c->xyzgammainv[x] << 4);
                AV_WB16(dst + xp + 1, c->xyzgammainv[y] << 4);
                AV_WB16(dst + xp + 2, c->xyzgammainv[z] << 4);
            } else {
                AV_WL16(dst + xp + 0, c->xyzgammainv[x] << 4);
                AV_WL16(dst + xp + 1, c->xyzgammainv[y] << 4);
                AV_WL16(dst + xp + 2, c->xyzgammainv[z] << 4);
            }
        }
        src += stride;
        dst += stride;
    }
}
/**
 * swscale wrapper, so we don't need to export the SwsContext.
 */
int attribute_align_arg sws_scale(struct SwsContext *c,
                                  const uint8_t *const srcSlice[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t *const dst[],
                                  const int dstStride[])
{
    int i, ret;
    const uint8_t *src2[4];
    uint8_t *dst2[4];
    uint8_t *rgb0_tmp = NULL;
    int macro_height = isBayer(c->srcFormat) ? 2 : (1 << c->chrSrcVSubSample);
    // copy strides, so they can safely be modified
    int srcStride2[4];
    int dstStride2[4];
    int srcSliceY_internal = srcSliceY;

    if (!srcStride || !dstStride || !dst || !srcSlice) {
        av_log(c, AV_LOG_ERROR,
               "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
        return 0;
    }

    for (i = 0; i < 4; i++) {
        srcStride2[i] = srcStride[i];
        dstStride2[i] = dstStride[i];
    }
    if ((srcSliceY & (macro_height - 1)) ||
        ((srcSliceH & (macro_height - 1)) && srcSliceY + srcSliceH != c->srcH) ||
        srcSliceY + srcSliceH > c->srcH) {
        av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n",
               srcSliceY, srcSliceH);
        return AVERROR(EINVAL);
    }

    if (c->gamma_flag && c->cascaded_context[0]) {
        ret = sws_scale(c->cascaded_context[0],
                        srcSlice, srcStride, srcSliceY, srcSliceH,
                        c->cascaded_tmp, c->cascaded_tmpStride);
        if (ret < 0)
            return ret;

        if (c->cascaded_context[2])
            ret = sws_scale(c->cascaded_context[1],
                            (const uint8_t * const *)c->cascaded_tmp,
                            c->cascaded_tmpStride, srcSliceY, srcSliceH,
                            c->cascaded1_tmp, c->cascaded1_tmpStride);
        else
            ret = sws_scale(c->cascaded_context[1],
                            (const uint8_t * const *)c->cascaded_tmp,
                            c->cascaded_tmpStride, srcSliceY, srcSliceH,
                            dst, dstStride);

        if (ret < 0)
            return ret;

        if (c->cascaded_context[2]) {
            ret = sws_scale(c->cascaded_context[2],
                            (const uint8_t * const *)c->cascaded1_tmp,
                            c->cascaded1_tmpStride,
                            c->cascaded_context[1]->dstY - ret,
                            c->cascaded_context[1]->dstY,
                            dst, dstStride);
        }
        return ret;
    }

    if (c->cascaded_context[0] && srcSliceY == 0 &&
        srcSliceH == c->cascaded_context[0]->srcH) {
        ret = sws_scale(c->cascaded_context[0],
                        srcSlice, srcStride, srcSliceY, srcSliceH,
                        c->cascaded_tmp, c->cascaded_tmpStride);
        if (ret < 0)
            return ret;
        ret = sws_scale(c->cascaded_context[1],
                        (const uint8_t * const *)c->cascaded_tmp,
                        c->cascaded_tmpStride, 0, c->cascaded_context[0]->dstH,
                        dst, dstStride);
        return ret;
    }

    memcpy(src2, srcSlice, sizeof(src2));
    memcpy(dst2, dst, sizeof(dst2));
    if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
        av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
        return 0;
    }
    if (c->sliceDir == 0) {
        if (srcSliceY == 0) c->sliceDir = 1;
        else                c->sliceDir = -1;
    }
    if (usePal(c->srcFormat)) {
        for (i = 0; i < 256; i++) {
            int r, g, b, y, u, v, a = 0xff;
            if (c->srcFormat == AV_PIX_FMT_PAL8) {
                uint32_t p = ((const uint32_t *)(srcSlice[1]))[i];
                a = (p >> 24) & 0xFF;
                r = (p >> 16) & 0xFF;
                g = (p >>  8) & 0xFF;
                b =  p        & 0xFF;
            } else if (c->srcFormat == AV_PIX_FMT_RGB8) {
                r = ( i >> 5     ) * 36;
                g = ((i >> 2) & 7) * 36;
                b = ( i       & 3) * 85;
            } else if (c->srcFormat == AV_PIX_FMT_BGR8) {
                b = ( i >> 6     ) * 85;
                g = ((i >> 3) & 7) * 36;
                r = ( i       & 7) * 36;
            } else if (c->srcFormat == AV_PIX_FMT_RGB4_BYTE) {
                r = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
                b = ( i       & 1) * 255;
            } else if (c->srcFormat == AV_PIX_FMT_GRAY8 ||
                       c->srcFormat == AV_PIX_FMT_GRAY8A) {
                r = g = b = i;
            } else {
                av_assert1(c->srcFormat == AV_PIX_FMT_BGR4_BYTE);
                b = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
                r = ( i       & 1) * 255;
            }
#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
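/* Sanity check (added illustration, not code from this file): the three
 * luma coefficients sum to roughly the limited-range scale,
 *   RY + GY + BY ~= (0.299 + 0.587 + 0.114) * 219/255 * 2^15 ~= 28140,
 * so a white palette entry (r = g = b = 255) maps to
 *   y = ((RY + GY + BY) * 255 + (33 << 14)) >> 15 = 235,
 * the BT.601 limited-range luma maximum; the 16 and 128 offsets are folded
 * into the (33 << 14) and (257 << 14) rounding terms used just below. */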
            y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            c->pal_yuv[i] = y + (u << 8) + (v << 16) + ((unsigned)a << 24);

            switch (c->dstFormat) {
            case AV_PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
            case AV_PIX_FMT_RGB24:
#endif
                c->pal_rgb[i] = r + (g << 8) + (b << 16) + ((unsigned)a << 24);
                break;
            case AV_PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
            case AV_PIX_FMT_BGR24:
#endif
                c->pal_rgb[i] = a + (r << 8) + (g << 16) + ((unsigned)b << 24);
                break;
            case AV_PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
            case AV_PIX_FMT_RGB24:
#endif
                c->pal_rgb[i] = a + (b << 8) + (g << 16) + ((unsigned)r << 24);
                break;
            case AV_PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
            case AV_PIX_FMT_BGR24:
#endif
            default:
                c->pal_rgb[i] = b + (g << 8) + (r << 16) + ((unsigned)a << 24);
            }
        }
    }
    if (c->src0Alpha && !c->dst0Alpha && isALPHA(c->dstFormat)) {
        uint8_t *base;
        int x, y;

        rgb0_tmp = av_malloc(FFABS(srcStride[0]) * srcSliceH + 32);
        if (!rgb0_tmp)
            return AVERROR(ENOMEM);

        base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH - 1) : rgb0_tmp;
        for (y = 0; y < srcSliceH; y++) {
            memcpy(base + srcStride[0] * y, src2[0] + srcStride[0] * y, 4 * c->srcW);
            for (x = c->src0Alpha - 1; x < 4 * c->srcW; x += 4) {
                base[srcStride[0] * y + x] = 0xFF;
            }
        }
        src2[0] = base;
    }

    if (c->srcXYZ && !(c->dstXYZ && c->srcW == c->dstW && c->srcH == c->dstH)) {
        uint8_t *base;

        rgb0_tmp = av_malloc(FFABS(srcStride[0]) * srcSliceH + 32);
        if (!rgb0_tmp)
            return AVERROR(ENOMEM);

        base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH - 1) : rgb0_tmp;

        xyz12Torgb48(c, (uint16_t *)base, (const uint16_t *)src2[0],
                     srcStride[0] / 2, srcSliceH);
        src2[0] = base;
    }

    if (!srcSliceY)
        for (i = 0; i < 4; i++)
            memset(c->dither_error[i], 0, sizeof(c->dither_error[0][0]) * (c->dstW + 2));
    if (c->sliceDir != 1) {
        // slices go from bottom to top => we flip the image internally
        for (i = 0; i < 4; i++) {
            srcStride2[i] *= -1;
            dstStride2[i] *= -1;
        }

        src2[0] += (srcSliceH - 1) * srcStride[0];
        if (!usePal(c->srcFormat))
            src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
        src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
        src2[3] += (srcSliceH - 1) * srcStride[3];
        dst2[0] += (c->dstH - 1) * dstStride[0];
        dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1];
        dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2];
        dst2[3] += (c->dstH - 1) * dstStride[3];

        srcSliceY_internal = c->srcH - srcSliceY - srcSliceH;
    }
    reset_ptr(src2, c->srcFormat);
    reset_ptr((const uint8_t **)dst2, c->dstFormat);

    /* reset slice direction at end of frame */
    if (srcSliceY_internal + srcSliceH == c->srcH)
        c->sliceDir = 0;
    ret = c->swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
                     dst2, dstStride2);

    if (c->dstXYZ && !(c->srcXYZ && c->srcW == c->dstW && c->srcH == c->dstH)) {
        int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;
        uint16_t *dst16 = (uint16_t *)(dst2[0] + (dstY - ret) * dstStride2[0]);
        av_assert0(dstY >= ret);
        av_assert0(ret >= 0);
        rgb48Toxyz12(c, dst16, dst16, dstStride2[0] / 2, ret);
    }

    av_free(rgb0_tmp);
    return ret;
}
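/* Usage sketch (added; not part of this file).  A caller converts one whole
 * frame with a context from sws_getContext(); in_data/in_stride and
 * out_data/out_stride are placeholder names for the caller's buffers:
 *
 *     struct SwsContext *ctx = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
 *                                             w, h, AV_PIX_FMT_RGB24,
 *                                             SWS_BILINEAR, NULL, NULL, NULL);
 *     if (ctx)
 *         sws_scale(ctx, (const uint8_t * const *)in_data, in_stride,
 *                   0, h, out_data, out_stride);
 *     sws_freeContext(ctx);
 *
 * Slices may also be fed incrementally (top to bottom or bottom to top);
 * sws_scale() validates slice alignment against the chroma subsampling via
 * macro_height above. */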