ht[i].bits, ht[i].values,
ht[i].class == 1, s->avctx);

if (ht[i].class < 2) {
    memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
    memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
           ht[i].values, ht[i].length);
if (len > 14 && buf[12] == 1)
    s->interlace_polarity = 1;
if (len > 14 && buf[12] == 2)
    s->interlace_polarity = 0;
if (!s->picture_ptr) {
    s->picture_ptr = s->picture;

s->first_picture = 1;

if (s->extern_huff) {
    "error using external huffman table, switching back to internal\n");

s->interlace_polarity = 1;
s->interlace_polarity = 1;

if (s->smv_frames_per_jpeg <= 0) {
for (i = 0; i < 64; i++) {
    if (s->quant_matrixes[index][i] == 0) {

s->quant_matrixes[index][8]) >> 1;

len -= 1 + 64 * (1 + pr);
for (i = 1; i <= 16; i++) {

if (len < n || n > 256)

for (i = 0; i < n; i++) {

                        val_table, class > 0, s->avctx)) < 0)
                        val_table, 0, s->avctx)) < 0)

for (i = 0; i < 16; i++)
    s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
    s->raw_huffman_values[class][index][i]  = val_table[i];
memset(s->upscale_h, 0, sizeof(s->upscale_h));
memset(s->upscale_v, 0, sizeof(s->upscale_v));

if (s->avctx->bits_per_raw_sample != bits) {
    s->avctx->bits_per_raw_sample = bits;

if (bits == 9 && !s->pegasus_rct)

if (s->lossless && s->avctx->lowres) {

if (s->interlaced && s->width == width && s->height == height + 1)

if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
if (nb_components <= 0 ||

if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
    if (nb_components != s->nb_components) {
        "nb_components changing in interlaced picture\n");

if (s->ls && !(bits <= 8 || nb_components == 1)) {
    "JPEG-LS that is not <= 8 "
    "bits/component or 16-bit gray");

if (len != 8 + 3 * nb_components) {
    av_log(s->avctx, AV_LOG_ERROR,
           "decode_sof0: error, len(%d) mismatch %d components\n",
           len, nb_components);

s->nb_components = nb_components;

for (i = 0; i < nb_components; i++) {
    if (h_count[i] > s->h_max)
        s->h_max = h_count[i];
    if (v_count[i] > s->v_max)
        s->v_max = v_count[i];

    if (s->quant_index[i] >= 4) {

    if (!h_count[i] || !v_count[i]) {
        "Invalid sampling factor in component %d %d:%d\n",
        i, h_count[i], v_count[i]);

    i, h_count[i], v_count[i],
    s->component_id[i], s->quant_index[i]);

if (   nb_components == 4
    && s->component_id[0] == 'C' - 1
    && s->component_id[1] == 'M' - 1
    && s->component_id[2] == 'Y' - 1
    && s->component_id[3] == 'K' - 1)
    s->adobe_transform = 0;

if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
if (nb_components == 2) {

memcmp(s->h_count, h_count, sizeof(h_count)) ||
memcmp(s->v_count, v_count, sizeof(v_count))) {

memcpy(s->h_count, h_count, sizeof(h_count));
memcpy(s->v_count, v_count, sizeof(v_count));

if (s->first_picture &&
    (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
    s->orig_height != 0 &&
    s->height < ((s->orig_height * 3) / 4)) {
    s->bottom_field                  = s->interlace_polarity;
    s->picture_ptr->interlaced_frame = 1;
    s->picture_ptr->top_field_first  = !s->interlace_polarity;
if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
     s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&

s->first_picture = 0;

s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
if (s->avctx->height <= 0)

if (s->bayer && s->progressive) {

if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
    if (s->progressive) {

if (s->v_max == 1 && s->h_max == 1 && s->lossless == 1 && (nb_components == 3 || nb_components == 4))
else if (!s->lossless)
pix_fmt_id = ((unsigned) s->h_count[0] << 28) | (s->v_count[0] << 24) |
             (s->h_count[1] << 20) | (s->v_count[1] << 16) |
             (s->h_count[2] << 12) | (s->v_count[2] <<  8) |
             (s->h_count[3] <<  4) |  s->v_count[3];

if (!(pix_fmt_id & 0xD0D0D0D0))
    pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
if (!(pix_fmt_id & 0x0D0D0D0D))
    pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
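/* pix_fmt_id packs the per-component sampling factors into nibbles:
 * h_count[0] in bits 31:28 down to v_count[3] in bits 3:0.  Plain 4:2:0
 * YCbCr (luma 2x2, chroma 1x1) therefore gives 0x22111100 and 4:2:2 gives
 * 0x21111100.  The two adjustments above halve all horizontal (resp.
 * vertical) factors when each of them is 0 or 2, so e.g. a single-component
 * frame coded with 2x2 sampling (0x22000000) is normalized to 0x11000000
 * before the checks and the switch below. */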
for (i = 0; i < 8; i++) {
    int j  = 6 + (i&1) - (i&6);
    int is = (pix_fmt_id >> (4*i)) & 0xF;
    int js = (pix_fmt_id >> (4*j)) & 0xF;

    if (is == 1 && js != 2 && (i < 2 || i > 5))
        js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
    if (is == 1 && js != 2 && (i < 2 || i > 5))
        js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;

    if (is == 1 && js == 2) {
        if (i & 1) s->upscale_h[j/2] = 1;
        else       s->upscale_v[j/2] = 1;

if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
switch (pix_fmt_id) {

if (s->adobe_transform == 0
    || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {

if (s->adobe_transform == 0 && s->bits <= 8) {

if (s->adobe_transform == 0 && s->bits <= 8) {
    s->upscale_v[1] = s->upscale_v[2] = 1;
    s->upscale_h[1] = s->upscale_h[2] = 1;
} else if (s->adobe_transform == 2 && s->bits <= 8) {
    s->upscale_v[1] = s->upscale_v[2] = 1;
    s->upscale_h[1] = s->upscale_h[2] = 1;

if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
    s->upscale_v[0] = s->upscale_v[1] = 1;

if (pix_fmt_id == 0x14111100)
    s->upscale_v[1] = s->upscale_v[2] = 1;

if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
    s->upscale_h[0] = s->upscale_h[1] = 1;

s->upscale_h[1] = s->upscale_h[2] = 2;

if (pix_fmt_id == 0x42111100) {
    s->upscale_h[1] = s->upscale_h[2] = 1;
} else if (pix_fmt_id == 0x24111100) {
    s->upscale_v[1] = s->upscale_v[2] = 1;
} else if (pix_fmt_id == 0x23111100) {
    s->upscale_v[1] = s->upscale_v[2] = 2;
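/* The upscale_h[]/upscale_v[] flags set throughout this switch mark planes
 * that are coded at a lower resolution than the pixel format chosen for
 * output; those planes are stretched horizontally/vertically after the scan
 * has been decoded (see the loops over s->upscale_h[p] and s->upscale_v[p]
 * further down). */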
memset(s->upscale_h, 0, sizeof(s->upscale_h));
memset(s->upscale_v, 0, sizeof(s->upscale_v));

memset(s->upscale_h, 0, sizeof(s->upscale_h));
memset(s->upscale_v, 0, sizeof(s->upscale_v));
if (s->nb_components == 3) {
} else if (s->nb_components != 1) {
} else if (s->palette_index && s->bits <= 8)
else if (s->bits <= 8)

if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
    s->avctx->pix_fmt = s->hwaccel_pix_fmt;

#if CONFIG_MJPEG_NVDEC_HWACCEL
#if CONFIG_MJPEG_VAAPI_HWACCEL

if (s->hwaccel_pix_fmt < 0)

s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
s->avctx->pix_fmt     = s->hwaccel_pix_fmt;
s->picture_ptr->key_frame = 1;

s->picture_ptr->key_frame = 1;

for (i = 0; i < 4; i++)
    s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;

ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
        s->width, s->height, s->linesize[0], s->linesize[1],
        s->interlaced, s->avctx->height);

if ((s->rgb && !s->lossless && !s->ls) ||
    (!s->rgb && s->ls && s->nb_components > 1) ||
if (s->progressive) {
    int bw = (width  + s->h_max * 8 - 1) / (s->h_max * 8);
    int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
    for (i = 0; i < s->nb_components; i++) {
        int size = bw * bh * s->h_count[i] * s->v_count[i];

        if (!s->blocks[i] || !s->last_nnz[i])

        s->block_stride[i] = bw * s->h_count[i];

    memset(s->coefs_finished, 0, sizeof(s->coefs_finished));

if (s->avctx->hwaccel) {
    s->hwaccel_picture_private =
        av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
    if (!s->hwaccel_picture_private)

    ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
                                         s->raw_image_buffer_size);
if (code < 0 || code > 16) {
    "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
    0, dc_index, &s->vlcs[0][dc_index]);

int dc_index, int ac_index, uint16_t *quant_matrix)

if (val == 0xfffff) {

val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
s->last_dc[component] = val;

i += ((unsigned)code) >> 4;

int sign = (~cache) >> 31;

j = s->scantable.permutated[i];
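/* The DC coefficient is coded differentially: the decoded difference is
 * dequantized with quant_matrix[0] and accumulated into s->last_dc[component],
 * the running DC predictor for that component.  The predictor is reset to the
 * mid-level value (4 << s->bits) at the start of each scan and at every
 * restart interval (see the loops below that reinitialize s->last_dc[i]). */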
int component, int dc_index,
uint16_t *quant_matrix, int Al)

s->bdsp.clear_block(block);

if (val == 0xfffff) {

val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
s->last_dc[component] = val;

uint8_t *last_nnz, int ac_index,
uint16_t *quant_matrix,
int ss, int se, int Al, int *EOBRUN)

for (i = ss; ; i++) {

int sign = (~cache) >> 31;

j = s->scantable.permutated[se];

j = s->scantable.permutated[i];
#define REFINE_BIT(j) {                                   \
    UPDATE_CACHE(re, &s->gb);                             \
    sign = block[j] >> 15;                                \
    block[j] += SHOW_UBITS(re, &s->gb, 1) *               \
                ((quant_matrix[i] ^ sign) - sign) << Al;  \
    LAST_SKIP_BITS(re, &s->gb, 1);                        \

av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \

j = s->scantable.permutated[i]; \

else if (run-- == 0) \

int ac_index, uint16_t *quant_matrix,
int ss, int se, int Al, int *EOBRUN)

int last = FFMIN(se, *last_nnz);

j = s->scantable.permutated[i];

for (; i <= last; i++) {
    j = s->scantable.permutated[i];
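/* In progressive scans the AC coefficients of the spectral band [ss, se]
 * arrive over several passes.  EOBRUN counts how many following blocks have
 * no further nonzero coefficients in the band and can be skipped wholesale,
 * while the refinement pass (REFINE_BIT above) appends one extra magnitude
 * bit, scaled by 1 << Al, to each coefficient that is already nonzero. */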
if (s->restart_interval) {

for (i = 0; i < nb_components; i++)
    s->last_dc[i] = (4 << s->bits);

if (s->restart_count == 0) {

for (i = 0; i < nb_components; i++)
    s->last_dc[i] = (4 << s->bits);
int left[4], top[4], topleft[4];
const int linesize = s->linesize[0];
const int mask     = ((1 << s->bits) - 1) << point_transform;
int resync_mb_y = 0;
int resync_mb_x = 0;

if (!s->bayer && s->nb_components < 3)
if (s->bayer && s->nb_components > 2)
if (s->nb_components <= 0 || s->nb_components > 4)
if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
if (s->rct || s->pegasus_rct)

s->restart_count = s->restart_interval;

if (s->restart_interval == 0)
    s->restart_interval = INT_MAX;

width = s->mb_width / nb_components;

if (!s->ljpeg_buffer)

for (i = 0; i < 4; i++)

for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
    uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);

    if (s->interlaced && s->bottom_field)
        ptr += linesize >> 1;

    for (i = 0; i < 4; i++)
        top[i] = left[i] = topleft[i] = buffer[0][i];

    if ((mb_y * s->width) % s->restart_interval == 0) {
        for (i = 0; i < 6; i++)
            vpred[i] = 1 << (s->bits-1);
    for (mb_x = 0; mb_x < width; mb_x++) {

        if (s->restart_interval && !s->restart_count){
            s->restart_count = s->restart_interval;

            top[i] = left[i] = topleft[i] = 1 << (s->bits - 1);

        if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
            modified_predictor = 1;

        for (i = 0; i < nb_components; i++) {

            topleft[i] = top[i];

            if (!s->bayer || mb_x) {

            mask & (pred + (unsigned)(dc * (1 << point_transform)));

        if (s->restart_interval && !--s->restart_count) {

    if (s->rct && s->nb_components == 4) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
            ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
            ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
            ptr[4*mb_x + 0] = buffer[mb_x][3];
    } else if (s->nb_components == 4) {
        for (i = 0; i < nb_components; i++) {
            int c = s->comp_index[i];
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            } else if (s->bits == 9) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    ((uint16_t*)ptr)[4*mb_x + c] = buffer[mb_x][i];
    } else if (s->rct) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
            ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
            ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
    } else if (s->pegasus_rct) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
            ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
    } else if (s->bayer) {
        if (nb_components == 1) {
            for (mb_x = 0; mb_x < width; mb_x++)
                ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
        } else if (nb_components == 2) {
            for (mb_x = 0; mb_x < width; mb_x++) {
                ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
                ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
        for (i = 0; i < nb_components; i++) {
            int c = s->comp_index[i];
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            } else if (s->bits == 9) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    ((uint16_t*)ptr)[3*mb_x + 2 - c] = buffer[mb_x][i];
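/* The rct / pegasus_rct branches above undo a reversible color transform:
 * component 0 carries a luma-like average and components 1 and 2 carry
 * green-referenced differences biased by 0x100 (hence the -0x200 in the
 * averaged term).  Green is reconstructed first, then the stored differences
 * are added back to obtain the remaining channels. */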
int point_transform, int nb_components)

int i, mb_x, mb_y, mask;
int bits = (s->bits + 7) & ~7;
int resync_mb_y = 0;
int resync_mb_x = 0;

point_transform += bits - s->bits;
mask = ((1 << s->bits) - 1) << point_transform;

av_assert0(nb_components >= 1 && nb_components <= 4);

for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {

        if (s->restart_interval && !s->restart_count){
            s->restart_count = s->restart_interval;

        if (!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced) {
            int toprow  = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
            int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;

            for (i = 0; i < nb_components; i++) {
                int n, h, v, x, y, c, j, linesize;
                n = s->nb_blocks[i];
                c = s->comp_index[i];

                linesize = s->linesize[c];

                if (bits > 8) linesize /= 2;

                for (j = 0; j < n; j++) {

                    if (h * mb_x + x >= s->width
                        || v * mb_y + y >= s->height) {
                    } else if (bits <= 8) {
                        ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x);

                        if (x == 0 && leftcol) {

                        if (x == 0 && leftcol) {
                            pred = ptr[-linesize];

                        if (s->interlaced && s->bottom_field)
                            ptr += linesize >> 1;

                        *ptr = pred + ((unsigned)dc << point_transform);

                        ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));

                        if (x == 0 && leftcol) {

                        if (x == 0 && leftcol) {
                            pred = ptr16[-linesize];

                        if (s->interlaced && s->bottom_field)
                            ptr16 += linesize >> 1;

                        *ptr16 = pred + ((unsigned)dc << point_transform);
            for (i = 0; i < nb_components; i++) {
                int n, h, v, x, y, c, j, linesize, dc;
                n = s->nb_blocks[i];
                c = s->comp_index[i];

                linesize = s->linesize[c];

                if (bits > 8) linesize /= 2;

                for (j = 0; j < n; j++) {

                    if (h * mb_x + x >= s->width
                        || v * mb_y + y >= s->height) {
                    } else if (bits <= 8) {
                        ptr = s->picture_ptr->data[c] +
                              (linesize * (v * mb_y + y)) +

                        *ptr = pred + ((unsigned)dc << point_transform);

                        ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));

                        *ptr16 = pred + ((unsigned)dc << point_transform);

        if (s->restart_interval && !--s->restart_count) {
int linesize, int lowres)

case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);

case 3: *dst = *src;

int block_x, block_y;
int size = 8 >> s->avctx->lowres;

for (block_y = 0; block_y < size; block_y++)
    for (block_x = 0; block_x < size; block_x++)
        *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;

for (block_y = 0; block_y < size; block_y++)
    for (block_x = 0; block_x < size; block_x++)
        *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
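/* shift_output() left-aligns samples of less-than-full bit depth: each value
 * is shifted by (16 - s->bits) for 16-bit planes and by (8 - s->bits) for
 * 8-bit planes, so e.g. 12-bit samples end up in the most significant bits
 * of a 16-bit pixel. */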
int Al, const uint8_t *mb_bitmask,
int mb_bitmask_size,

int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;

int bytes_per_pixel = 1 + (s->bits > 8);

if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {

init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);

s->restart_count = 0;

for (i = 0; i < nb_components; i++) {
    int c = s->comp_index[i];
    data[c] = s->picture_ptr->data[c];
    reference_data[c] = reference ? reference->data[c] : NULL;
    linesize[c] = s->linesize[c];
    s->coefs_finished[c] |= 1;

for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {

        if (s->restart_interval && !s->restart_count)
            s->restart_count = s->restart_interval;

        for (i = 0; i < nb_components; i++) {
            int n, h, v, x, y, c, j;
            n = s->nb_blocks[i];
            c = s->comp_index[i];

            for (j = 0; j < n; j++) {
                block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
                                 (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);

                if (s->interlaced && s->bottom_field)
                    block_offset += linesize[c] >> 1;
                if (   8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width  : s->width)
                    && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
                    ptr = data[c] + block_offset;

                    if (!s->progressive) {

                                         linesize[c], s->avctx->lowres);

                        s->bdsp.clear_block(s->block);

                                         s->dc_index[i], s->ac_index[i],
                                         s->quant_matrixes[s->quant_sindex[i]]) < 0) {
                                   "error y=%d x=%d\n", mb_y, mb_x);

                        s->idsp.idct_put(ptr, linesize[c], s->block);

                        int block_idx  = s->block_stride[c] * (v * mb_y + y) +
                        int16_t *block = s->blocks[c][block_idx];

                                    s->quant_matrixes[s->quant_sindex[i]][0] << Al;

                                    s->quant_matrixes[s->quant_sindex[i]],

                                   "error y=%d x=%d\n", mb_y, mb_x);

                ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
                ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
                        mb_x, mb_y, x, y, c, s->bottom_field,
                        (v * mb_y + y) * 8, (h * mb_x + x) * 8);
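/* When a mb_bitmask is supplied, only macroblocks whose bit is set are
 * entropy-decoded in this scan; the other blocks are copied from the given
 * reference frame (reference_data[]) via mjpeg_copy_block(). */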
int se, int Ah, int Al)

int c = s->comp_index[0];
uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];

if (se < ss || se > 63) {

s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);

s->restart_count = 0;

for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
    int block_idx        = mb_y * s->block_stride[c];
    int16_t (*block)[64] = &s->blocks[c][block_idx];
    uint8_t *last_nnz    = &s->last_nnz[c][block_idx];

    av_log(s->avctx, AV_LOG_ERROR,
           "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");

    for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {

        if (s->restart_interval && !s->restart_count)
            s->restart_count = s->restart_interval;

                                     quant_matrix, ss, se, Al, &EOBRUN);
                                     quant_matrix, ss, se, Al, &EOBRUN);

                   "error y=%d x=%d\n", mb_y, mb_x);
const int bytes_per_pixel = 1 + (s->bits > 8);
const int block_size = s->lossless ? 1 : 8;

for (c = 0; c < s->nb_components; c++) {
    int linesize  = s->linesize[c];
    int h         = s->h_max / s->h_count[c];
    int v         = s->v_max / s->v_count[c];
    int mb_width  = (s->width  + h * block_size - 1) / (h * block_size);
    int mb_height = (s->height + v * block_size - 1) / (v * block_size);

    if (~s->coefs_finished[c])

    if (s->interlaced && s->bottom_field)
        data += linesize >> 1;

    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
        int block_idx = mb_y * s->block_stride[c];
        int16_t (*block)[64] = &s->blocks[c][block_idx];
        for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
            s->idsp.idct_put(ptr, linesize, *block);

            ptr += bytes_per_pixel*8 >> s->avctx->lowres;
int mb_bitmask_size, const AVFrame *reference)

const int block_size = s->lossless ? 1 : 8;
int ilv, prev_shift;

if (!s->got_picture) {
    "Can not process SOS before SOF, skipping\n");

if (reference->width  != s->picture_ptr->width  ||
    reference->height != s->picture_ptr->height ||
    reference->format != s->picture_ptr->format) {

"decode_sos: nb_components (%d)",

if (len != 6 + 2 * nb_components) {

for (i = 0; i < nb_components; i++) {

    if (id == s->component_id[index])

    if (index == s->nb_components) {
        "decode_sos: index(%d) out of components\n", index);

    if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
        && nb_components == 3 && s->nb_components == 3 && i)

    s->quant_sindex[i] = s->quant_index[index];

    s->h_scount[i] = s->h_count[index];
    s->v_scount[i] = s->v_count[index];

    if ((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)

    if (s->dc_index[i] <  0 || s->ac_index[i] <  0 ||
        s->dc_index[i] >= 4 || s->ac_index[i] >= 4)

    if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))

if (s->avctx->codec_tag != AV_RL32("CJPG")) {

prev_shift = point_transform = 0;

if (nb_components > 1) {
    s->mb_width  = (s->width  + s->h_max * block_size - 1) / (s->h_max * block_size);
    s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
} else if (!s->ls) {
    h = s->h_max / s->h_scount[0];
    v = s->v_max / s->v_scount[0];
    s->mb_width  = (s->width  + h * block_size - 1) / (h * block_size);
    s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
    s->nb_blocks[0] = 1;

       s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
       predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
       s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);

for (i = s->mjpb_skiptosod; i > 0; i--)

for (i = 0; i < nb_components; i++)
    s->last_dc[i] = (4 << s->bits);

if (s->avctx->hwaccel) {

               s->raw_scan_buffer_size >= bytes_to_start);

    ret = s->avctx->hwaccel->decode_slice(s->avctx,
                                          s->raw_scan_buffer      + bytes_to_start,
                                          s->raw_scan_buffer_size - bytes_to_start);

} else if (s->lossless) {

                                         point_transform, ilv)) < 0)

    if (s->rgb || s->bayer) {

                                         nb_components)) < 0)

                                         point_transform)) < 0)

                               prev_shift, point_transform,
                               mb_bitmask, mb_bitmask_size, reference)) < 0)

if (s->interlaced &&

s->bottom_field ^= 1;

s->restart_count = 0;

s->restart_interval);
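/* s->mb_width and s->mb_height are the scan dimensions in MCUs: a ceiling
 * division of the picture size by the MCU size (h_max * block_size samples
 * wide, v_max * block_size tall, block_size being 1 for lossless and 8 for
 * DCT coding).  For example, a 1920x1080 4:2:0 baseline frame has 16x16
 * MCUs, giving s->mb_width = 120 and s->mb_height = 68. */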
int t_w, t_h, v1, v2;

s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
if (s->avctx->sample_aspect_ratio.num <= 0
    || s->avctx->sample_aspect_ratio.den <= 0) {
    s->avctx->sample_aspect_ratio.num = 0;
    s->avctx->sample_aspect_ratio.den = 1;

"mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
s->avctx->sample_aspect_ratio.num,
s->avctx->sample_aspect_ratio.den);

if (len - 10 - (t_w * t_h * 3) > 0)
    len -= t_w * t_h * 3;

av_log(s->avctx, AV_LOG_INFO,
       "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);

int pegasus_rct = s->pegasus_rct;

"Pegasus lossless jpeg header found\n");

if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {

s->pegasus_rct = pegasus_rct;

} else if (type == 1) {

if (!(flags & 0x04)) {
int ret, le, ifd_offset, bytes_read;

if ((s->start_code == APP1) && (len > (0x28 - 8))) {

unsigned nummarkers;

if (nummarkers == 0) {
} else if (s->iccnum != 0 && nummarkers != s->iccnum) {
} else if (seqno > nummarkers) {

if (s->iccnum == 0) {
    s->iccnum = nummarkers;

if (s->iccentries[seqno - 1].data) {

s->iccentries[seqno - 1].length = len;

if (!s->iccentries[seqno - 1].data) {

if (s->iccread > s->iccnum)

"mjpeg: error, decode_app parser read over the end\n");

for (i = 0; i < len - 2; i++)

if (i > 0 && cbuf[i - 1] == '\n')

if (!strncmp(cbuf, "AVID", 4)) {
} else if (!strcmp(cbuf, "CS=ITU601"))
else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
         (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
else if (!strcmp(cbuf, "MULTISCOPE II")) {
    s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
buf_ptr = *pbuf_ptr;
while (buf_end - buf_ptr > 1) {

    if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {

ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
*pbuf_ptr = buf_ptr;

const uint8_t **unescaped_buf_ptr,
int *unescaped_buf_size)

#define copy_data_segment(skip) do {             \
        ptrdiff_t length = (ptr - src) - (skip); \
            memcpy(dst, src, length);            \

while (ptr < buf_end) {

    while (ptr < buf_end && x == 0xff) {

        if (x < RST0 || x > RST7) {

#undef copy_data_segment

*unescaped_buf_ptr  = s->buffer;
*unescaped_buf_size = dst - s->buffer;
memset(s->buffer + *unescaped_buf_size, 0,

       (buf_end - *buf_ptr) - (dst - s->buffer));

while (src + t < buf_end) {

    while ((src + t < buf_end) && x == 0xff)

    if (x == 0xFF && b < t) {

*unescaped_buf_ptr  = dst;
*unescaped_buf_size = (bit_count + 7) >> 3;
memset(s->buffer + *unescaped_buf_size, 0,

*unescaped_buf_ptr  = *buf_ptr;
*unescaped_buf_size = buf_end - *buf_ptr;
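/* The unescaping pass above copies the entropy-coded scan data into
 * s->buffer while dropping the 0x00 stuffing byte that follows every literal
 * 0xFF; RST0..RST7 restart markers are kept in the stream, and any other
 * marker terminates the segment. */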
if (s->iccentries) {
    for (i = 0; i < s->iccnum; i++)

if (s->smv_next_frame > 0) {

s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;

if (s->smv_next_frame == 0)

#if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER

s->buf_size = s->pkt->size;
const uint8_t *buf_end, *buf_ptr;
const uint8_t *unescaped_buf_ptr;

int unescaped_buf_size;

s->adobe_transform = -1;

buf_ptr = s->pkt->data;
buf_end = s->pkt->data + s->pkt->size;
while (buf_ptr < buf_end) {

                               &unescaped_buf_size);

    } else if (unescaped_buf_size > INT_MAX / 8) {
        "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",

    s->restart_interval = 0;
    s->restart_count    = 0;
    s->raw_image_buffer      = buf_ptr;
    s->raw_image_buffer_size = buf_end - buf_ptr;

        s->progressive && s->cur_scan && s->got_picture)

    if (!s->got_picture) {
        "Found EOI before any SOF, ignoring\n");

    if (s->interlaced) {
        s->bottom_field ^= 1;

        if (s->bottom_field == !s->interlace_polarity)

    goto the_end_no_picture;

    if (s->avctx->hwaccel) {
        ret = s->avctx->hwaccel->end_frame(s->avctx);

    s->raw_scan_buffer      = buf_ptr;
    s->raw_scan_buffer_size = buf_end - buf_ptr;

    "mjpeg: unsupported coding type (%x)\n", start_code);

"marker parser used %d bytes (%d bits)\n",

if (s->got_picture && s->cur_scan) {
for (p = 0; p < s->nb_components; p++) {

    if (!s->upscale_h[p])

    if (s->upscale_v[p] == 1)

    for (i = 0; i < h; i++) {
        if (s->upscale_h[p] == 1) {
            if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];

        } else if (s->upscale_h[p] == 2) {

            ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];

            ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];

        line += s->linesize[p];

for (p = 0; p < s->nb_components; p++) {

    if (!s->upscale_v[p])

    dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];

        uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[ i      * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
        uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
        if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
            memcpy(dst, src1, w);

        dst -= s->linesize[p];
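/* Vertical upscaling is done in place, walking the plane from the last row
 * upwards (dst starts at row h - 1 and is decremented by one linesize per
 * iteration) so the lower-resolution source rows are not overwritten before
 * they have been read. */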
if (s->flipped && !s->rgb) {

    int w = s->picture_ptr->width;
    int h = s->picture_ptr->height;

    for (i = 0; i < h/2; i++) {

            FFSWAP(int, dst[j], dst2[j]);
        dst  += s->picture_ptr->linesize[index];
        dst2 -= s->picture_ptr->linesize[index];

    int w = s->picture_ptr->width;
    int h = s->picture_ptr->height;

    for (i = 0; i < h; i++) {

                 + s->picture_ptr->linesize[index]*i;

        for (j = 0; j < w; j++) {

            int r = dst[0][j] * k;
            int g = dst[1][j] * k;
            int b = dst[2][j] * k;
            dst[0][j] = g*257 >> 16;
            dst[1][j] = b*257 >> 16;
            dst[2][j] = r*257 >> 16;

    int w = s->picture_ptr->width;
    int h = s->picture_ptr->height;

    for (i = 0; i < h; i++) {

                 + s->picture_ptr->linesize[index]*i;

        for (j = 0; j < w; j++) {

            int r = (255 - dst[0][j]) * k;
            int g = (128 - dst[1][j]) * k;
            int b = (128 - dst[2][j]) * k;
            dst[0][j] =  r*257 >> 16;
            dst[1][j] = (g*257 >> 16) + 128;
            dst[2][j] = (b*257 >> 16) + 128;
stereo->type  = s->stereo3d->type;
stereo->flags = s->stereo3d->flags;

if (s->iccnum != 0 && s->iccnum == s->iccread) {

    for (i = 0; i < s->iccnum; i++)
        total_size += s->iccentries[i].length;

    for (i = 0; i < s->iccnum; i++) {
        memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
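/* An ICC profile may be split across several APP2 markers; each chunk is
 * stored in s->iccentries[seqno - 1] during header parsing.  Once all
 * s->iccnum chunks have been read (s->iccread == s->iccnum) they are
 * concatenated here into a single AV_FRAME_DATA_ICC_PROFILE side-data
 * buffer attached to the output frame. */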
if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {

    s->picture_ptr = NULL;
} else if (s->picture_ptr)

s->ljpeg_buffer_size = 0;

for (i = 0; i < 3; i++) {
    for (j = 0; j < 4; j++)

s->smv_next_frame = 0;
#if CONFIG_MJPEG_DECODER
#define OFFSET(x) offsetof(MJpegDecodeContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM

{ "extern_huff", "Use external huffman table.",

static const AVClass mjpegdec_class = {

.priv_class     = &mjpegdec_class,

#if CONFIG_MJPEG_NVDEC_HWACCEL
#if CONFIG_MJPEG_VAAPI_HWACCEL

#if CONFIG_THP_DECODER

#if CONFIG_SMVJPEG_DECODER