52 #define VP3_MV_VLC_BITS 6
53 #define VP4_MV_VLC_BITS 6
54 #define SUPERBLOCK_VLC_BITS 6
56 #define FRAGMENT_PIXELS 8
65 #define SB_NOT_CODED 0
66 #define SB_PARTIALLY_CODED 1
67 #define SB_FULLY_CODED 2
72 #define MAXIMUM_LONG_BIT_RUN 4129
74 #define MODE_INTER_NO_MV 0
76 #define MODE_INTER_PLUS_MV 2
77 #define MODE_INTER_LAST_MV 3
78 #define MODE_INTER_PRIOR_LAST 4
79 #define MODE_USING_GOLDEN 5
80 #define MODE_GOLDEN_MV 6
81 #define MODE_INTER_FOURMV 7
82 #define CODING_MODE_COUNT 8
131 { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
132 { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
133 { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
134 { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
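/* Illustrative note (not part of the original source): these appear to be the
 * {x, y} offsets of the 16 fragments inside a 4x4-fragment superblock, listed
 * in Hilbert-curve order so that consecutive entries are spatially adjacent.
 * Later code computes fragment coordinates as 4 * sb_x + hx and 4 * sb_y + hy,
 * which is consistent with entry i supplying that {hx, hy} pair. */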
161 #define MIN_DEQUANT_VAL 2
255 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
256 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
257 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
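/* Illustrative note (not part of the original source): each int16_t entry of
 * s->dct_tokens packs a 2-bit type tag in its low bits.  Type 0 (TOKEN_EOB)
 * carries an end-of-block run above the tag, type 1 (TOKEN_ZERO_RUN) carries a
 * zero run in bits 2-8 and a coefficient scaled by 512 above that, and type 2
 * (TOKEN_COEFF) carries a plain coefficient times 4.  For example,
 * TOKEN_ZERO_RUN(-2, 3) == -2 * 512 + (3 << 2) + 1 == -1011; vp3_dequant()
 * recovers the tag with (token & 3). */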
334     if (s->golden_frame.f)
338     if (s->current_frame.f)
350     s->theora_tables = 0;
366     for (j = 0; j < 2; j++)
367         for (i = 0; i < 7; i++)
370     for (i = 0; i < 2; i++)
384     int sb_x, sb_y, plane;
387     for (plane = 0; plane < 3; plane++) {
388         int sb_width    = plane ? s->c_superblock_width
389                                 : s->y_superblock_width;
390         int sb_height   = plane ? s->c_superblock_height
391                                 : s->y_superblock_height;
392         int frag_width  = s->fragment_width[!!plane];
393         int frag_height = s->fragment_height[!!plane];
395         for (sb_y = 0; sb_y < sb_height; sb_y++)
396             for (sb_x = 0; sb_x < sb_width; sb_x++)
397                 for (i = 0; i < 16; i++) {
401                     if (x < frag_width && y < frag_height)
402                         s->superblock_fragments[j++] = s->fragment_start[plane] +
405                         s->superblock_fragments[j++] = -1;
418     int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
419     int i, plane, inter, qri, bmi, bmj, qistart;
421     for (inter = 0; inter < 2; inter++) {
422         for (plane = 0; plane < 3; plane++) {
423             int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
425             for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
426                 sum += s->qr_size[inter][plane][qri];
427                 if (s->qps[qpi] <= sum)
430             qistart = sum - s->qr_size[inter][plane][qri];
431             bmi     = s->qr_base[inter][plane][qri];
432             bmj     = s->qr_base[inter][plane][qri + 1];
433             for (i = 0; i < 64; i++) {
434                 int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
435                              2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
436                              s->qr_size[inter][plane][qri]) /
437                             (2 * s->qr_size[inter][plane][qri]);
439                 int qmin   = 8 << (inter + !i);
440                 int qscale = i ? ac_scale_factor : dc_scale_factor;
441                 int qbias  = (1 + inter) * 3;
442                 s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
443                     (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
444                                                : (qscale * (coeff - qbias) / 100 + qbias) * 4;
448             s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
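/* Illustrative note (not part of the original source): the coeff expression
 * above is a rounded linear interpolation between two base matrices over the
 * quant range [qistart, sum]: base_matrix[bmi][i] is weighted by
 * (sum - s->qps[qpi]) and base_matrix[bmj][i] by (s->qps[qpi] - qistart),
 * then divided by qr_size with rounding to nearest.  For example, with a
 * single range covering qi 0..63 (qistart = 0, sum = 63) and qi = 21 the
 * weights are 42/63 and 21/63, i.e. two thirds of the first base matrix plus
 * one third of the second. */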
470     int superblock_starts[3] = {
471         0, s->u_superblock_start, s->v_superblock_start
474     int current_superblock = 0;
476     int num_partial_superblocks = 0;
479     int current_fragment;
481     int plane0_num_coded_frags = 0;
490         while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
496             current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
498             if (current_run == 34)
501             if (current_run > s->superblock_count - current_superblock) {
503                        "Invalid partially coded superblock run length\n");
507             memset(s->superblock_coding + current_superblock, bit, current_run);
509             current_superblock += current_run;
511                 num_partial_superblocks += current_run;
516         if (num_partial_superblocks < s->superblock_count) {
517             int superblocks_decoded = 0;
519             current_superblock = 0;
523             while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
530                 current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
532                 if (current_run == 34)
535                 for (j = 0; j < current_run; current_superblock++) {
536                     if (current_superblock >= s->superblock_count) {
538                                "Invalid fully coded superblock run length\n");
543                     if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
544                         s->superblock_coding[current_superblock] = 2 * bit;
548                 superblocks_decoded += current_run;
554         if (num_partial_superblocks) {
565     s->total_num_coded_frags = 0;
566     memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
568     s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
569                                             : s->nkf_coded_fragment_list;
571     for (plane = 0; plane < 3; plane++) {
572         int sb_start = superblock_starts[plane];
573         int sb_end   = sb_start + (plane ? s->c_superblock_count
574                                          : s->y_superblock_count);
575         int num_coded_frags = 0;
578         if (s->num_kf_coded_fragment[plane] == -1) {
579             for (i = sb_start; i < sb_end; i++) {
581                 for (j = 0; j < 16; j++) {
583                     current_fragment = s->superblock_fragments[i * 16 + j];
584                     if (current_fragment != -1) {
585                         s->coded_fragment_list[plane][num_coded_frags++] =
590             s->num_kf_coded_fragment[plane] = num_coded_frags;
592             num_coded_frags = s->num_kf_coded_fragment[plane];
599                 for (j = 0; j < 16; j++) {
601                     current_fragment = s->superblock_fragments[i * 16 + j];
602                     if (current_fragment != -1) {
603                         int coded = s->superblock_coding[i];
608                         if (current_run-- == 0) {
610                             current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2);
618                             s->all_fragments[current_fragment].coding_method =
620                             s->coded_fragment_list[plane][num_coded_frags++] =
624                             s->all_fragments[current_fragment].coding_method =
632             plane0_num_coded_frags = num_coded_frags;
633         s->total_num_coded_frags += num_coded_frags;
634         for (i = 0; i < 64; i++)
635             s->num_coded_frags[plane][i] = num_coded_frags;
637         s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
643 #define BLOCK_X (2 * mb_x + (k & 1))
644 #define BLOCK_Y (2 * mb_y + (k >> 1))
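/* Illustrative note (not part of the original source): with k = 0..3 iterating
 * the four 8x8 luma fragments of macroblock (mb_x, mb_y), BLOCK_X/BLOCK_Y give
 * the fragment column/row: k=0 -> (2*mb_x, 2*mb_y), k=1 -> (2*mb_x+1, 2*mb_y),
 * k=2 -> (2*mb_x, 2*mb_y+1), k=3 -> (2*mb_x+1, 2*mb_y+1). */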
646 #if CONFIG_VP4_DECODER
658     if (v > s->yuv_macroblock_count) {
664     skip_bits(gb, 2 + n); \
665     v += (1 << n) + get_bits(gb, n); }
666 #define thresh(n) (0x200 - (0x80 >> n))
667 #define else_if(n) else if (bits < thresh(n)) body(n)
670     } else if (bits < thresh(0)) {
689     int v = get_vlc2(gb, s->block_pattern_vlc[*next_block_pattern_table].table, 3, 2);
697     int next_block_pattern_table;
698     int bit, current_run, has_partial;
700     memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
707     for (i = 0; i < s->yuv_macroblock_count; i += current_run) {
710         current_run = vp4_get_mb_count(s, gb);
711         if (current_run > s->yuv_macroblock_count - i)
713         memset(s->superblock_coding + i, 2 * bit, current_run);
722         current_run = vp4_get_mb_count(s, gb);
723         for (i = 0; i < s->yuv_macroblock_count; i++) {
724             if (!s->superblock_coding[i]) {
727                     current_run = vp4_get_mb_count(s, gb);
729                 s->superblock_coding[i] = bit;
737     next_block_pattern_table = 0;
739     for (plane = 0; plane < 3; plane++) {
741         int sb_width  = plane ? s->c_superblock_width : s->y_superblock_width;
742         int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
743         int mb_width  = plane ? s->c_macroblock_width : s->macroblock_width;
744         int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
745         int fragment_width  = s->fragment_width[!!plane];
746         int fragment_height = s->fragment_height[!!plane];
748         for (sb_y = 0; sb_y < sb_height; sb_y++) {
749             for (sb_x = 0; sb_x < sb_width; sb_x++) {
750                 for (j = 0; j < 4; j++) {
751                     int mb_x = 2 * sb_x + (j >> 1);
752                     int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
753                     int mb_coded, pattern, coded;
755                     if (mb_x >= mb_width || mb_y >= mb_height)
758                     mb_coded = s->superblock_coding[i++];
763                         pattern = vp4_get_block_pattern(s, gb, &next_block_pattern_table);
767                     for (k = 0; k < 4; k++) {
771                         coded = pattern & (8 >> k);
790     int i, j, k, sb_x, sb_y;
792     int current_macroblock;
793     int current_fragment;
800     for (i = 0; i < s->fragment_count; i++)
808         for (i = 0; i < 8; i++)
810             for (i = 0; i < 8; i++)
811                 custom_mode_alphabet[get_bits(gb, 3)] = i;
812             alphabet = custom_mode_alphabet;
818     for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
819         for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
823             for (j = 0; j < 4; j++) {
824                 int mb_x = 2 * sb_x + (j >> 1);
825                 int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
826                 current_macroblock = mb_y * s->macroblock_width + mb_x;
828                 if (mb_x >= s->macroblock_width ||
829                     mb_y >= s->macroblock_height)
835                 for (k = 0; k < 4; k++) {
838                     if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
850                     coding_mode = alphabet[get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
852                 s->macroblock_coding[current_macroblock] = coding_mode;
853                 for (k = 0; k < 4; k++) {
859 #define SET_CHROMA_MODES \
860     if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
861         frag[s->fragment_start[1]].coding_method = coding_mode; \
862     if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
863         frag[s->fragment_start[2]].coding_method = coding_mode;
865                 if (s->chroma_y_shift) {
866                     frag = s->all_fragments + mb_y *
867                            s->fragment_width[1] + mb_x;
869                 } else if (s->chroma_x_shift) {
870                     frag = s->all_fragments +
871                            2 * mb_y * s->fragment_width[1] + mb_x;
872                     for (k = 0; k < 2; k++) {
874                         frag += s->fragment_width[1];
877                     for (k = 0; k < 4; k++) {
878                         frag = s->all_fragments +
895     return last_motion < 0 ? -v : v;
904     int j, k, sb_x, sb_y;
908     int last_motion_x = 0;
909     int last_motion_y = 0;
910     int prior_last_motion_x = 0;
911     int prior_last_motion_y = 0;
912     int last_gold_motion_x = 0;
913     int last_gold_motion_y = 0;
914     int current_macroblock;
915     int current_fragment;
922     coding_mode = s->version < 2 ? get_bits1(gb) : 2;
926     for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
927         for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
931             for (j = 0; j < 4; j++) {
932                 int mb_x = 2 * sb_x + (j >> 1);
933                 int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
934                 current_macroblock = mb_y * s->macroblock_width + mb_x;
936                 if (mb_x >= s->macroblock_width ||
937                     mb_y >= s->macroblock_height ||
938                     s->macroblock_coding[current_macroblock] == MODE_COPY)
941                 switch (s->macroblock_coding[current_macroblock]) {
943                     if (coding_mode == 2) {
944                         last_gold_motion_x = motion_x[0] = vp4_get_mv(s, gb, 0, last_gold_motion_x);
945                         last_gold_motion_y = motion_y[0] = vp4_get_mv(s, gb, 1, last_gold_motion_y);
950                     if (coding_mode == 0) {
951                         motion_x[0] = get_vlc2(gb, s->motion_vector_vlc.table,
953                         motion_y[0] = get_vlc2(gb, s->motion_vector_vlc.table,
955                     } else if (coding_mode == 1) {
965                     prior_last_motion_x = last_motion_x;
966                     prior_last_motion_y = last_motion_y;
967                     last_motion_x = motion_x[0];
968                     last_motion_y = motion_y[0];
974                     prior_last_motion_x = last_motion_x;
975                     prior_last_motion_y = last_motion_y;
979                     for (k = 0; k < 4; k++) {
981                         if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
982                             if (coding_mode == 0) {
983                                 motion_x[k] = get_vlc2(gb, s->motion_vector_vlc.table,
985                                 motion_y[k] = get_vlc2(gb, s->motion_vector_vlc.table,
987                             } else if (coding_mode == 1) {
991                                 motion_x[k] = vp4_get_mv(s, gb, 0, prior_last_motion_x);
992                                 motion_y[k] = vp4_get_mv(s, gb, 1, prior_last_motion_y);
994                             last_motion_x = motion_x[k];
995                             last_motion_y = motion_y[k];
1005                    motion_x[0] = last_motion_x;
1006                    motion_y[0] = last_motion_y;
1015                    motion_x[0] = prior_last_motion_x;
1016                    motion_y[0] = prior_last_motion_y;
1019                    prior_last_motion_x = last_motion_x;
1020                    prior_last_motion_y = last_motion_y;
1021                    last_motion_x = motion_x[0];
1022                    last_motion_y = motion_y[0];
1035                for (k = 0; k < 4; k++) {
1039                        s->motion_val[0][current_fragment][0] = motion_x[k];
1040                        s->motion_val[0][current_fragment][1] = motion_y[k];
1042                        s->motion_val[0][current_fragment][0] = motion_x[0];
1043                        s->motion_val[0][current_fragment][1] = motion_y[0];
1047                if (s->chroma_y_shift) {
1049                        motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1050                                             motion_x[2] + motion_x[3], 2);
1051                        motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1052                                             motion_y[2] + motion_y[3], 2);
1054                    if (s->version <= 2) {
1055                        motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1056                        motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1058                    frag = mb_y * s->fragment_width[1] + mb_x;
1059                    s->motion_val[1][frag][0] = motion_x[0];
1060                    s->motion_val[1][frag][1] = motion_y[0];
1061                } else if (s->chroma_x_shift) {
1063                        motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1064                        motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1065                        motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1066                        motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1068                        motion_x[1] = motion_x[0];
1069                        motion_y[1] = motion_y[0];
1071                    if (s->version <= 2) {
1072                        motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1073                        motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1075                    frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1076                    for (k = 0; k < 2; k++) {
1077                        s->motion_val[1][frag][0] = motion_x[k];
1078                        s->motion_val[1][frag][1] = motion_y[k];
1079                        frag += s->fragment_width[1];
1082                    for (k = 0; k < 4; k++) {
1085                            s->motion_val[1][frag][0] = motion_x[k];
1086                            s->motion_val[1][frag][1] = motion_y[k];
1088                            s->motion_val[1][frag][0] = motion_x[0];
1089                            s->motion_val[1][frag][1] = motion_y[0];
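/* Illustrative note (not part of the original source): the chroma motion
 * vectors above are derived from the luma ones.  With chroma_y_shift set
 * (4:2:0) the four luma vectors are averaged with RSHIFT (rounded shift); with
 * only chroma_x_shift set (4:2:2) the two horizontal pairs are averaged
 * separately.  For version <= 2 the result is then halved with
 * (v >> 1) | (v & 1), which forces the result odd whenever the input was odd
 * (e.g. 5 -> 3, 4 -> 2, -5 -> -3); the low bit is what later selects half-pel
 * interpolation in render_slice(). */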
1102    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
1103    int num_blocks = s->total_num_coded_frags;
1105    for (qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
1106        i = blocks_decoded = num_blocks_at_qpi = 0;
1117            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table,
1119            if (run_length == 34)
1121            blocks_decoded += run_length;
1124                num_blocks_at_qpi += run_length;
1126            for (j = 0; j < run_length; i++) {
1127                if (i >= s->total_num_coded_frags)
1130                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
1131                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
1135        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
1137        num_blocks -= num_blocks_at_qpi;
1153    int bits_to_get, zero_run;
1157        bits_to_get = get_bits(gb, bits_to_get);
1190    int num_coeffs      = s->num_coded_frags[plane][coeff_index];
1191    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
1194    int *coded_fragment_list = s->coded_fragment_list[plane];
1198    if (num_coeffs < 0) {
1200               "Invalid number of coefficients at level %d\n", coeff_index);
1204    if (eob_run > num_coeffs) {
1206        blocks_ended = num_coeffs;
1207        eob_run     -= num_coeffs;
1210        blocks_ended = eob_run;
1216        dct_tokens[j++] = blocks_ended << 2;
1220        token = get_vlc2(gb, vlc_table, 11, 3);
1222        if ((unsigned) token <= 6U) {
1229            if (eob_run > num_coeffs - coeff_i) {
1230                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
1231                blocks_ended   += num_coeffs - coeff_i;
1232                eob_run        -= num_coeffs - coeff_i;
1233                coeff_i         = num_coeffs;
1236                blocks_ended += eob_run;
1240        } else if (token >= 0) {
1251                all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
1256            if (coeff_index + zero_run > 64) {
1258                       "Invalid zero run of %d with %d coeffs left\n",
1259                       zero_run, 64 - coeff_index);
1260                zero_run = 64 - coeff_index;
1265                for (i = coeff_index + 1; i <= coeff_index + zero_run; i++)
1266                    s->num_coded_frags[plane][i]--;
1274    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1280        for (i = coeff_index + 1; i < 64; i++)
1281            s->num_coded_frags[plane][i] -= blocks_ended;
1285        s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
1286    else if (coeff_index < 63)
1287        s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;
1295                                 int fragment_height);
1307    int residual_eob_run = 0;
1311    s->dct_tokens[0][0] = s->dct_tokens_base;
1321    residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_y_table], 0,
1322                                   0, residual_eob_run);
1323    if (residual_eob_run < 0)
1324        return residual_eob_run;
1332    residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1333                                   1, residual_eob_run);
1334    if (residual_eob_run < 0)
1335        return residual_eob_run;
1336    residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1337                                   2, residual_eob_run);
1338    if (residual_eob_run < 0)
1339        return residual_eob_run;
1344                              s->fragment_width[1], s->fragment_height[1]);
1346                              s->fragment_width[1], s->fragment_height[1]);
1356    for (i = 1; i <= 5; i++) {
1358        y_tables[i] = &s->coeff_vlc[ac_y_table + 16];
1359        c_tables[i] = &s->coeff_vlc[ac_c_table + 16];
1361    for (i = 6; i <= 14; i++) {
1363        y_tables[i] = &s->coeff_vlc[ac_y_table + 32];
1364        c_tables[i] = &s->coeff_vlc[ac_c_table + 32];
1366    for (i = 15; i <= 27; i++) {
1368        y_tables[i] = &s->coeff_vlc[ac_y_table + 48];
1369        c_tables[i] = &s->coeff_vlc[ac_c_table + 48];
1371    for (i = 28; i <= 63; i++) {
1373        y_tables[i] = &s->coeff_vlc[ac_y_table + 64];
1374        c_tables[i] = &s->coeff_vlc[ac_c_table + 64];
1378    for (i = 1; i <= 63; i++) {
1380                                       0, residual_eob_run);
1381        if (residual_eob_run < 0)
1382            return residual_eob_run;
1385                                       1, residual_eob_run);
1386        if (residual_eob_run < 0)
1387            return residual_eob_run;
1389                                       2, residual_eob_run);
1390        if (residual_eob_run < 0)
1391            return residual_eob_run;
1397 #if CONFIG_VP4_DECODER
1406                            int plane, int eob_tracker[64], int fragment)
1414    while (!eob_tracker[coeff_i]) {
1421        if ((unsigned) token <= 6U) {
1423                *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1424                eob_tracker[coeff_i] = eob_run - 1;
1426        } else if (token >= 0) {
1430            if (coeff_i + zero_run > 64) {
1432                       "Invalid zero run of %d with %d coeffs left\n",
1433                       zero_run, 64 - coeff_i);
1434                zero_run = 64 - coeff_i;
1437            coeff_i += zero_run;
1452            *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1453            eob_tracker[coeff_i]--;
1467    for (i = 0; i < 4; i++)
1468        dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1470    for (j = 1; j < 5; j++)
1471        for (i = 0; i < 4; i++)
1472            vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1479    for (i = 0; i < 4; i++)
1480        s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1483        dc_pred[i][0] = dc_pred[i][4];
1493            dc += dc_pred[-6].dc;
1498            dc += dc_pred[6].dc;
1502        if (count != 2 && dc_pred[-1].type == type) {
1503            dc += dc_pred[-1].dc;
1507        if (count != 2 && dc_pred[1].type == type) {
1508            dc += dc_pred[1].dc;
1513    return count == 2 ? dc / 2 : last_dc[type];
1519    int16_t *base = s->dct_tokens_base;
1520    for (plane = 0; plane < 3; plane++) {
1521        for (i = 0; i < 64; i++) {
1522            s->dct_tokens[plane][i] = base;
1523            base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1536    int plane, sb_y, sb_x;
1537    int eob_tracker[64];
1554    tables[0][0] = &s->coeff_vlc[dc_y_table];
1555    tables[1][0] = &s->coeff_vlc[dc_c_table];
1556    for (i = 1; i <= 5; i++) {
1558        tables[0][i] = &s->coeff_vlc[ac_y_table + 16];
1559        tables[1][i] = &s->coeff_vlc[ac_c_table + 16];
1561    for (i = 6; i <= 14; i++) {
1563        tables[0][i] = &s->coeff_vlc[ac_y_table + 32];
1564        tables[1][i] = &s->coeff_vlc[ac_c_table + 32];
1566    for (i = 15; i <= 27; i++) {
1568        tables[0][i] = &s->coeff_vlc[ac_y_table + 48];
1569        tables[1][i] = &s->coeff_vlc[ac_c_table + 48];
1571    for (i = 28; i <= 63; i++) {
1573        tables[0][i] = &s->coeff_vlc[ac_y_table + 64];
1574        tables[1][i] = &s->coeff_vlc[ac_c_table + 64];
1577    vp4_set_tokens_base(s);
1579    memset(last_dc, 0, sizeof(last_dc));
1582        memset(eob_tracker, 0, sizeof(eob_tracker));
1585        for (i = 0; i < s->fragment_width[!!plane]; i++)
1586            vp4_dc_predictor_reset(&s->dc_pred_row[i]);
1588        for (j = 0; j < 6; j++)
1589            for (i = 0; i < 6; i++)
1590                vp4_dc_predictor_reset(&dc_pred[j][i]);
1592        for (sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
1593            for (sb_x = 0; sb_x * 4 < s->fragment_width[!!plane]; sb_x++) {
1594                vp4_dc_pred_before(s, dc_pred, sb_x);
1595                for (j = 0; j < 16; j++) {
1598                    int x = 4 * sb_x + hx;
1599                    int y = 4 * sb_y + hy;
1603                    if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
1606                    fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;
1611                    if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
1617                    vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);
1619                    this_dc_pred->type = dc_block_type,
1620                    this_dc_pred->dc   = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
1622                vp4_dc_pred_after(s, dc_pred, sb_x);
1627    vp4_set_tokens_base(s);
1638 #define COMPATIBLE_FRAME(x) \
1639 (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1640 #define DC_COEFF(u) s->all_fragments[u].dc
1645 int fragment_height)
1653 int i = first_fragment;
1658 int vl, vul, vu, vur;
1670 static const int predictor_transform[16][4] = {
1684 { -104, 116, 0, 116 },
1686 { -104, 116, 0, 116 }
1695 static const unsigned char compatible_frame[9] = {
1706 int current_frame_type;
1722 for (y = 0; y < fragment_height; y++) {
1724        for (x = 0; x < fragment_width; x++, i++) {
1727            if (s->all_fragments[i].coding_method != MODE_COPY) {
1728                current_frame_type =
1729                    compatible_frame[s->all_fragments[i].coding_method];
1739                    u = i - fragment_width;
1744                        ul = i - fragment_width - 1;
1749                    if (x + 1 < fragment_width) {
1750                        ur = i - fragment_width + 1;
1760                    predicted_dc = last_dc[current_frame_type];
1764                        (predictor_transform[transform][0] * vul) +
1765                        (predictor_transform[transform][1] * vu) +
1766                        (predictor_transform[transform][2] * vur) +
1767                        (predictor_transform[transform][3] * vl);
1769                    predicted_dc /= 128;
1774                    if (FFABS(predicted_dc - vu) > 128)
1776                    else if (FFABS(predicted_dc - vl) > 128)
1778                    else if (FFABS(predicted_dc - vul) > 128)
1786                last_dc[current_frame_type] = DC_COEFF(i);
1793                              int ystart, int yend)
1796    int *bounding_values = s->bounding_values_array + 127;
1798    int width            = s->fragment_width[!!plane];
1799    int height           = s->fragment_height[!!plane];
1801    ptrdiff_t stride     = s->current_frame.f->linesize[plane];
1802    uint8_t *plane_data  = s->current_frame.f->data[plane];
1803    if (!s->flipped_image)
1805    plane_data += s->data_offset[plane] + 8 * ystart * stride;
1807    for (y = ystart; y < yend; y++) {
1808        for (x = 0; x < width; x++) {
1816                s->vp3dsp.h_loop_filter(
1818                    stride, bounding_values);
1823                s->vp3dsp.v_loop_filter(
1825                    stride, bounding_values);
1831            if ((x < width - 1) &&
1833                s->vp3dsp.h_loop_filter(
1834                    plane_data + 8 * x + 8,
1835                    stride, bounding_values);
1843                s->vp3dsp.v_loop_filter(
1844                    plane_data + 8 * x + 8 * stride,
1845                    stride, bounding_values);
1851        plane_data += 8 * stride;
1860                       int plane, int inter, int16_t block[64])
1862    int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1867        int token = *s->dct_tokens[plane][i];
1868        switch (token & 3) {
1871                s->dct_tokens[plane][i]++;
1873                *s->dct_tokens[plane][i] = token & ~3;
1876                s->dct_tokens[plane][i]++;
1877            i += (token >> 2) & 0x7f;
1887            s->dct_tokens[plane][i++]++;
1897        block[0] = frag->dc * s->qmat[0][inter][plane][0];
1910    int y_flipped = s->flipped_image ? s->height - y : y;
1916                             y_flipped == s->height ? INT_MAX
1921    if (!s->avctx->draw_horiz_band)
1924    h = y - s->last_slice_end;
1925    s->last_slice_end = y;
1928    if (!s->flipped_image)
1929        y = s->height - y - h;
1931    cy        = y >> s->chroma_y_shift;
1932    offset[0] = s->current_frame.f->linesize[0] * y;
1933    offset[1] = s->current_frame.f->linesize[1] * cy;
1934    offset[2] = s->current_frame.f->linesize[2] * cy;
1939    s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1947                                  int motion_y, int y)
1951    int border = motion_y & 1;
1955        ref_frame = &s->golden_frame;
1957        ref_frame = &s->last_frame;
1959    ref_row = y + (motion_y >> 1);
1960    ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1965 #if CONFIG_VP4_DECODER
1969 static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
1972    int motion_shift = plane ? 4 : 2;
1973    int subpel_mask = plane ? 3 : 1;
1974    int *bounding_values = s->bounding_values_array + 127;
1979    int x_subpel, y_subpel;
1980    int x_offset, y_offset;
1982    int block_width = plane ? 8 : 16;
1983    int plane_width  = s->width  >> (plane && s->chroma_x_shift);
1984    int plane_height = s->height >> (plane && s->chroma_y_shift);
1986 #define loop_stride 12
1990    x = 8 * bx + motion_x / motion_shift;
1991    y = 8 * by + motion_y / motion_shift;
1993    x_subpel = motion_x & subpel_mask;
1994    y_subpel = motion_y & subpel_mask;
1996    if (x_subpel || y_subpel) {
2006        x2 = x + block_width;
2007        y2 = y + block_width;
2009        if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
2012        x_offset = (-(x + 2) & 7) + 2;
2013        y_offset = (-(y + 2) & 7) + 2;
2015        av_assert1(!(x_offset > 8 + x_subpel && y_offset > 8 + y_subpel));
2017        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2019                                 12, 12, src_x - 1, src_y - 1,
2023        if (x_offset <= 8 + x_subpel)
2026        if (y_offset <= 8 + y_subpel)
2034        if (!x_offset && !y_offset)
2037        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2039                                 12, 12, src_x - 1, src_y - 1,
2043 #define safe_loop_filter(name, ptr, stride, bounding_values) \
2044     if ((uintptr_t)(ptr) & 7) \
2045         s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
2047         s->vp3dsp.name(ptr, stride, bounding_values);
2050        safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);
2053        safe_loop_filter(v_loop_filter, loop + (y_offset + 1) * loop_stride + 1, loop_stride, bounding_values);
2056    for (i = 0; i < 9; i++)
2070    int16_t *block = s->block;
2071    int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2072    int motion_halfpel_index;
2074    int plane, first_pixel;
2076    if (slice >= s->c_superblock_height)
2079    for (plane = 0; plane < 3; plane++) {
2081            s->data_offset[plane];
2082        uint8_t *last_plane = s->last_frame.f->data[plane] +
2083                              s->data_offset[plane];
2084        uint8_t *golden_plane = s->golden_frame.f->data[plane] +
2085                                s->data_offset[plane];
2086        ptrdiff_t stride = s->current_frame.f->linesize[plane];
2087        int plane_width  = s->width  >> (plane && s->chroma_x_shift);
2088        int plane_height = s->height >> (plane && s->chroma_y_shift);
2089        int8_t(*motion_val)[2] = s->motion_val[!!plane];
2091        int sb_x, sb_y   = slice << (!plane && s->chroma_y_shift);
2092        int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2093        int slice_width  = plane ? s->c_superblock_width
2094                                 : s->y_superblock_width;
2096        int fragment_width  = s->fragment_width[!!plane];
2097        int fragment_height = s->fragment_height[!!plane];
2098        int fragment_start  = s->fragment_start[plane];
2103        if (!s->flipped_image)
2109        for (; sb_y < slice_height; sb_y++) {
2111            for (sb_x = 0; sb_x < slice_width; sb_x++) {
2113                for (j = 0; j < 16; j++) {
2121                    if (x >= fragment_width || y >= fragment_height)
2124                    first_pixel = 8 * y * stride + 8 * x;
2130                                             (16 * y) >> s->chroma_y_shift);
2133                    if (s->all_fragments[i].coding_method != MODE_COPY) {
2136                            motion_source = golden_plane;
2138                            motion_source = last_plane;
2140                        motion_source += first_pixel;
2141                        motion_halfpel_index = 0;
2145                        if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2148                            int standard_mc = 1;
2149                            motion_x = motion_val[fragment][0];
2150                            motion_y = motion_val[fragment][1];
2151 #if CONFIG_VP4_DECODER
2152                            if (plane && s->version >= 2) {
2153                                motion_x = (motion_x >> 1) | (motion_x & 1);
2154                                motion_y = (motion_y >> 1) | (motion_y & 1);
2158                            src_x = (motion_x >> 1) + 8 * x;
2159                            src_y = (motion_y >> 1) + 8 * y;
2161                            motion_halfpel_index = motion_x & 0x01;
2162                            motion_source       += (motion_x >> 1);
2164                            motion_halfpel_index |= (motion_y & 0x01) << 1;
2165                            motion_source        += ((motion_y >> 1) * stride);
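/* Illustrative note (not part of the original source): motion_halfpel_index
 * packs the half-pel flags of the vector: bit 0 = horizontal half-pel
 * (motion_x & 1), bit 1 = vertical half-pel (motion_y & 1).  Indices 0-2 go
 * through put_no_rnd_pixels_tab[1][motion_halfpel_index] below, while the
 * diagonal case 3 is handled separately with a second source pointer at
 * motion_source + stride + 1 + d, where d = (motion_x ^ motion_y) >> 31 is -1
 * when the two components have opposite signs and 0 otherwise. */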
2167 #if CONFIG_VP4_DECODER
2168                            if (s->version >= 2) {
2172                                if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2173                                    motion_source = temp;
2179                            if (standard_mc && (
2180                                src_x < 0 || src_y < 0 ||
2181                                src_x + 9 >= plane_width ||
2182                                src_y + 9 >= plane_height)) {
2187                                s->vdsp.emulated_edge_mc(temp, motion_source,
2192                                motion_source = temp;
2203                            if (motion_halfpel_index != 3) {
2204                                s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2206                                    motion_source, stride, 8);
2210                                int d = (motion_x ^ motion_y) >> 31;
2213                                    motion_source + stride + 1 + d,
2239                            s->hdsp.put_pixels_tab[1][0](
2241                                last_plane + first_pixel,
2248        if (s->version < 2 && !s->skip_loop_filter)
2250                              FFMIN(4 * sb_y + 3, fragment_height - 1));
2270 int y_fragment_count, c_fragment_count;
2274    y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2275    c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2278    s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2283    memset(s->num_kf_coded_fragment, -1, sizeof(s->num_kf_coded_fragment));
2286                                          64 * sizeof(*s->dct_tokens_base));
2292    s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2294    s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2296    if (!s->superblock_coding || !s->all_fragments ||
2297        !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2298        !s->nkf_coded_fragment_list ||
2299        !s->superblock_fragments || !s->macroblock_coding ||
2301        !s->motion_val[0] || !s->motion_val[1]) {
2316    if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f)
2325 int i, inter, plane, ret;
2328 int y_fragment_count, c_fragment_count;
2329 #if CONFIG_VP4_DECODER
2356    for (i = 0; i < 64; i++) {
2357 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
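/* Illustrative note (not part of the original source): TRANSPOSE() swaps the
 * row and column of an index into an 8x8 block: row = x >> 3, column = x & 7,
 * result = column * 8 + row.  For example TRANSPOSE(10) (row 1, column 2)
 * yields 17 (row 2, column 1); applied across a whole scan table it produces
 * the transposed scan order. */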
2365    for (i = 0; i < 3; i++)
2372    s->y_superblock_width  = (s->width  + 31) / 32;
2373    s->y_superblock_height = (s->height + 31) / 32;
2374    s->y_superblock_count  = s->y_superblock_width * s->y_superblock_height;
2377    c_width  = s->width  >> s->chroma_x_shift;
2378    c_height = s->height >> s->chroma_y_shift;
2379    s->c_superblock_width  = (c_width  + 31) / 32;
2380    s->c_superblock_height = (c_height + 31) / 32;
2381    s->c_superblock_count  = s->c_superblock_width * s->c_superblock_height;
2383    s->superblock_count   = s->y_superblock_count + (s->c_superblock_count * 2);
2384    s->u_superblock_start = s->y_superblock_count;
2385    s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2387    s->macroblock_width  = (s->width  + 15) / 16;
2388    s->macroblock_height = (s->height + 15) / 16;
2389    s->macroblock_count  = s->macroblock_width * s->macroblock_height;
2390    s->c_macroblock_width  = (c_width  + 15) / 16;
2391    s->c_macroblock_height = (c_height + 15) / 16;
2392    s->c_macroblock_count  = s->c_macroblock_width * s->c_macroblock_height;
2393    s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2397    s->fragment_width[1]  = s->fragment_width[0] >> s->chroma_x_shift;
2398    s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2401    y_fragment_count     = s->fragment_width[0] * s->fragment_height[0];
2402    c_fragment_count     = s->fragment_width[1] * s->fragment_height[1];
2403    s->fragment_count    = y_fragment_count + 2 * c_fragment_count;
2404    s->fragment_start[1] = y_fragment_count;
2405    s->fragment_start[2] = y_fragment_count + c_fragment_count;
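/* Illustrative note (not part of the original source): worked example for a
 * 320x240 4:2:0 stream (chroma_x_shift = chroma_y_shift = 1, 8x8 fragments).
 * Luma: 10x8 = 80 superblocks, 20x15 = 300 macroblocks, 40x30 = 1200
 * fragments.  Chroma (160x120 per plane): 5x4 = 20 superblocks and 10x8 = 80
 * macroblocks each.  Hence superblock_count = 80 + 2*20 = 120,
 * yuv_macroblock_count = 300 + 2*80 = 460, fragment_count = 1200 + 2*300 =
 * 1800, fragment_start[1] = 1200 and fragment_start[2] = 1500. */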
2407    if (!s->theora_tables) {
2408        for (i = 0; i < 64; i++) {
2418        for (inter = 0; inter < 2; inter++) {
2419            for (plane = 0; plane < 3; plane++) {
2420                s->qr_count[inter][plane]   = 1;
2421                s->qr_size[inter][plane][0] = 63;
2422                s->qr_base[inter][plane][0] =
2423                s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2428    if (s->version < 2) {
2437 #if CONFIG_VP4_DECODER
2454                                        &tab->entries[0].len, sizeof(*tab->entries),
2455                                        &tab->entries[0].sym, sizeof(*tab->entries), 1,
2464                                     NULL, 0, 0, 1, 0, avctx);
2470                                     NULL, 0, 0, 0, 0, avctx);
2476                                     NULL, 0, 0, 0, 0, avctx);
2487 #if CONFIG_VP4_DECODER
2488    for (j = 0; j < 2; j++)
2489        for (i = 0; i < 7; i++) {
2499    for (i = 0; i < 2; i++)
2500        if ((ret = init_vlc(&s->block_pattern_vlc[i], 3, 14,
2535    if (src->f->data[0])
2543    if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
2545        (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
2553    int qps_changed = 0, i, err;
2555    if (!s1->current_frame.f->data[0] ||
2556        s->width != s1->width || s->height != s1->height) {
2564    if ((err = ref_frames(s, s1)) < 0)
2567    s->keyframe = s1->keyframe;
2570    for (i = 0; i < 3; i++) {
2571        if (s->qps[i] != s1->qps[i]) {
2573            memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2577    if (s->qps[0] != s1->qps[0])
2578        memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2579               sizeof(s->bounding_values_array));
2582    memcpy(s->qps, s1->qps, sizeof(s->qps));
2583    memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2593                             void *data, int *got_frame,
2598    int buf_size = avpkt->size;
2606 #if CONFIG_THEORA_DECODER
2612            av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2626        } else if (type == 2) {
2639               "Header packet passed to frame decoder, skipping\n");
2645    if (!s->all_fragments) {
2651    for (i = 0; i < 3; i++)
2652        s->last_qps[i] = s->qps[i];
2657    } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2658    for (i = s->nqps; i < 3; i++)
2663            s->keyframe ? "key" : "", avctx->frame_number + 1, s->qps[0]);
2665    s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2669    if (s->qps[0] != s->last_qps[0])
2672    for (i = 0; i < s->nqps; i++)
2675        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2683    s->current_frame.f->key_frame = s->keyframe;
2687    if (!s->edge_emu_buffer) {
2688        s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2689        if (!s->edge_emu_buffer) {
2701 #if !CONFIG_VP4_DECODER
2708        if (avctx->frame_number == 0)
2710                   "VP version: %d\n", s->version);
2713    if (s->version || s->theora) {
2716                   "Warning, unsupported keyframe coding type?!\n");
2719 #if CONFIG_VP4_DECODER
2720        if (s->version >= 2) {
2721            int mb_height, mb_width;
2722            int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2726            if (mb_height != s->macroblock_height ||
2727                mb_width  != s->macroblock_width)
2734            if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2743        if (!s->golden_frame.f->data[0]) {
2745                   "vp3: first frame not a keyframe\n");
2753                                      &s->golden_frame)) < 0)
2759    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2762    if (s->version < 2) {
2767 #if CONFIG_VP4_DECODER
2769        if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2788    if (s->version < 2) {
2793 #if CONFIG_VP4_DECODER
2795        if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2802    for (i = 0; i < 3; i++) {
2803        int height = s->height >> (i && s->chroma_y_shift);
2804        if (s->flipped_image)
2805            s->data_offset[i] = 0;
2807            s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2810    s->last_slice_end = 0;
2811    for (i = 0; i < s->c_superblock_height; i++)
2816    for (i = 0; i < 3; i++) {
2817        int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2860    ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2878 #if CONFIG_THEORA_DECODER
2886 int visible_width, visible_height, colorspace;
2887 uint8_t offset_x = 0, offset_y = 0;
2894 s->theora_header = 0;
2904    if (s->theora < 0x030200) {
2905        s->flipped_image = 1;
2907               "Old (<alpha3) Theora bitstream, flipped image\n");
2915    if (s->theora >= 0x030200) {
2925        visible_width  + offset_x > s->width ||
2926        visible_height + offset_y > s->height ||
2930               "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2931               visible_width, visible_height, offset_x, offset_y,
2932               s->width, s->height);
2938    if (fps.num && fps.den) {
2939        if (fps.num < 0 || fps.den < 0) {
2944                  fps.den, fps.num, 1 << 30);
2949    if (aspect.num && aspect.den) {
2952                  aspect.num, aspect.den, 1 << 30);
2956    if (s->theora < 0x030200)
2963    if (s->theora >= 0x030200) {
2980    avctx->width  = visible_width;
2981    avctx->height = visible_height;
2984    s->offset_x = offset_x;
2985    s->offset_y = s->height - visible_height - offset_y;
2988    if (colorspace == 1)
2990    else if (colorspace == 2)
2993    if (colorspace == 1 || colorspace == 2) {
2998    s->theora_header = 1;
3005    int i, n, matrices, inter, plane, ret;
3007    if (!s->theora_header)
3010    if (s->theora >= 0x030200) {
3014        for (i = 0; i < 64; i++)
3018    if (s->theora >= 0x030200)
3023    for (i = 0; i < 64; i++)
3024        s->coded_ac_scale_factor[i] = get_bits(gb, n);
3026    if (s->theora >= 0x030200)
3031    for (i = 0; i < 64; i++)
3032        s->coded_dc_scale_factor[0][i] =
3033        s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3035    if (s->theora >= 0x030200)
3040    if (matrices > 384) {
3045    for (n = 0; n < matrices; n++)
3046        for (i = 0; i < 64; i++)
3049    for (inter = 0; inter <= 1; inter++) {
3050        for (plane = 0; plane <= 2; plane++) {
3052            if (inter || plane > 0)
3060                qtj = (3 * inter + plane - 1) / 3;
3061                plj = (plane + 2) % 3;
3063                s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3064                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3065                       sizeof(s->qr_size[0][0]));
3066                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3067                       sizeof(s->qr_base[0][0]));
3074                if (i >= matrices) {
3076                           "invalid base matrix index\n");
3079                s->qr_base[inter][plane][qri] = i;
3083                s->qr_size[inter][plane][qri++] = i;
3091            s->qr_count[inter][plane] = qri;
3098        s->huffman_table[i].nb_entries = 0;
3103    s->theora_tables = 1;
3113 const uint8_t *header_start[3];
3128 42, header_start, header_len) < 0) {
3133    for (i = 0; i < 3; i++) {
3134        if (header_len[i] <= 0)
3142        if (!(ptype & 0x80)) {
3165                   "Unknown Theora config packet: %d\n", ptype & ~0x80);
3170           "%d bits left in packet %X\n",
3172    if (s->theora < 0x030200)
3185    .init = theora_decode_init,
3213 #if CONFIG_VP4_DECODER
static void flush(AVCodecContext *avctx)
AVCodec ff_theora_decoder
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Libavcodec external API header.
#define FF_THREAD_FRAME
Decode more than one frame at once.
#define FF_DEBUG_PICT_INFO
static av_cold int init(AVCodecContext *avctx)
void ff_free_vlc(VLC *vlc)
int ff_init_vlc_from_lengths(VLC *vlc_arg, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
#define u(width, name, range_min, range_max)
#define bit(string, value)
#define MKTAG(a, b, c, d)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
#define AV_NUM_DATA_POINTERS
bitstream reader API header.
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
static int get_bits_left(GetBitContext *gb)
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits(GetBitContext *s, int n)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static int get_bits_count(const GetBitContext *s)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
@ AVDISCARD_NONKEY
discard all frames except keyframes
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_INFO
Standard information.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
@ AV_PICTURE_TYPE_I
Intra.
@ AV_PICTURE_TYPE_P
Predicted.
static const int8_t transform[32][32]
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
static VLC_TYPE vlc_tables[VLC_TABLES_SIZE][2]
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
const uint8_t ff_zigzag_direct[64]
static void body(uint32_t ABCD[4], const uint8_t *src, int nblocks)
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
AVPixelFormat
Pixel format.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
@ AVCOL_TRC_BT709
also ITU-R BT1361
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
static const uint16_t table[]
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
FF_ENABLE_DEPRECATION_WARNINGS int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
#define FF_ARRAY_ELEMS(a)
main external API structure.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int width
picture width / height.
int flags2
AV_CODEC_FLAG2_*.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
enum AVColorSpace colorspace
YUV colorspace type.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
int flags
AV_CODEC_FLAG_*.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
int coded_width
Bitstream width / height, may be different from width/height e.g.
const char * name
Name of the codec implementation.
This structure describes decoded (raw) audio or video data.
This structure stores compressed data.
Rational number (pair of numerator and denominator).
Used to store optimal huffman encoding results.
uint16_t coded_dc_scale_factor[2][64]
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
int bounding_values_array[256+2]
int8_t(*[2] motion_val)[2]
uint8_t base_matrix[384][64]
int * kf_coded_fragment_list
VLC fragment_run_length_vlc
VLC superblock_run_length_vlc
VP4Predictor * dc_pred_row
uint32_t coded_ac_scale_factor[64]
int16_t * dct_tokens_base
ThreadFrame current_frame
uint8_t idct_permutation[64]
unsigned char * macroblock_coding
uint8_t * edge_emu_buffer
int num_kf_coded_fragment[3]
int * coded_fragment_list[3]
int total_num_coded_frags
HuffTable huffman_table[5 *16]
uint8_t filter_limit_values[64]
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Vp3Fragment * all_fragments
unsigned char * superblock_coding
int * nkf_coded_fragment_list
uint8_t qr_size[2][3][64]
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
uint8_t idct_scantable[64]
uint16_t qr_base[2][3][64]
int * superblock_fragments
#define av_malloc_array(a, b)
#define avpriv_request_sample(...)
static void error(const char *err)
static const uint8_t *const tables[]
static const struct twinvq_data tab
static const double coeff[2][5]
static const uint8_t offset[127][2]
Core video DSP helper functions.
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
#define CODING_MODE_COUNT
static av_cold int vp3_decode_init(AVCodecContext *avctx)
#define COMPATIBLE_FRAME(x)
static void init_loop_filter(Vp3DecodeContext *s)
#define MODE_INTER_PRIOR_LAST
static int get_eob_run(GetBitContext *gb, int token)
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
#define TOKEN_EOB(eob_run)
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
static av_cold void free_tables(AVCodecContext *avctx)
static void vp3_decode_flush(AVCodecContext *avctx)
#define MODE_INTER_LAST_MV
#define MAXIMUM_LONG_BIT_RUN
static void render_slice(Vp3DecodeContext *s, int slice)
#define MODE_INTER_FOURMV
#define SB_PARTIALLY_CODED
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
static const uint8_t vp4_pred_block_type_map[8]
#define SUPERBLOCK_VLC_BITS
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
static av_cold int vp3_decode_end(AVCodecContext *avctx)
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
#define MODE_USING_GOLDEN
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
#define MODE_INTER_PLUS_MV
static const int ModeAlphabet[6][CODING_MODE_COUNT]
static int vp3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
#define TOKEN_COEFF(coeff)
static int update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
static const uint8_t hilbert_offset[16][2]
static int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, VLC *table, int coeff_index, int plane, int eob_run)
#define TOKEN_ZERO_RUN(coeff, zero_run)
static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
static av_cold int init_frames(Vp3DecodeContext *s)
static const uint8_t zero_run_get_bits[32]
static const uint8_t vp31_filter_limit_values[64]
static const uint16_t vp31_ac_scale_factor[64]
static const uint8_t mode_code_vlc_len[8]
static const uint8_t zero_run_base[32]
static const int8_t fixed_motion_vector_table[64]
static const int16_t *const coeff_tables[32]
static const uint8_t vp31_dc_scale_factor[64]
static const uint8_t vp3_bias[5 *16][32][2]
static const uint8_t fragment_run_length_vlc_len[30]
static const uint8_t vp31_intra_y_dequant[64]
static const uint8_t vp31_inter_dequant[64]
static const uint8_t vp31_intra_c_dequant[64]
static const struct @167 eob_run_table[7]
static const uint8_t coeff_get_bits[32]
static const uint8_t motion_vector_vlc_table[63][2]
static const uint8_t superblock_run_length_vlc_lens[34]
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
static const uint8_t vp4_mv_vlc[2][7][63][2]
static const uint8_t vp4_filter_limit_values[64]
static const uint8_t vp4_uv_dc_scale_factor[64]
static const uint8_t vp4_block_pattern_table_selector[14]
static const uint8_t vp4_mv_table_selector[32]
static const uint8_t vp4_block_pattern_vlc[2][14][2]
static const uint16_t vp4_ac_scale_factor[64]
static const uint8_t vp4_bias[5 *16][32][2]
static const uint8_t vp4_generic_dequant[64]
static const uint8_t vp4_y_dc_scale_factor[64]
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.