95 int counts[17] = {0}, codes[17];
106 codes[0] = counts[0] = 0;
107 for (
int i = 0;
i < 16;
i++) {
108 codes[
i+1] = (codes[
i] + counts[
i]) << 1;
132 for(j = 0; j < 2; j++){
139 for(k = 0; k < 4; k++){
144 for(j = 0; j < 4; j++){
155 for(j = 0; j < 4; j++){
159 for(j = 0; j < 2; j++){
184 int pattern,
code, cbp=0;
186 static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
187 static const int shifts[4] = { 0, 2, 8, 10 };
188 const int *curshift =
shifts;
192 pattern =
code & 0xF;
202 for(
i = 0;
i < 4;
i++){
207 cbp |= cbp_masks[2] <<
i;
222 coef = 22 + ((1 << coef) |
get_bits(gb, coef));
228 *dst = (coef*q + 8) >> 4;
260 int q_dc,
int q_ac1,
int q_ac2)
283 int code, pattern, has_ac = 1;
287 pattern =
code & 0x7;
312 return has_ac | pattern;
327 for(
i = 0;
i < 5;
i++)
352 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
360 fill_rectangle(intra_types, 4, 4,
r->intra_types_stride, t,
sizeof(intra_types[0]));
369 if(
r->decode_intra_types(
r, gb, intra_types) < 0)
387 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
390 r->block_type =
r->decode_mb_info(
r);
391 if(
r->block_type == -1)
394 r->mb_type[mb_pos] =
r->block_type;
405 fill_rectangle(intra_types, 4, 4,
r->intra_types_stride, 0,
sizeof(intra_types[0]));
411 if(
IS_INTRA(
s->current_picture_ptr->mb_type[mb_pos])){
414 fill_rectangle(intra_types, 4, 4,
r->intra_types_stride, t,
sizeof(intra_types[0]));
417 if(
r->decode_intra_types(
r, gb, intra_types) < 0)
424 for(
i = 0;
i < 16;
i++)
425 intra_types[(
i & 3) + (
i>>2) *
r->intra_types_stride] = 0;
446 static const uint8_t part_sizes_w[
RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
449 static const uint8_t part_sizes_h[
RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
464 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
465 int A[2] = {0},
B[2],
C[2];
471 mv_pos += (subblock_no & 1) + (subblock_no >> 1)*
s->b8_stride;
476 A[0] =
s->current_picture_ptr->motion_val[0][mv_pos-1][0];
477 A[1] =
s->current_picture_ptr->motion_val[0][mv_pos-1][1];
480 B[0] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride][0];
481 B[1] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride][1];
487 if(avail[-4] && (avail[-1] ||
r->rv30)){
488 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride-1][0];
489 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride-1][1];
495 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride+c_off][0];
496 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos-
s->b8_stride+c_off][1];
500 mx +=
r->dmv[dmv_no][0];
501 my +=
r->dmv[dmv_no][1];
504 s->current_picture_ptr->motion_val[0][mv_pos +
i + j*
s->b8_stride][0] = mx;
505 s->current_picture_ptr->motion_val[0][mv_pos +
i + j*
s->b8_stride][1] = my;
510 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
517 int mul = dir ? -
r->mv_weight2 :
r->mv_weight1;
526 int A_avail,
int B_avail,
int C_avail,
529 if(A_avail + B_avail + C_avail != 3){
530 *mx =
A[0] +
B[0] +
C[0];
531 *my =
A[1] +
B[1] +
C[1];
532 if(A_avail + B_avail + C_avail == 2){
548 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
549 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
550 int A[2] = { 0 },
B[2] = { 0 },
C[2] = { 0 };
551 int has_A = 0, has_B = 0, has_C = 0;
554 Picture *cur_pic =
s->current_picture_ptr;
564 B[0] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride][0];
565 B[1] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride][1];
568 if(
r->avail_cache[6-4] && (
r->avail_cache[6-2] &
type) &
mask){
569 C[0] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride + 2][0];
570 C[1] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride + 2][1];
572 }
else if((
s->mb_x+1) ==
s->mb_width && (
r->avail_cache[6-5] &
type) &
mask){
573 C[0] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride - 1][0];
574 C[1] = cur_pic->
motion_val[dir][mv_pos -
s->b8_stride - 1][1];
580 mx +=
r->dmv[dir][0];
581 my +=
r->dmv[dir][1];
583 for(j = 0; j < 2; j++){
584 for(
i = 0;
i < 2;
i++){
585 cur_pic->
motion_val[dir][mv_pos +
i + j*
s->b8_stride][0] = mx;
586 cur_pic->
motion_val[dir][mv_pos +
i + j*
s->b8_stride][1] = my;
600 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
601 int A[2] = {0},
B[2],
C[2];
607 A[0] =
s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
608 A[1] =
s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
611 B[0] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride][0];
612 B[1] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride][1];
618 if(avail[-4] && (avail[-1])){
619 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride - 1][0];
620 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride - 1][1];
626 C[0] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride + 2][0];
627 C[1] =
s->current_picture_ptr->motion_val[0][mv_pos -
s->b8_stride + 2][1];
633 for(j = 0; j < 2; j++){
634 for(
i = 0;
i < 2;
i++){
635 for(k = 0; k < 2; k++){
636 s->current_picture_ptr->motion_val[k][mv_pos +
i + j*
s->b8_stride][0] = mx;
637 s->current_picture_ptr->motion_val[k][mv_pos +
i + j*
s->b8_stride][1] = my;
661 const int xoff,
const int yoff,
int mv_off,
663 const int thirdpel,
int weighted,
669 int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
670 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride + mv_off;
675 int chroma_mx, chroma_my;
676 mx = (
s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
677 my = (
s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
678 lx = (
s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
679 ly = (
s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
680 chroma_mx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
681 chroma_my =
s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
682 umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
683 umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
688 mx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
689 my =
s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
690 lx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
691 ly =
s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
692 cx =
s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
693 cy =
s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
696 uvmx = (cx & 3) << 1;
697 uvmy = (cy & 3) << 1;
699 if(uvmx == 6 && uvmy == 6)
705 int mb_row =
s->mb_y + ((yoff + my + 5 + 8 *
height) >> 4);
706 ThreadFrame *
f = dir ? &
s->next_picture_ptr->tf : &
s->last_picture_ptr->tf;
711 srcY = dir ?
s->next_picture_ptr->f->data[0] :
s->last_picture_ptr->f->data[0];
712 srcU = dir ?
s->next_picture_ptr->f->data[1] :
s->last_picture_ptr->f->data[1];
713 srcV = dir ?
s->next_picture_ptr->f->data[2] :
s->last_picture_ptr->f->data[2];
714 src_x =
s->mb_x * 16 + xoff + mx;
715 src_y =
s->mb_y * 16 + yoff + my;
716 uvsrc_x =
s->mb_x * 8 + (xoff >> 1) + umx;
717 uvsrc_y =
s->mb_y * 8 + (yoff >> 1) + umy;
718 srcY += src_y *
s->linesize + src_x;
719 srcU += uvsrc_y *
s->uvlinesize + uvsrc_x;
720 srcV += uvsrc_y *
s->uvlinesize + uvsrc_x;
721 if(
s->h_edge_pos - (
width << 3) < 6 ||
s->v_edge_pos - (
height << 3) < 6 ||
722 (
unsigned)(src_x - !!lx*2) >
s->h_edge_pos - !!lx*2 - (
width <<3) - 4 ||
723 (unsigned)(src_y - !!ly*2) >
s->v_edge_pos - !!ly*2 - (
height<<3) - 4) {
724 srcY -= 2 + 2*
s->linesize;
725 s->vdsp.emulated_edge_mc(
s->sc.edge_emu_buffer, srcY,
726 s->linesize,
s->linesize,
728 src_x - 2, src_y - 2,
729 s->h_edge_pos,
s->v_edge_pos);
730 srcY =
s->sc.edge_emu_buffer + 2 + 2*
s->linesize;
734 Y =
s->dest[0] + xoff + yoff *
s->linesize;
735 U =
s->dest[1] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
736 V =
s->dest[2] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
738 Y =
r->tmp_b_block_y [dir] + xoff + yoff *
s->linesize;
739 U =
r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
740 V =
r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*
s->uvlinesize;
744 qpel_mc[1][dxy](
Y, srcY,
s->linesize);
748 qpel_mc[1][dxy](
Y, srcY,
s->linesize);
749 Y += 8 *
s->linesize;
750 srcY += 8 *
s->linesize;
753 qpel_mc[!is16x16][dxy](
Y, srcY,
s->linesize);
755 uint8_t *uvbuf =
s->sc.edge_emu_buffer;
757 s->vdsp.emulated_edge_mc(uvbuf, srcU,
758 s->uvlinesize,
s->uvlinesize,
761 s->h_edge_pos >> 1,
s->v_edge_pos >> 1);
763 uvbuf += 9*
s->uvlinesize;
765 s->vdsp.emulated_edge_mc(uvbuf, srcV,
766 s->uvlinesize,
s->uvlinesize,
769 s->h_edge_pos >> 1,
s->v_edge_pos >> 1);
777 const int xoff,
const int yoff,
int mv_off,
780 rv34_mc(
r, block_type, xoff, yoff, mv_off,
width,
height, dir,
r->rv30, 0,
781 r->rdsp.put_pixels_tab,
782 r->rdsp.put_chroma_pixels_tab);
787 r->rdsp.rv40_weight_pixels_tab[
r->scaled_weight][0](
r->s.dest[0],
793 r->rdsp.rv40_weight_pixels_tab[
r->scaled_weight][1](
r->s.dest[1],
794 r->tmp_b_block_uv[0],
795 r->tmp_b_block_uv[2],
799 r->rdsp.rv40_weight_pixels_tab[
r->scaled_weight][1](
r->s.dest[2],
800 r->tmp_b_block_uv[1],
801 r->tmp_b_block_uv[3],
811 rv34_mc(
r, block_type, 0, 0, 0, 2, 2, 0,
r->rv30, weighted,
812 r->rdsp.put_pixels_tab,
813 r->rdsp.put_chroma_pixels_tab);
815 rv34_mc(
r, block_type, 0, 0, 0, 2, 2, 1,
r->rv30, 0,
816 r->rdsp.avg_pixels_tab,
817 r->rdsp.avg_chroma_pixels_tab);
819 rv34_mc(
r, block_type, 0, 0, 0, 2, 2, 1,
r->rv30, 1,
820 r->rdsp.put_pixels_tab,
821 r->rdsp.put_chroma_pixels_tab);
829 int weighted = !
r->rv30 &&
r->weight1 != 8192;
831 for(j = 0; j < 2; j++)
832 for(
i = 0;
i < 2;
i++){
835 r->rdsp.put_pixels_tab,
836 r->rdsp.put_chroma_pixels_tab);
839 weighted ?
r->rdsp.put_pixels_tab :
r->rdsp.avg_pixels_tab,
840 weighted ?
r->rdsp.put_chroma_pixels_tab :
r->rdsp.avg_chroma_pixels_tab);
847 static const int num_mvs[
RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
858 int mv_pos =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
861 memset(
r->dmv, 0,
sizeof(
r->dmv));
867 r->dmv[
i][0] =
r->dmv[
i][1] = 0;
874 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
878 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
888 next_bt =
s->next_picture_ptr->mb_type[
s->mb_x +
s->mb_y *
s->mb_stride];
890 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
891 ZERO8x2(
s->current_picture_ptr->motion_val[1][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
893 for(j = 0; j < 2; j++)
894 for(
i = 0;
i < 2;
i++)
895 for(k = 0; k < 2; k++)
896 for(l = 0; l < 2; l++)
897 s->current_picture_ptr->motion_val[l][mv_pos +
i + j*
s->b8_stride][k] =
calc_add_mv(
r, l,
s->next_picture_ptr->motion_val[0][mv_pos +
i + j*
s->b8_stride][k]);
902 ZERO8x2(
s->current_picture_ptr->motion_val[0][
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride],
s->b8_stride);
911 r->dmv[1][0] =
r->dmv[0][0];
912 r->dmv[1][1] =
r->dmv[0][1];
940 rv34_mc_1mv (
r, block_type, (
i&1)<<3, (
i&2)<<2, (
i&1)+(
i>>1)*
s->b8_stride, 1, 1, 0);
988 topleft = dst[-
stride + 3] * 0x01010101u;
991 r->h.pred4x4[itype](dst, prev,
stride);
1012 int fc,
int sc,
int q_dc,
int q_ac)
1015 int16_t *ptr =
s->block[0];
1017 fc, sc, q_dc, q_ac, q_ac);
1019 r->rdsp.rv34_idct_add(pdst,
stride, ptr);
1021 r->rdsp.rv34_idct_dc_add(pdst,
stride, ptr[0]);
1034 int16_t *ptr =
s->block[0];
1035 int i, j, itype, has_ac;
1037 memset(block16, 0, 16 *
sizeof(*block16));
1041 r->rdsp.rv34_inv_transform(block16);
1043 r->rdsp.rv34_inv_transform_dc(block16);
1046 itype =
adjust_pred16(itype,
r->avail_cache[6-4],
r->avail_cache[6-1]);
1047 r->h.pred16x16[itype](dst,
s->linesize);
1049 for(j = 0; j < 4; j++){
1050 for(
i = 0; i < 4; i++, cbp >>= 1){
1051 int dc = block16[
i + j*4];
1060 r->rdsp.rv34_idct_add(dst+4*
i,
s->linesize, ptr);
1062 r->rdsp.rv34_idct_dc_add(dst+4*
i,
s->linesize,
dc);
1065 dst += 4*
s->linesize;
1070 itype =
adjust_pred16(itype,
r->avail_cache[6-4],
r->avail_cache[6-1]);
1075 for(j = 1; j < 3; j++){
1077 r->h.pred8x8[itype](dst,
s->uvlinesize);
1078 for(
i = 0; i < 4; i++, cbp >>= 1){
1080 if(!(cbp & 1))
continue;
1081 pdst = dst + (
i&1)*4 + (
i&2)*2*
s->uvlinesize;
1084 r->chroma_vlc, 1, q_dc, q_ac);
1093 int avail[6*8] = {0};
1095 int idx, q_ac, q_dc;
1098 if(
r->avail_cache[1])
1100 if(
r->avail_cache[2])
1101 avail[1] = avail[2] = 1;
1102 if(
r->avail_cache[3])
1103 avail[3] = avail[4] = 1;
1104 if(
r->avail_cache[4])
1106 if(
r->avail_cache[5])
1107 avail[8] = avail[16] = 1;
1108 if(
r->avail_cache[9])
1109 avail[24] = avail[32] = 1;
1112 for(j = 0; j < 4; j++){
1114 for(
i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1117 if(!(cbp & 1))
continue;
1120 r->luma_vlc, 0, q_ac, q_ac);
1122 dst +=
s->linesize * 4 - 4*4;
1123 intra_types +=
r->intra_types_stride;
1126 intra_types -=
r->intra_types_stride * 4;
1131 for(k = 0; k < 2; k++){
1135 for(j = 0; j < 2; j++){
1136 int* acache =
r->avail_cache + 6 + j*4;
1137 for(
i = 0; i < 2; i++, cbp >>= 1, acache++){
1138 int itype =
ittrans[intra_types[
i*2+j*2*
r->intra_types_stride]];
1142 if(!(cbp&1))
continue;
1145 r->chroma_vlc, 1, q_dc, q_ac);
1148 dst += 4*
s->uvlinesize;
1156 d = motion_val[0][0] - motion_val[-step][0];
1159 d = motion_val[0][1] - motion_val[-step][1];
1168 int hmvmask = 0, vmvmask = 0,
i, j;
1169 int midx =
s->mb_x * 2 +
s->mb_y * 2 *
s->b8_stride;
1170 int16_t (*motion_val)[2] = &
s->current_picture_ptr->motion_val[0][midx];
1171 for(j = 0; j < 16; j += 8){
1172 for(
i = 0;
i < 2;
i++){
1174 vmvmask |= 0x11 << (j +
i*2);
1176 hmvmask |= 0x03 << (j +
i*2);
1178 motion_val +=
s->b8_stride;
1180 if(
s->first_slice_line)
1185 vmvmask |= (vmvmask & 0x4444) >> 1;
1186 hmvmask |= (hmvmask & 0x0F00) >> 4;
1188 r->deblock_coefs[
s->mb_x - 1 +
s->mb_y*
s->mb_stride] |= (vmvmask & 0x1111) << 3;
1189 if(!
s->first_slice_line)
1190 r->deblock_coefs[
s->mb_x + (
s->mb_y - 1)*
s->mb_stride] |= (hmvmask & 0xF) << 12;
1192 return hmvmask | vmvmask;
1200 int16_t *ptr =
s->block[0];
1201 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
1203 int q_dc, q_ac, has_ac;
1208 memset(
r->avail_cache, 0,
sizeof(
r->avail_cache));
1210 dist = (
s->mb_x -
s->resync_mb_x) + (
s->mb_y -
s->resync_mb_y) *
s->mb_width;
1213 r->avail_cache[9] =
s->current_picture_ptr->mb_type[mb_pos - 1];
1214 if(dist >=
s->mb_width)
1216 r->avail_cache[3] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride];
1217 if(((
s->mb_x+1) <
s->mb_width) && dist >=
s->mb_width - 1)
1218 r->avail_cache[4] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride + 1];
1219 if(
s->mb_x && dist >
s->mb_width)
1220 r->avail_cache[1] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride - 1];
1222 s->qscale =
r->si.quant;
1224 r->cbp_luma [mb_pos] = cbp;
1225 r->cbp_chroma[mb_pos] = cbp >> 16;
1227 s->current_picture_ptr->qscale_table[mb_pos] =
s->qscale;
1232 if (
IS_INTRA(
s->current_picture_ptr->mb_type[mb_pos])){
1241 memset(block16, 0, 16 *
sizeof(*block16));
1245 r->rdsp.rv34_inv_transform(block16);
1247 r->rdsp.rv34_inv_transform_dc(block16);
1251 for(j = 0; j < 4; j++){
1252 for(
i = 0; i < 4; i++, cbp >>= 1){
1253 int dc = block16[
i + j*4];
1262 r->rdsp.rv34_idct_add(dst+4*
i,
s->linesize, ptr);
1264 r->rdsp.rv34_idct_dc_add(dst+4*
i,
s->linesize,
dc);
1267 dst += 4*
s->linesize;
1274 for(j = 0; j < 4; j++){
1275 for(
i = 0; i < 4; i++, cbp >>= 1){
1276 if(!(cbp & 1))
continue;
1279 r->luma_vlc, 0, q_ac, q_ac);
1281 dst += 4*
s->linesize;
1288 for(j = 1; j < 3; j++){
1290 for(
i = 0; i < 4; i++, cbp >>= 1){
1292 if(!(cbp & 1))
continue;
1293 pdst = dst + (
i&1)*4 + (
i&2)*2*
s->uvlinesize;
1296 r->chroma_vlc, 1, q_dc, q_ac);
1307 int mb_pos =
s->mb_x +
s->mb_y *
s->mb_stride;
1310 memset(
r->avail_cache, 0,
sizeof(
r->avail_cache));
1312 dist = (
s->mb_x -
s->resync_mb_x) + (
s->mb_y -
s->resync_mb_y) *
s->mb_width;
1315 r->avail_cache[9] =
s->current_picture_ptr->mb_type[mb_pos - 1];
1316 if(dist >=
s->mb_width)
1318 r->avail_cache[3] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride];
1319 if(((
s->mb_x+1) <
s->mb_width) && dist >=
s->mb_width - 1)
1320 r->avail_cache[4] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride + 1];
1321 if(
s->mb_x && dist >
s->mb_width)
1322 r->avail_cache[1] =
s->current_picture_ptr->mb_type[mb_pos -
s->mb_stride - 1];
1324 s->qscale =
r->si.quant;
1326 r->cbp_luma [mb_pos] = cbp;
1327 r->cbp_chroma[mb_pos] = cbp >> 16;
1328 r->deblock_coefs[mb_pos] = 0xFFFF;
1329 s->current_picture_ptr->qscale_table[mb_pos] =
s->qscale;
1346 if(
s->mb_y >=
s->mb_height)
1350 if(
r->s.mb_skip_run > 1)
1362 r->intra_types =
NULL;
1373 r->intra_types_stride =
r->s.mb_width * 4 + 4;
1375 r->cbp_chroma =
av_mallocz(
r->s.mb_stride *
r->s.mb_height *
1376 sizeof(*
r->cbp_chroma));
1378 sizeof(*
r->cbp_luma));
1379 r->deblock_coefs =
av_mallocz(
r->s.mb_stride *
r->s.mb_height *
1380 sizeof(*
r->deblock_coefs));
1381 r->intra_types_hist =
av_malloc(
r->intra_types_stride * 4 * 2 *
1382 sizeof(*
r->intra_types_hist));
1384 sizeof(*
r->mb_type));
1386 if (!(
r->cbp_chroma &&
r->cbp_luma &&
r->deblock_coefs &&
1387 r->intra_types_hist &&
r->mb_type)) {
1388 r->s.context_reinit = 1;
1393 r->intra_types =
r->intra_types_hist +
r->intra_types_stride * 4;
1410 int mb_pos, slice_type;
1414 res =
r->parse_slice_header(
r, gb, &
r->si);
1421 if (slice_type !=
s->pict_type) {
1425 if (
s->width !=
r->si.width ||
s->height !=
r->si.height) {
1431 s->qscale =
r->si.quant;
1432 s->mb_num_left =
r->si.end -
r->si.start;
1433 r->s.mb_skip_run = 0;
1435 mb_pos =
s->mb_x +
s->mb_y *
s->mb_width;
1436 if(
r->si.start != mb_pos){
1438 s->mb_x =
r->si.start %
s->mb_width;
1439 s->mb_y =
r->si.start /
s->mb_width;
1441 memset(
r->intra_types_hist, -1,
r->intra_types_stride * 4 * 2 *
sizeof(*
r->intra_types_hist));
1442 s->first_slice_line = 1;
1443 s->resync_mb_x =
s->mb_x;
1444 s->resync_mb_y =
s->mb_y;
1458 if (++
s->mb_x ==
s->mb_width) {
1463 memmove(
r->intra_types_hist,
r->intra_types,
r->intra_types_stride * 4 *
sizeof(*
r->intra_types_hist));
1464 memset(
r->intra_types, -1,
r->intra_types_stride * 4 *
sizeof(*
r->intra_types_hist));
1466 if(
r->loop_filter &&
s->mb_y >= 2)
1467 r->loop_filter(
r,
s->mb_y - 2);
1474 if(
s->mb_x ==
s->resync_mb_x)
1475 s->first_slice_line=0;
1480 return s->mb_y ==
s->mb_height;
1508 #if CONFIG_RV30_DECODER
1512 #if CONFIG_RV40_DECODER
1533 if (dst ==
src || !
s1->context_initialized)
1536 if (
s->height !=
s1->height ||
s->width !=
s1->width ||
s->context_reinit) {
1537 s->height =
s1->height;
1538 s->width =
s1->width;
1545 r->cur_pts = r1->cur_pts;
1546 r->last_pts = r1->last_pts;
1547 r->next_pts = r1->next_pts;
1549 memset(&
r->si, 0,
sizeof(
r->si));
1553 if (!
s1->context_initialized)
1561 if (n < slice_count) {
1572 int got_picture = 0, ret;
1582 if ((ret =
av_frame_ref(pict,
s->current_picture_ptr->f)) < 0)
1587 }
else if (
s->last_picture_ptr) {
1609 void *
data,
int *got_picture_ptr,
1613 int buf_size = avpkt->
size;
1626 if (buf_size == 0) {
1628 if (
s->low_delay==0 &&
s->next_picture_ptr) {
1631 s->next_picture_ptr =
NULL;
1633 *got_picture_ptr = 1;
1639 slice_count = (*buf++) + 1;
1640 slices_hdr = buf + 4;
1641 buf += 8 * slice_count;
1642 buf_size -= 1 + 8 * slice_count;
1648 if(offset < 0 || offset > buf_size){
1653 if(
r->parse_slice_header(
r, &
r->s.gb, &si) < 0 || si.
start){
1657 if ((!
s->last_picture_ptr || !
s->last_picture_ptr->f->data[0]) &&
1660 "reference data.\n");
1669 if (si.
start == 0) {
1670 if (
s->mb_num_left > 0 &&
s->current_picture_ptr) {
1673 if (!
s->context_reinit)
1678 if (
s->width != si.
width ||
s->height != si.
height ||
s->context_reinit) {
1688 s->width,
s->height,
s->avctx->sample_aspect_ratio,
1707 if (!
r->tmp_b_block_base) {
1710 r->tmp_b_block_base =
av_malloc(
s->linesize * 48);
1711 for (
i = 0;
i < 2;
i++)
1712 r->tmp_b_block_y[
i] =
r->tmp_b_block_base
1713 +
i * 16 *
s->linesize;
1715 r->tmp_b_block_uv[
i] =
r->tmp_b_block_base + 32 *
s->linesize
1716 + (
i >> 1) * 8 *
s->uvlinesize
1719 r->cur_pts = si.
pts;
1721 r->last_pts =
r->next_pts;
1722 r->next_pts =
r->cur_pts;
1729 r->mv_weight1 =
r->mv_weight2 =
r->weight1 =
r->weight2 = 8192;
1730 r->scaled_weight = 0;
1732 if (
FFMAX(dist0, dist1) > refdist)
1735 r->mv_weight1 = (dist0 << 14) / refdist;
1736 r->mv_weight2 = (dist1 << 14) / refdist;
1737 if((
r->mv_weight1|
r->mv_weight2) & 511){
1738 r->weight1 =
r->mv_weight1;
1739 r->weight2 =
r->mv_weight2;
1740 r->scaled_weight = 0;
1742 r->weight1 =
r->mv_weight1 >> 9;
1743 r->weight2 =
r->mv_weight2 >> 9;
1744 r->scaled_weight = 1;
1748 s->mb_x =
s->mb_y = 0;
1750 }
else if (
s->context_reinit) {
1752 "reinitialize (start MB is %d).\n", si.
start);
1757 "multithreading mode (start MB is %d).\n", si.
start);
1761 for(
i = 0;
i < slice_count;
i++){
1766 if(offset < 0 || offset > offset1 || offset1 > buf_size){
1772 r->si.end =
s->mb_width *
s->mb_height;
1773 s->mb_num_left =
r->s.mb_x +
r->s.mb_y*
r->s.mb_width -
r->si.start;
1775 if(
i+1 < slice_count){
1777 if (offset2 < offset1 || offset2 > buf_size) {
1782 if(
r->parse_slice_header(
r, &
r->s.gb, &si) < 0){
1793 if (
s->current_picture_ptr) {
1796 r->loop_filter(
r,
s->mb_height - 1);
1801 *got_picture_ptr = ret;
static double val(void *priv, double ch)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
simple assert() macros that are a bit more flexible than ISO C assert().
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Libavcodec external API header.
#define FF_THREAD_FRAME
Decode more than one frame at once.
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
static const uint8_t shifts[2][12]
#define flags(name, subs,...)
#define fc(width, name, range_min, range_max)
static float mul(float src0, float src1)
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
void ff_er_frame_end(ERContext *s)
static void fill_rectangle(int x, int y, int w, int h)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
static int get_bits_left(GetBitContext *gb)
static unsigned int get_bits1(GetBitContext *s)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int get_interleaved_se_golomb(GetBitContext *gb)
@ AVDISCARD_ALL
discard all
@ AVDISCARD_NONKEY
discard all frames except keyframes
@ AVDISCARD_NONREF
discard all non reference
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_INFO
Standard information.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
@ AV_PICTURE_TYPE_I
Intra.
@ AV_PICTURE_TYPE_P
Predicted.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
#define FF_QSCALE_TYPE_MPEG1
common internal API header
static int ff_thread_once(char *control, void(*routine)(void))
static const uint16_t mask[17]
#define LOCAL_ALIGNED_16(t, v,...)
void ff_mpeg_er_frame_start(MpegEncContext *s)
#define MB_TYPE_INTRA16x16
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
void ff_mpv_common_end(MpegEncContext *s)
void ff_mpv_frame_end(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
av_cold void ff_mpv_idct_init(MpegEncContext *s)
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
void ff_init_block_index(MpegEncContext *s)
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
int ff_mpv_common_frame_size_change(MpegEncContext *s)
static void ff_update_block_index(MpegEncContext *s)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static const uint16_t table[]
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
useful rectangle filling function
av_cold void ff_rv30dsp_init(RV34DSPContext *c)
static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
Decode coded block pattern.
static av_cold void rv34_init_tables(void)
Initialize all tables.
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
static int adjust_pred16(int itype, int up, int left)
static void rv34_decoder_free(RV34DecContext *r)
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
static const int chroma_coeffs[3]
static void rv4_weight(RV34DecContext *r)
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
static int rv34_decoder_realloc(RV34DecContext *r)
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int *offset)
Generate VLC from codeword lengths.
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q_dc, int q_ac1, int q_ac2)
static int rv34_set_deblock_coef(RV34DecContext *r)
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
Decode 2x2 subblock of coefficients.
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
Decode a single coefficient.
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
static VLC_TYPE table_data[117592][2]
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
static void ZERO8x2(void *dst, int stride)
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
static const uint8_t avail_indexes[4]
availability index for subblocks
int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt)
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
#define GET_PTS_DIFF(a, b)
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC *vlc, int q)
Get one coefficient value from the bitstream and store it.
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
static int rv34_decoder_alloc(RV34DecContext *r)
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
static void rv34_mc_2mv_skip(RV34DecContext *r)
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
RV30 and RV40 decoder common data declarations.
@ RV34_MB_TYPE_INTRA
Intra macroblock.
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
@ RV34_MB_P_16x16
P-frame macroblock, one motion vector.
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
@ RV34_MB_SKIP
Skipped block.
#define MB_TYPE_SEPARATE_DC
miscellaneous RV30/40 tables
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding. The first table is used for intra...
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
void ff_rv40dsp_init(RV34DSPContext *c)
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
#define OTHERBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
#define FF_ARRAY_ELEMS(a)
main external API structure.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int slice_count
slice count
int has_b_frames
Size of the frame reordering buffer in the decoder.
int * slice_offset
slice offsets in the frame in bytes
enum AVDiscard skip_frame
Skip decoding for selected frames.
This structure describes decoded (raw) audio or video data.
This structure stores compressed data.
Rational number (pair of numerator and denominator).
int16_t (*motion_val[2])[2]
uint32_t * mb_type
types and macros are defined in mpegutils.h
VLC tables used by the decoder.
VLC cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
VLC second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
VLC third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
VLC first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
VLC coefficient
VLCs used for decoding big coefficients.
essential slice information
int type
slice type (intra, inter)
VLC_TYPE(* table)[2]
code, bits
static const double coeff[2][5]
static const uint8_t offset[127][2]
static int mod(int a, int b)
Modulo operation with only positive remainders.
#define INIT_VLC_STATIC_OVERLONG