vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
29 #include "internal.h"
30 #include "dsputil.h"
31 #include "avcodec.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "vc1.h"
35 #include "vc1data.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
38 #include "unary.h"
39 #include "simple_idct.h"
40 #include "mathops.h"
41 #include "vdpau_internal.h"
42 
43 #undef NDEBUG
44 #include <assert.h>
45 
46 #define MB_INTRA_VLC_BITS 9
47 #define DC_VLC_BITS 9
48 
49 
50 // offset tables for interlaced picture MVDATA decoding
51 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
52 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
53 
54 /***********************************************************************/
65 enum Imode {
73 }; //imode defines
75 
76  //Bitplane group
78 
80 {
81  MpegEncContext *s = &v->s;
82  int topleft_mb_pos, top_mb_pos;
83  int stride_y, fieldtx;
84  int v_dist;
85 
86  /* The put pixels loop is always one MB row behind the decoding loop,
87  * because we can only put pixels when overlap filtering is done, and
88  * for filtering of the bottom edge of a MB, we need the next MB row
89  * present as well.
90  * Within the row, the put pixels loop is also one MB col behind the
91  * decoding loop. The reason for this is again, because for filtering
92  * of the right MB edge, we need the next MB present. */
93  if (!s->first_slice_line) {
94  if (s->mb_x) {
95  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
96  fieldtx = v->fieldtx_plane[topleft_mb_pos];
97  stride_y = s->linesize << fieldtx;
98  v_dist = (16 - fieldtx) >> (fieldtx == 0);
100  s->dest[0] - 16 * s->linesize - 16,
101  stride_y);
103  s->dest[0] - 16 * s->linesize - 8,
104  stride_y);
106  s->dest[0] - v_dist * s->linesize - 16,
107  stride_y);
109  s->dest[0] - v_dist * s->linesize - 8,
110  stride_y);
112  s->dest[1] - 8 * s->uvlinesize - 8,
113  s->uvlinesize);
115  s->dest[2] - 8 * s->uvlinesize - 8,
116  s->uvlinesize);
117  }
118  if (s->mb_x == s->mb_width - 1) {
119  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
120  fieldtx = v->fieldtx_plane[top_mb_pos];
121  stride_y = s->linesize << fieldtx;
122  v_dist = fieldtx ? 15 : 8;
124  s->dest[0] - 16 * s->linesize,
125  stride_y);
127  s->dest[0] - 16 * s->linesize + 8,
128  stride_y);
130  s->dest[0] - v_dist * s->linesize,
131  stride_y);
133  s->dest[0] - v_dist * s->linesize + 8,
134  stride_y);
136  s->dest[1] - 8 * s->uvlinesize,
137  s->uvlinesize);
139  s->dest[2] - 8 * s->uvlinesize,
140  s->uvlinesize);
141  }
142  }
143 
144 #define inc_blk_idx(idx) do { \
145  idx++; \
146  if (idx >= v->n_allocated_blks) \
147  idx = 0; \
148  } while (0)
149 
154 }
155 
156 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
157 {
158  MpegEncContext *s = &v->s;
159  int j;
160  if (!s->first_slice_line) {
161  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
162  if (s->mb_x)
163  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
164  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
165  for (j = 0; j < 2; j++) {
166  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
167  if (s->mb_x)
168  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
169  }
170  }
171  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
172 
173  if (s->mb_y == s->end_mb_y - 1) {
174  if (s->mb_x) {
175  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
176  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
177  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
178  }
179  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
180  }
181 }
182 
184 {
185  MpegEncContext *s = &v->s;
186  int j;
187 
188  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
189  * means it runs two rows/cols behind the decoding loop. */
190  if (!s->first_slice_line) {
191  if (s->mb_x) {
192  if (s->mb_y >= s->start_mb_y + 2) {
193  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
194 
195  if (s->mb_x >= 2)
196  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
197  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
198  for (j = 0; j < 2; j++) {
199  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
200  if (s->mb_x >= 2) {
201  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
202  }
203  }
204  }
205  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
206  }
207 
208  if (s->mb_x == s->mb_width - 1) {
209  if (s->mb_y >= s->start_mb_y + 2) {
210  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
211 
212  if (s->mb_x)
213  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
214  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
215  for (j = 0; j < 2; j++) {
216  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
217  if (s->mb_x >= 2) {
218  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
219  }
220  }
221  }
222  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
223  }
224 
225  if (s->mb_y == s->end_mb_y) {
226  if (s->mb_x) {
227  if (s->mb_x >= 2)
228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
229  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
230  if (s->mb_x >= 2) {
231  for (j = 0; j < 2; j++) {
232  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
233  }
234  }
235  }
236 
237  if (s->mb_x == s->mb_width - 1) {
238  if (s->mb_x)
239  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
240  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
241  if (s->mb_x) {
242  for (j = 0; j < 2; j++) {
243  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
244  }
245  }
246  }
247  }
248  }
249 }
250 
252 {
253  MpegEncContext *s = &v->s;
254  int mb_pos;
255 
256  if (v->condover == CONDOVER_NONE)
257  return;
258 
259  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
260 
261  /* Within a MB, the horizontal overlap always runs before the vertical.
262  * To accomplish that, we run the H on left and internal borders of the
263  * currently decoded MB. Then, we wait for the next overlap iteration
264  * to do H overlap on the right edge of this MB, before moving over and
265  * running the V overlap. Therefore, the V overlap makes us trail by one
266  * MB col and the H overlap filter makes us trail by one MB row. This
267  * is reflected in the time at which we run the put_pixels loop. */
268  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
269  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
270  v->over_flags_plane[mb_pos - 1])) {
272  v->block[v->cur_blk_idx][0]);
274  v->block[v->cur_blk_idx][2]);
275  if (!(s->flags & CODEC_FLAG_GRAY)) {
277  v->block[v->cur_blk_idx][4]);
279  v->block[v->cur_blk_idx][5]);
280  }
281  }
283  v->block[v->cur_blk_idx][1]);
285  v->block[v->cur_blk_idx][3]);
286 
287  if (s->mb_x == s->mb_width - 1) {
288  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
289  v->over_flags_plane[mb_pos - s->mb_stride])) {
291  v->block[v->cur_blk_idx][0]);
293  v->block[v->cur_blk_idx][1]);
294  if (!(s->flags & CODEC_FLAG_GRAY)) {
296  v->block[v->cur_blk_idx][4]);
298  v->block[v->cur_blk_idx][5]);
299  }
300  }
302  v->block[v->cur_blk_idx][2]);
304  v->block[v->cur_blk_idx][3]);
305  }
306  }
307  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
308  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
309  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
311  v->block[v->left_blk_idx][0]);
313  v->block[v->left_blk_idx][1]);
314  if (!(s->flags & CODEC_FLAG_GRAY)) {
316  v->block[v->left_blk_idx][4]);
318  v->block[v->left_blk_idx][5]);
319  }
320  }
322  v->block[v->left_blk_idx][2]);
324  v->block[v->left_blk_idx][3]);
325  }
326 }
327 
331 static void vc1_mc_1mv(VC1Context *v, int dir)
332 {
333  MpegEncContext *s = &v->s;
334  DSPContext *dsp = &v->s.dsp;
335  uint8_t *srcY, *srcU, *srcV;
336  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
337  int off, off_uv;
338  int v_edge_pos = s->v_edge_pos >> v->field_mode;
339 
340  if ((!v->field_mode ||
341  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
342  !v->s.last_picture.f.data[0])
343  return;
344 
345  mx = s->mv[dir][0][0];
346  my = s->mv[dir][0][1];
347 
348  // store motion vectors for further use in B frames
349  if (s->pict_type == AV_PICTURE_TYPE_P) {
350  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
351  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
352  }
353 
354  uvmx = (mx + ((mx & 3) == 3)) >> 1;
355  uvmy = (my + ((my & 3) == 3)) >> 1;
356  v->luma_mv[s->mb_x][0] = uvmx;
357  v->luma_mv[s->mb_x][1] = uvmy;
358 
359  if (v->field_mode &&
360  v->cur_field_type != v->ref_field_type[dir]) {
361  my = my - 2 + 4 * v->cur_field_type;
362  uvmy = uvmy - 2 + 4 * v->cur_field_type;
363  }
364 
365  // fastuvmc shall be ignored for interlaced frame picture
366  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
367  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
368  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
369  }
370  if (v->field_mode) { // interlaced field picture
371  if (!dir) {
372  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
373  srcY = s->current_picture.f.data[0];
374  srcU = s->current_picture.f.data[1];
375  srcV = s->current_picture.f.data[2];
376  } else {
377  srcY = s->last_picture.f.data[0];
378  srcU = s->last_picture.f.data[1];
379  srcV = s->last_picture.f.data[2];
380  }
381  } else {
382  srcY = s->next_picture.f.data[0];
383  srcU = s->next_picture.f.data[1];
384  srcV = s->next_picture.f.data[2];
385  }
386  } else {
387  if (!dir) {
388  srcY = s->last_picture.f.data[0];
389  srcU = s->last_picture.f.data[1];
390  srcV = s->last_picture.f.data[2];
391  } else {
392  srcY = s->next_picture.f.data[0];
393  srcU = s->next_picture.f.data[1];
394  srcV = s->next_picture.f.data[2];
395  }
396  }
397 
398  src_x = s->mb_x * 16 + (mx >> 2);
399  src_y = s->mb_y * 16 + (my >> 2);
400  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
401  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
402 
403  if (v->profile != PROFILE_ADVANCED) {
404  src_x = av_clip( src_x, -16, s->mb_width * 16);
405  src_y = av_clip( src_y, -16, s->mb_height * 16);
406  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
407  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
408  } else {
409  src_x = av_clip( src_x, -17, s->avctx->coded_width);
410  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
411  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
412  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
413  }
414 
415  srcY += src_y * s->linesize + src_x;
416  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
417  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
418 
419  if (v->field_mode && v->ref_field_type[dir]) {
420  srcY += s->current_picture_ptr->f.linesize[0];
421  srcU += s->current_picture_ptr->f.linesize[1];
422  srcV += s->current_picture_ptr->f.linesize[2];
423  }
424 
425  /* for grayscale we should not try to read from unknown area */
426  if (s->flags & CODEC_FLAG_GRAY) {
427  srcU = s->edge_emu_buffer + 18 * s->linesize;
428  srcV = s->edge_emu_buffer + 18 * s->linesize;
429  }
430 
432  || s->h_edge_pos < 22 || v_edge_pos < 22
433  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
434  || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
435  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
436 
437  srcY -= s->mspel * (1 + s->linesize);
439  17 + s->mspel * 2, 17 + s->mspel * 2,
440  src_x - s->mspel, src_y - s->mspel,
441  s->h_edge_pos, v_edge_pos);
442  srcY = s->edge_emu_buffer;
443  s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
444  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
445  s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
446  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
447  srcU = uvbuf;
448  srcV = uvbuf + 16;
449  /* if we deal with range reduction we need to scale source blocks */
450  if (v->rangeredfrm) {
451  int i, j;
452  uint8_t *src, *src2;
453 
454  src = srcY;
455  for (j = 0; j < 17 + s->mspel * 2; j++) {
456  for (i = 0; i < 17 + s->mspel * 2; i++)
457  src[i] = ((src[i] - 128) >> 1) + 128;
458  src += s->linesize;
459  }
460  src = srcU;
461  src2 = srcV;
462  for (j = 0; j < 9; j++) {
463  for (i = 0; i < 9; i++) {
464  src[i] = ((src[i] - 128) >> 1) + 128;
465  src2[i] = ((src2[i] - 128) >> 1) + 128;
466  }
467  src += s->uvlinesize;
468  src2 += s->uvlinesize;
469  }
470  }
471  /* if we deal with intensity compensation we need to scale source blocks */
472  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
473  int i, j;
474  uint8_t *src, *src2;
475 
476  src = srcY;
477  for (j = 0; j < 17 + s->mspel * 2; j++) {
478  for (i = 0; i < 17 + s->mspel * 2; i++)
479  src[i] = v->luty[src[i]];
480  src += s->linesize;
481  }
482  src = srcU;
483  src2 = srcV;
484  for (j = 0; j < 9; j++) {
485  for (i = 0; i < 9; i++) {
486  src[i] = v->lutuv[src[i]];
487  src2[i] = v->lutuv[src2[i]];
488  }
489  src += s->uvlinesize;
490  src2 += s->uvlinesize;
491  }
492  }
493  srcY += s->mspel * (1 + s->linesize);
494  }
495 
496  if (v->field_mode && v->cur_field_type) {
497  off = s->current_picture_ptr->f.linesize[0];
498  off_uv = s->current_picture_ptr->f.linesize[1];
499  } else {
500  off = 0;
501  off_uv = 0;
502  }
503  if (s->mspel) {
504  dxy = ((my & 3) << 2) | (mx & 3);
505  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
506  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
507  srcY += s->linesize * 8;
508  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
509  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
510  } else { // hpel mc - always used for luma
511  dxy = (my & 2) | ((mx & 2) >> 1);
512  if (!v->rnd)
513  dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
514  else
515  dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
516  }
517 
518  if (s->flags & CODEC_FLAG_GRAY) return;
519  /* Chroma MC always uses qpel bilinear */
520  uvmx = (uvmx & 3) << 1;
521  uvmy = (uvmy & 3) << 1;
522  if (!v->rnd) {
523  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
524  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
525  } else {
526  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
527  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
528  }
529 }
530 
531 static inline int median4(int a, int b, int c, int d)
532 {
533  if (a < b) {
534  if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
535  else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
536  } else {
537  if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
538  else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
539  }
540 }
541 
544 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
545 {
546  MpegEncContext *s = &v->s;
547  DSPContext *dsp = &v->s.dsp;
548  uint8_t *srcY;
549  int dxy, mx, my, src_x, src_y;
550  int off;
551  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
552  int v_edge_pos = s->v_edge_pos >> v->field_mode;
553 
554  if ((!v->field_mode ||
555  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
556  !v->s.last_picture.f.data[0])
557  return;
558 
559  mx = s->mv[dir][n][0];
560  my = s->mv[dir][n][1];
561 
562  if (!dir) {
563  if (v->field_mode) {
564  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
565  srcY = s->current_picture.f.data[0];
566  else
567  srcY = s->last_picture.f.data[0];
568  } else
569  srcY = s->last_picture.f.data[0];
570  } else
571  srcY = s->next_picture.f.data[0];
572 
573  if (v->field_mode) {
574  if (v->cur_field_type != v->ref_field_type[dir])
575  my = my - 2 + 4 * v->cur_field_type;
576  }
577 
578  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
579  int same_count = 0, opp_count = 0, k;
580  int chosen_mv[2][4][2], f;
581  int tx, ty;
582  for (k = 0; k < 4; k++) {
583  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
584  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
585  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
586  opp_count += f;
587  same_count += 1 - f;
588  }
589  f = opp_count > same_count;
590  switch (f ? opp_count : same_count) {
591  case 4:
592  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
593  chosen_mv[f][2][0], chosen_mv[f][3][0]);
594  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
595  chosen_mv[f][2][1], chosen_mv[f][3][1]);
596  break;
597  case 3:
598  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
599  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
600  break;
601  case 2:
602  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
603  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
604  break;
605  }
606  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
607  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
608  for (k = 0; k < 4; k++)
609  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
610  }
611 
612  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
613  int qx, qy;
614  int width = s->avctx->coded_width;
615  int height = s->avctx->coded_height >> 1;
616  qx = (s->mb_x * 16) + (mx >> 2);
617  qy = (s->mb_y * 8) + (my >> 3);
618 
619  if (qx < -17)
620  mx -= 4 * (qx + 17);
621  else if (qx > width)
622  mx -= 4 * (qx - width);
623  if (qy < -18)
624  my -= 8 * (qy + 18);
625  else if (qy > height + 1)
626  my -= 8 * (qy - height - 1);
627  }
628 
629  if ((v->fcm == ILACE_FRAME) && fieldmv)
630  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
631  else
632  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
633  if (v->field_mode && v->cur_field_type)
634  off += s->current_picture_ptr->f.linesize[0];
635 
636  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
637  if (!fieldmv)
638  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
639  else
640  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
641 
642  if (v->profile != PROFILE_ADVANCED) {
643  src_x = av_clip(src_x, -16, s->mb_width * 16);
644  src_y = av_clip(src_y, -16, s->mb_height * 16);
645  } else {
646  src_x = av_clip(src_x, -17, s->avctx->coded_width);
647  if (v->fcm == ILACE_FRAME) {
648  if (src_y & 1)
649  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
650  else
651  src_y = av_clip(src_y, -18, s->avctx->coded_height);
652  } else {
653  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
654  }
655  }
656 
657  srcY += src_y * s->linesize + src_x;
658  if (v->field_mode && v->ref_field_type[dir])
659  srcY += s->current_picture_ptr->f.linesize[0];
660 
661  if (fieldmv && !(src_y & 1))
662  v_edge_pos--;
663  if (fieldmv && (src_y & 1) && src_y < 4)
664  src_y--;
666  || s->h_edge_pos < 13 || v_edge_pos < 23
667  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
668  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
669  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
670  /* check emulate edge stride and offset */
672  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
673  src_x - s->mspel, src_y - (s->mspel << fieldmv),
674  s->h_edge_pos, v_edge_pos);
675  srcY = s->edge_emu_buffer;
676  /* if we deal with range reduction we need to scale source blocks */
677  if (v->rangeredfrm) {
678  int i, j;
679  uint8_t *src;
680 
681  src = srcY;
682  for (j = 0; j < 9 + s->mspel * 2; j++) {
683  for (i = 0; i < 9 + s->mspel * 2; i++)
684  src[i] = ((src[i] - 128) >> 1) + 128;
685  src += s->linesize << fieldmv;
686  }
687  }
688  /* if we deal with intensity compensation we need to scale source blocks */
689  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
690  int i, j;
691  uint8_t *src;
692 
693  src = srcY;
694  for (j = 0; j < 9 + s->mspel * 2; j++) {
695  for (i = 0; i < 9 + s->mspel * 2; i++)
696  src[i] = v->luty[src[i]];
697  src += s->linesize << fieldmv;
698  }
699  }
700  srcY += s->mspel * (1 + (s->linesize << fieldmv));
701  }
702 
703  if (s->mspel) {
704  dxy = ((my & 3) << 2) | (mx & 3);
705  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
706  } else { // hpel mc - always used for luma
707  dxy = (my & 2) | ((mx & 2) >> 1);
708  if (!v->rnd)
709  dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
710  else
711  dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
712  }
713 }
714 
715 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
716 {
717  int idx, i;
718  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
719 
720  idx = ((a[3] != flag) << 3)
721  | ((a[2] != flag) << 2)
722  | ((a[1] != flag) << 1)
723  | (a[0] != flag);
724  if (!idx) {
725  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
726  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
727  return 4;
728  } else if (count[idx] == 1) {
729  switch (idx) {
730  case 0x1:
731  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
732  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
733  return 3;
734  case 0x2:
735  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
736  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
737  return 3;
738  case 0x4:
739  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
740  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
741  return 3;
742  case 0x8:
743  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
744  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
745  return 3;
746  }
747  } else if (count[idx] == 2) {
748  int t1 = 0, t2 = 0;
749  for (i = 0; i < 3; i++)
750  if (!a[i]) {
751  t1 = i;
752  break;
753  }
754  for (i = t1 + 1; i < 4; i++)
755  if (!a[i]) {
756  t2 = i;
757  break;
758  }
759  *tx = (mvx[t1] + mvx[t2]) / 2;
760  *ty = (mvy[t1] + mvy[t2]) / 2;
761  return 2;
762  } else {
763  return 0;
764  }
765  return -1;
766 }
767 
770 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
771 {
772  MpegEncContext *s = &v->s;
773  DSPContext *dsp = &v->s.dsp;
774  uint8_t *srcU, *srcV;
775  int uvmx, uvmy, uvsrc_x, uvsrc_y;
776  int k, tx = 0, ty = 0;
777  int mvx[4], mvy[4], intra[4], mv_f[4];
778  int valid_count;
779  int chroma_ref_type = v->cur_field_type, off = 0;
780  int v_edge_pos = s->v_edge_pos >> v->field_mode;
781 
782  if (!v->field_mode && !v->s.last_picture.f.data[0])
783  return;
784  if (s->flags & CODEC_FLAG_GRAY)
785  return;
786 
787  for (k = 0; k < 4; k++) {
788  mvx[k] = s->mv[dir][k][0];
789  mvy[k] = s->mv[dir][k][1];
790  intra[k] = v->mb_type[0][s->block_index[k]];
791  if (v->field_mode)
792  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
793  }
794 
795  /* calculate chroma MV vector from four luma MVs */
796  if (!v->field_mode || (v->field_mode && !v->numref)) {
797  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
798  if (!valid_count) {
799  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
800  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
801  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
802  return; //no need to do MC for intra blocks
803  }
804  } else {
805  int dominant = 0;
806  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
807  dominant = 1;
808  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
809  if (dominant)
810  chroma_ref_type = !v->cur_field_type;
811  }
812  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
813  return;
814  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
815  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
816  uvmx = (tx + ((tx & 3) == 3)) >> 1;
817  uvmy = (ty + ((ty & 3) == 3)) >> 1;
818 
819  v->luma_mv[s->mb_x][0] = uvmx;
820  v->luma_mv[s->mb_x][1] = uvmy;
821 
822  if (v->fastuvmc) {
823  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
824  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
825  }
826  // Field conversion bias
827  if (v->cur_field_type != chroma_ref_type)
828  uvmy += 2 - 4 * chroma_ref_type;
829 
830  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
831  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
832 
833  if (v->profile != PROFILE_ADVANCED) {
834  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
835  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
836  } else {
837  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
838  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
839  }
840 
841  if (!dir) {
842  if (v->field_mode) {
843  if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
844  srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
845  srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
846  } else {
847  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
848  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
849  }
850  } else {
851  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
852  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
853  }
854  } else {
855  srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
856  srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
857  }
858 
859  if (v->field_mode) {
860  if (chroma_ref_type) {
861  srcU += s->current_picture_ptr->f.linesize[1];
862  srcV += s->current_picture_ptr->f.linesize[2];
863  }
864  off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
865  }
866 
868  || s->h_edge_pos < 18 || v_edge_pos < 18
869  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
870  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
872  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
873  s->h_edge_pos >> 1, v_edge_pos >> 1);
874  s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
875  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
876  s->h_edge_pos >> 1, v_edge_pos >> 1);
877  srcU = s->edge_emu_buffer;
878  srcV = s->edge_emu_buffer + 16;
879 
880  /* if we deal with range reduction we need to scale source blocks */
881  if (v->rangeredfrm) {
882  int i, j;
883  uint8_t *src, *src2;
884 
885  src = srcU;
886  src2 = srcV;
887  for (j = 0; j < 9; j++) {
888  for (i = 0; i < 9; i++) {
889  src[i] = ((src[i] - 128) >> 1) + 128;
890  src2[i] = ((src2[i] - 128) >> 1) + 128;
891  }
892  src += s->uvlinesize;
893  src2 += s->uvlinesize;
894  }
895  }
896  /* if we deal with intensity compensation we need to scale source blocks */
897  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
898  int i, j;
899  uint8_t *src, *src2;
900 
901  src = srcU;
902  src2 = srcV;
903  for (j = 0; j < 9; j++) {
904  for (i = 0; i < 9; i++) {
905  src[i] = v->lutuv[src[i]];
906  src2[i] = v->lutuv[src2[i]];
907  }
908  src += s->uvlinesize;
909  src2 += s->uvlinesize;
910  }
911  }
912  }
913 
914  /* Chroma MC always uses qpel bilinear */
915  uvmx = (uvmx & 3) << 1;
916  uvmy = (uvmy & 3) << 1;
917  if (!v->rnd) {
918  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
919  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
920  } else {
921  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
922  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
923  }
924 }
925 
929 {
930  MpegEncContext *s = &v->s;
931  DSPContext *dsp = &v->s.dsp;
932  uint8_t *srcU, *srcV;
933  int uvsrc_x, uvsrc_y;
934  int uvmx_field[4], uvmy_field[4];
935  int i, off, tx, ty;
936  int fieldmv = v->blk_mv_type[s->block_index[0]];
937  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
938  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
939  int v_edge_pos = s->v_edge_pos >> 1;
940 
941  if (!v->s.last_picture.f.data[0])
942  return;
943  if (s->flags & CODEC_FLAG_GRAY)
944  return;
945 
946  for (i = 0; i < 4; i++) {
947  tx = s->mv[0][i][0];
948  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
949  ty = s->mv[0][i][1];
950  if (fieldmv)
951  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
952  else
953  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
954  }
955 
956  for (i = 0; i < 4; i++) {
957  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
958  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
959  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
960  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
961  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
962  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
963  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
964  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
965  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
966  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
967 
968  if (fieldmv && !(uvsrc_y & 1))
969  v_edge_pos--;
970  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
971  uvsrc_y--;
972  if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
973  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
974  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
975  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
977  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
978  s->h_edge_pos >> 1, v_edge_pos);
979  s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
980  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
981  s->h_edge_pos >> 1, v_edge_pos);
982  srcU = s->edge_emu_buffer;
983  srcV = s->edge_emu_buffer + 16;
984 
985  /* if we deal with intensity compensation we need to scale source blocks */
986  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
987  int i, j;
988  uint8_t *src, *src2;
989 
990  src = srcU;
991  src2 = srcV;
992  for (j = 0; j < 5; j++) {
993  for (i = 0; i < 5; i++) {
994  src[i] = v->lutuv[src[i]];
995  src2[i] = v->lutuv[src2[i]];
996  }
997  src += s->uvlinesize << 1;
998  src2 += s->uvlinesize << 1;
999  }
1000  }
1001  }
1002  if (!v->rnd) {
1003  dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1004  dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1005  } else {
1006  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1007  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1008  }
1009  }
1010 }
1011 
1012 /***********************************************************************/
1023 #define GET_MQUANT() \
1024  if (v->dquantfrm) { \
1025  int edges = 0; \
1026  if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1027  if (v->dqbilevel) { \
1028  mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1029  } else { \
1030  mqdiff = get_bits(gb, 3); \
1031  if (mqdiff != 7) \
1032  mquant = v->pq + mqdiff; \
1033  else \
1034  mquant = get_bits(gb, 5); \
1035  } \
1036  } \
1037  if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1038  edges = 1 << v->dqsbedge; \
1039  else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1040  edges = (3 << v->dqsbedge) % 15; \
1041  else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1042  edges = 15; \
1043  if ((edges&1) && !s->mb_x) \
1044  mquant = v->altpq; \
1045  if ((edges&2) && s->first_slice_line) \
1046  mquant = v->altpq; \
1047  if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1048  mquant = v->altpq; \
1049  if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1050  mquant = v->altpq; \
1051  }
1052 
1060 #define GET_MVDATA(_dmv_x, _dmv_y) \
1061  index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1062  VC1_MV_DIFF_VLC_BITS, 2); \
1063  if (index > 36) { \
1064  mb_has_coeffs = 1; \
1065  index -= 37; \
1066  } else \
1067  mb_has_coeffs = 0; \
1068  s->mb_intra = 0; \
1069  if (!index) { \
1070  _dmv_x = _dmv_y = 0; \
1071  } else if (index == 35) { \
1072  _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1073  _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1074  } else if (index == 36) { \
1075  _dmv_x = 0; \
1076  _dmv_y = 0; \
1077  s->mb_intra = 1; \
1078  } else { \
1079  index1 = index % 6; \
1080  if (!s->quarter_sample && index1 == 5) val = 1; \
1081  else val = 0; \
1082  if (size_table[index1] - val > 0) \
1083  val = get_bits(gb, size_table[index1] - val); \
1084  else val = 0; \
1085  sign = 0 - (val&1); \
1086  _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1087  \
1088  index1 = index / 6; \
1089  if (!s->quarter_sample && index1 == 5) val = 1; \
1090  else val = 0; \
1091  if (size_table[index1] - val > 0) \
1092  val = get_bits(gb, size_table[index1] - val); \
1093  else val = 0; \
1094  sign = 0 - (val & 1); \
1095  _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1096  }
1097 
1099  int *dmv_y, int *pred_flag)
1100 {
1101  int index, index1;
1102  int extend_x = 0, extend_y = 0;
1103  GetBitContext *gb = &v->s.gb;
1104  int bits, esc;
1105  int val, sign;
1106  const int* offs_tab;
1107 
1108  if (v->numref) {
1109  bits = VC1_2REF_MVDATA_VLC_BITS;
1110  esc = 125;
1111  } else {
1112  bits = VC1_1REF_MVDATA_VLC_BITS;
1113  esc = 71;
1114  }
1115  switch (v->dmvrange) {
1116  case 1:
1117  extend_x = 1;
1118  break;
1119  case 2:
1120  extend_y = 1;
1121  break;
1122  case 3:
1123  extend_x = extend_y = 1;
1124  break;
1125  }
1126  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1127  if (index == esc) {
1128  *dmv_x = get_bits(gb, v->k_x);
1129  *dmv_y = get_bits(gb, v->k_y);
1130  if (v->numref) {
1131  *pred_flag = *dmv_y & 1;
1132  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1133  }
1134  }
1135  else {
1136  if (extend_x)
1137  offs_tab = offset_table2;
1138  else
1139  offs_tab = offset_table1;
1140  index1 = (index + 1) % 9;
1141  if (index1 != 0) {
1142  val = get_bits(gb, index1 + extend_x);
1143  sign = 0 -(val & 1);
1144  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1145  } else
1146  *dmv_x = 0;
1147  if (extend_y)
1148  offs_tab = offset_table2;
1149  else
1150  offs_tab = offset_table1;
1151  index1 = (index + 1) / 9;
1152  if (index1 > v->numref) {
1153  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1154  sign = 0 - (val & 1);
1155  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1156  } else
1157  *dmv_y = 0;
1158  if (v->numref)
1159  *pred_flag = index1 & 1;
1160  }
1161 }
1162 
1163 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1164 {
1165  int scaledvalue, refdist;
1166  int scalesame1, scalesame2;
1167  int scalezone1_x, zone1offset_x;
1168  int table_index = dir ^ v->second_field;
1169 
1170  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1171  refdist = v->refdist;
1172  else
1173  refdist = dir ? v->brfd : v->frfd;
1174  if (refdist > 3)
1175  refdist = 3;
1176  scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1177  scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1178  scalezone1_x = vc1_field_mvpred_scales[table_index][3][refdist];
1179  zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
1180 
1181  if (FFABS(n) > 255)
1182  scaledvalue = n;
1183  else {
1184  if (FFABS(n) < scalezone1_x)
1185  scaledvalue = (n * scalesame1) >> 8;
1186  else {
1187  if (n < 0)
1188  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1189  else
1190  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1191  }
1192  }
1193  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1194 }
1195 
1196 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1197 {
1198  int scaledvalue, refdist;
1199  int scalesame1, scalesame2;
1200  int scalezone1_y, zone1offset_y;
1201  int table_index = dir ^ v->second_field;
1202 
1203  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1204  refdist = v->refdist;
1205  else
1206  refdist = dir ? v->brfd : v->frfd;
1207  if (refdist > 3)
1208  refdist = 3;
1209  scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1210  scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1211  scalezone1_y = vc1_field_mvpred_scales[table_index][4][refdist];
1212  zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
1213 
1214  if (FFABS(n) > 63)
1215  scaledvalue = n;
1216  else {
1217  if (FFABS(n) < scalezone1_y)
1218  scaledvalue = (n * scalesame1) >> 8;
1219  else {
1220  if (n < 0)
1221  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1222  else
1223  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1224  }
1225  }
1226 
1227  if (v->cur_field_type && !v->ref_field_type[dir])
1228  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1229  else
1230  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1231 }
1232 
1233 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1234 {
1235  int scalezone1_x, zone1offset_x;
1236  int scaleopp1, scaleopp2, brfd;
1237  int scaledvalue;
1238 
1239  brfd = FFMIN(v->brfd, 3);
1240  scalezone1_x = vc1_b_field_mvpred_scales[3][brfd];
1241  zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1242  scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1243  scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1244 
1245  if (FFABS(n) > 255)
1246  scaledvalue = n;
1247  else {
1248  if (FFABS(n) < scalezone1_x)
1249  scaledvalue = (n * scaleopp1) >> 8;
1250  else {
1251  if (n < 0)
1252  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1253  else
1254  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1255  }
1256  }
1257  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1258 }
1259 
1260 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1261 {
1262  int scalezone1_y, zone1offset_y;
1263  int scaleopp1, scaleopp2, brfd;
1264  int scaledvalue;
1265 
1266  brfd = FFMIN(v->brfd, 3);
1267  scalezone1_y = vc1_b_field_mvpred_scales[4][brfd];
1268  zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1269  scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1270  scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1271 
1272  if (FFABS(n) > 63)
1273  scaledvalue = n;
1274  else {
1275  if (FFABS(n) < scalezone1_y)
1276  scaledvalue = (n * scaleopp1) >> 8;
1277  else {
1278  if (n < 0)
1279  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1280  else
1281  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1282  }
1283  }
1284  if (v->cur_field_type && !v->ref_field_type[dir]) {
1285  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1286  } else {
1287  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1288  }
1289 }
1290 
1291 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1292  int dim, int dir)
1293 {
1294  int brfd, scalesame;
1295  int hpel = 1 - v->s.quarter_sample;
1296 
1297  n >>= hpel;
1298  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1299  if (dim)
1300  n = scaleforsame_y(v, i, n, dir) << hpel;
1301  else
1302  n = scaleforsame_x(v, n, dir) << hpel;
1303  return n;
1304  }
1305  brfd = FFMIN(v->brfd, 3);
1306  scalesame = vc1_b_field_mvpred_scales[0][brfd];
1307 
1308  n = (n * scalesame >> 8) << hpel;
1309  return n;
1310 }
1311 
1312 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1313  int dim, int dir)
1314 {
1315  int refdist, scaleopp;
1316  int hpel = 1 - v->s.quarter_sample;
1317 
1318  n >>= hpel;
1319  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1320  if (dim)
1321  n = scaleforopp_y(v, n, dir) << hpel;
1322  else
1323  n = scaleforopp_x(v, n) << hpel;
1324  return n;
1325  }
1326  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1327  refdist = FFMIN(v->refdist, 3);
1328  else
1329  refdist = dir ? v->brfd : v->frfd;
1330  scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1331 
1332  n = (n * scaleopp >> 8) << hpel;
1333  return n;
1334 }
1335 
1338 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1339  int mv1, int r_x, int r_y, uint8_t* is_intra,
1340  int pred_flag, int dir)
1341 {
1342  MpegEncContext *s = &v->s;
1343  int xy, wrap, off = 0;
1344  int16_t *A, *B, *C;
1345  int px, py;
1346  int sum;
1347  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1348  int opposit, a_f, b_f, c_f;
1349  int16_t field_predA[2];
1350  int16_t field_predB[2];
1351  int16_t field_predC[2];
1352  int a_valid, b_valid, c_valid;
1353  int hybridmv_thresh, y_bias = 0;
1354 
1355  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1357  mixedmv_pic = 1;
1358  else
1359  mixedmv_pic = 0;
1360  /* scale MV difference to be quad-pel */
1361  dmv_x <<= 1 - s->quarter_sample;
1362  dmv_y <<= 1 - s->quarter_sample;
1363 
1364  wrap = s->b8_stride;
1365  xy = s->block_index[n];
1366 
1367  if (s->mb_intra) {
1368  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1369  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1370  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1371  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1372  if (mv1) { /* duplicate motion data for 1-MV block */
1373  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1374  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1375  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1376  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1377  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1378  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1379  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1380  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1381  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1382  s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1383  s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1384  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1385  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1386  }
1387  return;
1388  }
1389 
1390  C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1391  A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1392  if (mv1) {
1393  if (v->field_mode && mixedmv_pic)
1394  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1395  else
1396  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1397  } else {
1398  //in 4-MV mode different blocks have different B predictor position
1399  switch (n) {
1400  case 0:
1401  off = (s->mb_x > 0) ? -1 : 1;
1402  break;
1403  case 1:
1404  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1405  break;
1406  case 2:
1407  off = 1;
1408  break;
1409  case 3:
1410  off = -1;
1411  }
1412  }
1413  B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
1414 
1415  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1416  b_valid = a_valid && (s->mb_width > 1);
1417  c_valid = s->mb_x || (n == 1 || n == 3);
1418  if (v->field_mode) {
1419  a_valid = a_valid && !is_intra[xy - wrap];
1420  b_valid = b_valid && !is_intra[xy - wrap + off];
1421  c_valid = c_valid && !is_intra[xy - 1];
1422  }
1423 
1424  if (a_valid) {
1425  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1426  num_oppfield += a_f;
1427  num_samefield += 1 - a_f;
1428  field_predA[0] = A[0];
1429  field_predA[1] = A[1];
1430  } else {
1431  field_predA[0] = field_predA[1] = 0;
1432  a_f = 0;
1433  }
1434  if (b_valid) {
1435  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1436  num_oppfield += b_f;
1437  num_samefield += 1 - b_f;
1438  field_predB[0] = B[0];
1439  field_predB[1] = B[1];
1440  } else {
1441  field_predB[0] = field_predB[1] = 0;
1442  b_f = 0;
1443  }
1444  if (c_valid) {
1445  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1446  num_oppfield += c_f;
1447  num_samefield += 1 - c_f;
1448  field_predC[0] = C[0];
1449  field_predC[1] = C[1];
1450  } else {
1451  field_predC[0] = field_predC[1] = 0;
1452  c_f = 0;
1453  }
1454 
1455  if (v->field_mode) {
1456  if (num_samefield <= num_oppfield)
1457  opposit = 1 - pred_flag;
1458  else
1459  opposit = pred_flag;
1460  } else
1461  opposit = 0;
1462  if (opposit) {
1463  if (a_valid && !a_f) {
1464  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1465  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1466  }
1467  if (b_valid && !b_f) {
1468  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1469  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1470  }
1471  if (c_valid && !c_f) {
1472  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1473  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1474  }
1475  v->mv_f[dir][xy + v->blocks_off] = 1;
1476  v->ref_field_type[dir] = !v->cur_field_type;
1477  } else {
1478  if (a_valid && a_f) {
1479  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1480  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1481  }
1482  if (b_valid && b_f) {
1483  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1484  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1485  }
1486  if (c_valid && c_f) {
1487  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1488  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1489  }
1490  v->mv_f[dir][xy + v->blocks_off] = 0;
1491  v->ref_field_type[dir] = v->cur_field_type;
1492  }
1493 
1494  if (a_valid) {
1495  px = field_predA[0];
1496  py = field_predA[1];
1497  } else if (c_valid) {
1498  px = field_predC[0];
1499  py = field_predC[1];
1500  } else if (b_valid) {
1501  px = field_predB[0];
1502  py = field_predB[1];
1503  } else {
1504  px = 0;
1505  py = 0;
1506  }
1507 
1508  if (num_samefield + num_oppfield > 1) {
1509  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1510  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1511  }
1512 
1513  /* Pullback MV as specified in 8.3.5.3.4 */
1514  if (!v->field_mode) {
1515  int qx, qy, X, Y;
1516  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1517  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1518  X = (s->mb_width << 6) - 4;
1519  Y = (s->mb_height << 6) - 4;
1520  if (mv1) {
1521  if (qx + px < -60) px = -60 - qx;
1522  if (qy + py < -60) py = -60 - qy;
1523  } else {
1524  if (qx + px < -28) px = -28 - qx;
1525  if (qy + py < -28) py = -28 - qy;
1526  }
1527  if (qx + px > X) px = X - qx;
1528  if (qy + py > Y) py = Y - qy;
1529  }
1530 
1531  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1532  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1533  hybridmv_thresh = 32;
1534  if (a_valid && c_valid) {
1535  if (is_intra[xy - wrap])
1536  sum = FFABS(px) + FFABS(py);
1537  else
1538  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1539  if (sum > hybridmv_thresh) {
1540  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1541  px = field_predA[0];
1542  py = field_predA[1];
1543  } else {
1544  px = field_predC[0];
1545  py = field_predC[1];
1546  }
1547  } else {
1548  if (is_intra[xy - 1])
1549  sum = FFABS(px) + FFABS(py);
1550  else
1551  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1552  if (sum > hybridmv_thresh) {
1553  if (get_bits1(&s->gb)) {
1554  px = field_predA[0];
1555  py = field_predA[1];
1556  } else {
1557  px = field_predC[0];
1558  py = field_predC[1];
1559  }
1560  }
1561  }
1562  }
1563  }
1564 
1565  if (v->field_mode && !s->quarter_sample) {
1566  r_x <<= 1;
1567  r_y <<= 1;
1568  }
1569  if (v->field_mode && v->numref)
1570  r_y >>= 1;
1571  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1572  y_bias = 1;
1573  /* store MV using signed modulus of MV range defined in 4.11 */
1574  s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1575  s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1576  if (mv1) { /* duplicate motion data for 1-MV block */
1577  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1578  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1579  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1580  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1581  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1582  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1583  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1584  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1585  }
1586 }
1587 
1590 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1591  int mvn, int r_x, int r_y, uint8_t* is_intra)
1592 {
1593  MpegEncContext *s = &v->s;
1594  int xy, wrap, off = 0;
1595  int A[2], B[2], C[2];
1596  int px, py;
1597  int a_valid = 0, b_valid = 0, c_valid = 0;
1598  int field_a, field_b, field_c; // 0: same, 1: opposit
1599  int total_valid, num_samefield, num_oppfield;
1600  int pos_c, pos_b, n_adj;
1601 
1602  wrap = s->b8_stride;
1603  xy = s->block_index[n];
1604 
1605  if (s->mb_intra) {
1606  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1607  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1608  s->current_picture.f.motion_val[1][xy][0] = 0;
1609  s->current_picture.f.motion_val[1][xy][1] = 0;
1610  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1611  s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1612  s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1613  s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1614  s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1615  s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1616  s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1617  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1618  s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1619  s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1620  s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1621  s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1622  s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1623  s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
1624  }
1625  return;
1626  }
1627 
1628  off = ((n == 0) || (n == 1)) ? 1 : -1;
1629  /* predict A */
1630  if (s->mb_x || (n == 1) || (n == 3)) {
1631  if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1632  || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1633  A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1634  A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1635  a_valid = 1;
1636  } else { // current block has frame mv and cand. has field MV (so average)
1637  A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1638  + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1639  A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1640  + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1641  a_valid = 1;
1642  }
1643  if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1644  a_valid = 0;
1645  A[0] = A[1] = 0;
1646  }
1647  } else
1648  A[0] = A[1] = 0;
1649  /* Predict B and C */
1650  B[0] = B[1] = C[0] = C[1] = 0;
1651  if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1652  if (!s->first_slice_line) {
1653  if (!v->is_intra[s->mb_x - s->mb_stride]) {
1654  b_valid = 1;
1655  n_adj = n | 2;
1656  pos_b = s->block_index[n_adj] - 2 * wrap;
1657  if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1658  n_adj = (n & 2) | (n & 1);
1659  }
1660  B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1661  B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
1662  if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1663  B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1664  B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1665  }
1666  }
1667  if (s->mb_width > 1) {
1668  if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1669  c_valid = 1;
1670  n_adj = 2;
1671  pos_c = s->block_index[2] - 2 * wrap + 2;
1672  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1673  n_adj = n & 2;
1674  }
1675  C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1676  C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1677  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1678  C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1679  C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1680  }
1681  if (s->mb_x == s->mb_width - 1) {
1682  if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1683  c_valid = 1;
1684  n_adj = 3;
1685  pos_c = s->block_index[3] - 2 * wrap - 2;
1686  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1687  n_adj = n | 1;
1688  }
1689  C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1690  C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1691  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1692  C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1693  C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1694  }
1695  } else
1696  c_valid = 0;
1697  }
1698  }
1699  }
1700  }
1701  } else {
1702  pos_b = s->block_index[1];
1703  b_valid = 1;
1704  B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1705  B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1706  pos_c = s->block_index[0];
1707  c_valid = 1;
1708  C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1709  C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1710  }
1711 
1712  total_valid = a_valid + b_valid + c_valid;
1713  // check if predictor A is out of bounds
1714  if (!s->mb_x && !(n == 1 || n == 3)) {
1715  A[0] = A[1] = 0;
1716  }
1717  // check if predictor B is out of bounds
1718  if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1719  B[0] = B[1] = C[0] = C[1] = 0;
1720  }
1721  if (!v->blk_mv_type[xy]) {
1722  if (s->mb_width == 1) {
1723  px = B[0];
1724  py = B[1];
1725  } else {
1726  if (total_valid >= 2) {
1727  px = mid_pred(A[0], B[0], C[0]);
1728  py = mid_pred(A[1], B[1], C[1]);
1729  } else if (total_valid) {
1730  if (a_valid) { px = A[0]; py = A[1]; }
1731  if (b_valid) { px = B[0]; py = B[1]; }
1732  if (c_valid) { px = C[0]; py = C[1]; }
1733  } else
1734  px = py = 0;
1735  }
1736  } else {
1737  if (a_valid)
1738  field_a = (A[1] & 4) ? 1 : 0;
1739  else
1740  field_a = 0;
1741  if (b_valid)
1742  field_b = (B[1] & 4) ? 1 : 0;
1743  else
1744  field_b = 0;
1745  if (c_valid)
1746  field_c = (C[1] & 4) ? 1 : 0;
1747  else
1748  field_c = 0;
1749 
1750  num_oppfield = field_a + field_b + field_c;
1751  num_samefield = total_valid - num_oppfield;
1752  if (total_valid == 3) {
1753  if ((num_samefield == 3) || (num_oppfield == 3)) {
1754  px = mid_pred(A[0], B[0], C[0]);
1755  py = mid_pred(A[1], B[1], C[1]);
1756  } else if (num_samefield >= num_oppfield) {
1757  /* take one MV from same field set depending on priority
1758  the check for B may not be necessary */
1759  px = !field_a ? A[0] : B[0];
1760  py = !field_a ? A[1] : B[1];
1761  } else {
1762  px = field_a ? A[0] : B[0];
1763  py = field_a ? A[1] : B[1];
1764  }
1765  } else if (total_valid == 2) {
1766  if (num_samefield >= num_oppfield) {
1767  if (!field_a && a_valid) {
1768  px = A[0];
1769  py = A[1];
1770  } else if (!field_b && b_valid) {
1771  px = B[0];
1772  py = B[1];
1773  } else if (c_valid) {
1774  px = C[0];
1775  py = C[1];
1776  } else px = py = 0;
1777  } else {
1778  if (field_a && a_valid) {
1779  px = A[0];
1780  py = A[1];
1781  } else if (field_b && b_valid) {
1782  px = B[0];
1783  py = B[1];
1784  } else if (c_valid) {
1785  px = C[0];
1786  py = C[1];
1787  }
1788  }
1789  } else if (total_valid == 1) {
1790  px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1791  py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1792  } else
1793  px = py = 0;
1794  }
1795 
1796  /* store MV using signed modulus of MV range defined in 4.11 */
1797  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1798  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1799  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1800  s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1801  s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1802  s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1803  s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1804  s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1805  s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1806  } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1807  s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1808  s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1809  s->mv[0][n + 1][0] = s->mv[0][n][0];
1810  s->mv[0][n + 1][1] = s->mv[0][n][1];
1811  }
1812 }
1813 
1817 {
1818  MpegEncContext *s = &v->s;
1819  DSPContext *dsp = &v->s.dsp;
1820  uint8_t *srcY, *srcU, *srcV;
1821  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1822  int off, off_uv;
1823  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1824 
1825  if (!v->field_mode && !v->s.next_picture.f.data[0])
1826  return;
1827 
1828  mx = s->mv[1][0][0];
1829  my = s->mv[1][0][1];
1830  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1831  uvmy = (my + ((my & 3) == 3)) >> 1;
1832  if (v->field_mode) {
1833  if (v->cur_field_type != v->ref_field_type[1])
1834  my = my - 2 + 4 * v->cur_field_type;
1835  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1836  }
1837  if (v->fastuvmc) {
1838  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1839  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1840  }
1841  srcY = s->next_picture.f.data[0];
1842  srcU = s->next_picture.f.data[1];
1843  srcV = s->next_picture.f.data[2];
1844 
1845  src_x = s->mb_x * 16 + (mx >> 2);
1846  src_y = s->mb_y * 16 + (my >> 2);
1847  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1848  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1849 
1850  if (v->profile != PROFILE_ADVANCED) {
1851  src_x = av_clip( src_x, -16, s->mb_width * 16);
1852  src_y = av_clip( src_y, -16, s->mb_height * 16);
1853  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1854  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1855  } else {
1856  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1857  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1858  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1859  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1860  }
1861 
1862  srcY += src_y * s->linesize + src_x;
1863  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1864  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1865 
1866  if (v->field_mode && v->ref_field_type[1]) {
1867  srcY += s->current_picture_ptr->f.linesize[0];
1868  srcU += s->current_picture_ptr->f.linesize[1];
1869  srcV += s->current_picture_ptr->f.linesize[2];
1870  }
1871 
1872  /* for grayscale we should not try to read from unknown area */
1873  if (s->flags & CODEC_FLAG_GRAY) {
1874  srcU = s->edge_emu_buffer + 18 * s->linesize;
1875  srcV = s->edge_emu_buffer + 18 * s->linesize;
1876  }
1877 
1878  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
1879  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
1880  || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
1881  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1882 
1883  srcY -= s->mspel * (1 + s->linesize);
1884  s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
1885  17 + s->mspel * 2, 17 + s->mspel * 2,
1886  src_x - s->mspel, src_y - s->mspel,
1887  s->h_edge_pos, v_edge_pos);
1888  srcY = s->edge_emu_buffer;
1889  s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
1890  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1891  s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
1892  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1893  srcU = uvbuf;
1894  srcV = uvbuf + 16;
1895  /* if we deal with range reduction we need to scale source blocks */
1896  if (v->rangeredfrm) {
1897  int i, j;
1898  uint8_t *src, *src2;
1899 
1900  src = srcY;
1901  for (j = 0; j < 17 + s->mspel * 2; j++) {
1902  for (i = 0; i < 17 + s->mspel * 2; i++)
1903  src[i] = ((src[i] - 128) >> 1) + 128;
1904  src += s->linesize;
1905  }
1906  src = srcU;
1907  src2 = srcV;
1908  for (j = 0; j < 9; j++) {
1909  for (i = 0; i < 9; i++) {
1910  src[i] = ((src[i] - 128) >> 1) + 128;
1911  src2[i] = ((src2[i] - 128) >> 1) + 128;
1912  }
1913  src += s->uvlinesize;
1914  src2 += s->uvlinesize;
1915  }
1916  }
1917  srcY += s->mspel * (1 + s->linesize);
1918  }
1919 
1920  if (v->field_mode && v->cur_field_type) {
1921  off = s->current_picture_ptr->f.linesize[0];
1922  off_uv = s->current_picture_ptr->f.linesize[1];
1923  } else {
1924  off = 0;
1925  off_uv = 0;
1926  }
1927 
1928  if (s->mspel) {
1929  dxy = ((my & 3) << 2) | (mx & 3);
1930  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
1931  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
1932  srcY += s->linesize * 8;
1933  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
1934  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
1935  } else { // hpel mc
1936  dxy = (my & 2) | ((mx & 2) >> 1);
1937 
1938  if (!v->rnd)
1939  dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1940  else
1941  dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1942  }
1943 
1944  if (s->flags & CODEC_FLAG_GRAY) return;
1945  /* Chroma MC always uses qpel blilinear */
1946  uvmx = (uvmx & 3) << 1;
1947  uvmy = (uvmy & 3) << 1;
1948  if (!v->rnd) {
1949  dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1950  dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1951  } else {
1952  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1953  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1954  }
1955 }
1956 
1957 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
1958 {
1959  int n = bfrac;
1960 
1961 #if B_FRACTION_DEN==256
1962  if (inv)
1963  n -= 256;
1964  if (!qs)
1965  return 2 * ((value * n + 255) >> 9);
1966  return (value * n + 128) >> 8;
1967 #else
1968  if (inv)
1969  n -= B_FRACTION_DEN;
1970  if (!qs)
1971  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
1972  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
1973 #endif
1974 }
1975 
1976 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
1977  int qs, int qs_last)
1978 {
1979  int n = bfrac;
1980 
1981  if (inv)
1982  n -= 256;
1983  n <<= !qs_last;
1984  if (!qs)
1985  return (value * n + 255) >> 9;
1986  else
1987  return (value * n + 128) >> 8;
1988 }
1989 
1992 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
1993  int direct, int mode)
1994 {
1995  if (v->use_ic) {
1996  v->mv_mode2 = v->mv_mode;
1998  }
1999  if (direct) {
2000  vc1_mc_1mv(v, 0);
2001  vc1_interp_mc(v);
2002  if (v->use_ic)
2003  v->mv_mode = v->mv_mode2;
2004  return;
2005  }
2006  if (mode == BMV_TYPE_INTERPOLATED) {
2007  vc1_mc_1mv(v, 0);
2008  vc1_interp_mc(v);
2009  if (v->use_ic)
2010  v->mv_mode = v->mv_mode2;
2011  return;
2012  }
2013 
2014  if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2015  v->mv_mode = v->mv_mode2;
2016  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2017  if (v->use_ic)
2018  v->mv_mode = v->mv_mode2;
2019 }
2020 
2021 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2022  int direct, int mvtype)
2023 {
2024  MpegEncContext *s = &v->s;
2025  int xy, wrap, off = 0;
2026  int16_t *A, *B, *C;
2027  int px, py;
2028  int sum;
2029  int r_x, r_y;
2030  const uint8_t *is_intra = v->mb_type[0];
2031 
2032  r_x = v->range_x;
2033  r_y = v->range_y;
2034  /* scale MV difference to be quad-pel */
2035  dmv_x[0] <<= 1 - s->quarter_sample;
2036  dmv_y[0] <<= 1 - s->quarter_sample;
2037  dmv_x[1] <<= 1 - s->quarter_sample;
2038  dmv_y[1] <<= 1 - s->quarter_sample;
2039 
2040  wrap = s->b8_stride;
2041  xy = s->block_index[0];
2042 
2043  if (s->mb_intra) {
2044  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2045  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2046  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2047  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2048  return;
2049  }
2050  if (!v->field_mode) {
2051  s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2052  s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2053  s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2054  s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2055 
2056  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2057  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2058  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2059  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2060  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2061  }
2062  if (direct) {
2063  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2064  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2065  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2066  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2067  return;
2068  }
2069 
2070  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2071  C = s->current_picture.f.motion_val[0][xy - 2];
2072  A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2073  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2074  B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2075 
2076  if (!s->mb_x) C[0] = C[1] = 0;
2077  if (!s->first_slice_line) { // predictor A is not out of bounds
2078  if (s->mb_width == 1) {
2079  px = A[0];
2080  py = A[1];
2081  } else {
2082  px = mid_pred(A[0], B[0], C[0]);
2083  py = mid_pred(A[1], B[1], C[1]);
2084  }
2085  } else if (s->mb_x) { // predictor C is not out of bounds
2086  px = C[0];
2087  py = C[1];
2088  } else {
2089  px = py = 0;
2090  }
2091  /* Pullback MV as specified in 8.3.5.3.4 */
2092  {
2093  int qx, qy, X, Y;
2094  if (v->profile < PROFILE_ADVANCED) {
2095  qx = (s->mb_x << 5);
2096  qy = (s->mb_y << 5);
2097  X = (s->mb_width << 5) - 4;
2098  Y = (s->mb_height << 5) - 4;
2099  if (qx + px < -28) px = -28 - qx;
2100  if (qy + py < -28) py = -28 - qy;
2101  if (qx + px > X) px = X - qx;
2102  if (qy + py > Y) py = Y - qy;
2103  } else {
2104  qx = (s->mb_x << 6);
2105  qy = (s->mb_y << 6);
2106  X = (s->mb_width << 6) - 4;
2107  Y = (s->mb_height << 6) - 4;
2108  if (qx + px < -60) px = -60 - qx;
2109  if (qy + py < -60) py = -60 - qy;
2110  if (qx + px > X) px = X - qx;
2111  if (qy + py > Y) py = Y - qy;
2112  }
2113  }
2114  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2115  if (0 && !s->first_slice_line && s->mb_x) {
2116  if (is_intra[xy - wrap])
2117  sum = FFABS(px) + FFABS(py);
2118  else
2119  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2120  if (sum > 32) {
2121  if (get_bits1(&s->gb)) {
2122  px = A[0];
2123  py = A[1];
2124  } else {
2125  px = C[0];
2126  py = C[1];
2127  }
2128  } else {
2129  if (is_intra[xy - 2])
2130  sum = FFABS(px) + FFABS(py);
2131  else
2132  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2133  if (sum > 32) {
2134  if (get_bits1(&s->gb)) {
2135  px = A[0];
2136  py = A[1];
2137  } else {
2138  px = C[0];
2139  py = C[1];
2140  }
2141  }
2142  }
2143  }
2144  /* store MV using signed modulus of MV range defined in 4.11 */
2145  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2146  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2147  }
2148  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2149  C = s->current_picture.f.motion_val[1][xy - 2];
2150  A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2151  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2152  B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2153 
2154  if (!s->mb_x)
2155  C[0] = C[1] = 0;
2156  if (!s->first_slice_line) { // predictor A is not out of bounds
2157  if (s->mb_width == 1) {
2158  px = A[0];
2159  py = A[1];
2160  } else {
2161  px = mid_pred(A[0], B[0], C[0]);
2162  py = mid_pred(A[1], B[1], C[1]);
2163  }
2164  } else if (s->mb_x) { // predictor C is not out of bounds
2165  px = C[0];
2166  py = C[1];
2167  } else {
2168  px = py = 0;
2169  }
2170  /* Pullback MV as specified in 8.3.5.3.4 */
2171  {
2172  int qx, qy, X, Y;
2173  if (v->profile < PROFILE_ADVANCED) {
2174  qx = (s->mb_x << 5);
2175  qy = (s->mb_y << 5);
2176  X = (s->mb_width << 5) - 4;
2177  Y = (s->mb_height << 5) - 4;
2178  if (qx + px < -28) px = -28 - qx;
2179  if (qy + py < -28) py = -28 - qy;
2180  if (qx + px > X) px = X - qx;
2181  if (qy + py > Y) py = Y - qy;
2182  } else {
2183  qx = (s->mb_x << 6);
2184  qy = (s->mb_y << 6);
2185  X = (s->mb_width << 6) - 4;
2186  Y = (s->mb_height << 6) - 4;
2187  if (qx + px < -60) px = -60 - qx;
2188  if (qy + py < -60) py = -60 - qy;
2189  if (qx + px > X) px = X - qx;
2190  if (qy + py > Y) py = Y - qy;
2191  }
2192  }
2193  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2194  if (0 && !s->first_slice_line && s->mb_x) {
2195  if (is_intra[xy - wrap])
2196  sum = FFABS(px) + FFABS(py);
2197  else
2198  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2199  if (sum > 32) {
2200  if (get_bits1(&s->gb)) {
2201  px = A[0];
2202  py = A[1];
2203  } else {
2204  px = C[0];
2205  py = C[1];
2206  }
2207  } else {
2208  if (is_intra[xy - 2])
2209  sum = FFABS(px) + FFABS(py);
2210  else
2211  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2212  if (sum > 32) {
2213  if (get_bits1(&s->gb)) {
2214  px = A[0];
2215  py = A[1];
2216  } else {
2217  px = C[0];
2218  py = C[1];
2219  }
2220  }
2221  }
2222  }
2223  /* store MV using signed modulus of MV range defined in 4.11 */
2224 
2225  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2226  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2227  }
2228  s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2229  s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2230  s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2231  s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
2232 }
2233 
2234 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2235 {
2236  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2237  MpegEncContext *s = &v->s;
2238  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2239 
2240  if (v->bmvtype == BMV_TYPE_DIRECT) {
2241  int total_opp, k, f;
2242  if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2243  s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2244  v->bfraction, 0, s->quarter_sample, v->qs_last);
2245  s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2246  v->bfraction, 0, s->quarter_sample, v->qs_last);
2247  s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2248  v->bfraction, 1, s->quarter_sample, v->qs_last);
2249  s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2250  v->bfraction, 1, s->quarter_sample, v->qs_last);
2251 
2252  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2253  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2254  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2255  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2256  f = (total_opp > 2) ? 1 : 0;
2257  } else {
2258  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2259  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2260  f = 0;
2261  }
2262  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2263  for (k = 0; k < 4; k++) {
2264  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2265  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2266  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2267  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2268  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2269  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2270  }
2271  return;
2272  }
2273  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2274  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2275  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2276  return;
2277  }
2278  if (dir) { // backward
2279  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2280  if (n == 3 || mv1) {
2281  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2282  }
2283  } else { // forward
2284  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2285  if (n == 3 || mv1) {
2286  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2287  }
2288  }
2289 }
2290 
2300 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2301  int16_t **dc_val_ptr, int *dir_ptr)
2302 {
2303  int a, b, c, wrap, pred, scale;
2304  int16_t *dc_val;
2305  static const uint16_t dcpred[32] = {
2306  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2307  114, 102, 93, 85, 79, 73, 68, 64,
2308  60, 57, 54, 51, 49, 47, 45, 43,
2309  41, 39, 38, 37, 35, 34, 33
2310  };
2311 
2312  /* find prediction - wmv3_dc_scale always used here in fact */
2313  if (n < 4) scale = s->y_dc_scale;
2314  else scale = s->c_dc_scale;
2315 
2316  wrap = s->block_wrap[n];
2317  dc_val = s->dc_val[0] + s->block_index[n];
2318 
2319  /* B A
2320  * C X
2321  */
2322  c = dc_val[ - 1];
2323  b = dc_val[ - 1 - wrap];
2324  a = dc_val[ - wrap];
2325 
2326  if (pq < 9 || !overlap) {
2327  /* Set outer values */
2328  if (s->first_slice_line && (n != 2 && n != 3))
2329  b = a = dcpred[scale];
2330  if (s->mb_x == 0 && (n != 1 && n != 3))
2331  b = c = dcpred[scale];
2332  } else {
2333  /* Set outer values */
2334  if (s->first_slice_line && (n != 2 && n != 3))
2335  b = a = 0;
2336  if (s->mb_x == 0 && (n != 1 && n != 3))
2337  b = c = 0;
2338  }
2339 
2340  if (abs(a - b) <= abs(b - c)) {
2341  pred = c;
2342  *dir_ptr = 1; // left
2343  } else {
2344  pred = a;
2345  *dir_ptr = 0; // top
2346  }
2347 
2348  /* update predictor */
2349  *dc_val_ptr = &dc_val[0];
2350  return pred;
2351 }
2352 
2353 
2365 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2366  int a_avail, int c_avail,
2367  int16_t **dc_val_ptr, int *dir_ptr)
2368 {
2369  int a, b, c, wrap, pred;
2370  int16_t *dc_val;
2371  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2372  int q1, q2 = 0;
2373  int dqscale_index;
2374 
2375  wrap = s->block_wrap[n];
2376  dc_val = s->dc_val[0] + s->block_index[n];
2377 
2378  /* B A
2379  * C X
2380  */
2381  c = dc_val[ - 1];
2382  b = dc_val[ - 1 - wrap];
2383  a = dc_val[ - wrap];
2384  /* scale predictors if needed */
2385  q1 = s->current_picture.f.qscale_table[mb_pos];
2386  dqscale_index = s->y_dc_scale_table[q1] - 1;
2387  if (dqscale_index < 0)
2388  return 0;
2389  if (c_avail && (n != 1 && n != 3)) {
2390  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2391  if (q2 && q2 != q1)
2392  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2393  }
2394  if (a_avail && (n != 2 && n != 3)) {
2395  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2396  if (q2 && q2 != q1)
2397  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2398  }
2399  if (a_avail && c_avail && (n != 3)) {
2400  int off = mb_pos;
2401  if (n != 1)
2402  off--;
2403  if (n != 2)
2404  off -= s->mb_stride;
2405  q2 = s->current_picture.f.qscale_table[off];
2406  if (q2 && q2 != q1)
2407  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2408  }
2409 
2410  if (a_avail && c_avail) {
2411  if (abs(a - b) <= abs(b - c)) {
2412  pred = c;
2413  *dir_ptr = 1; // left
2414  } else {
2415  pred = a;
2416  *dir_ptr = 0; // top
2417  }
2418  } else if (a_avail) {
2419  pred = a;
2420  *dir_ptr = 0; // top
2421  } else if (c_avail) {
2422  pred = c;
2423  *dir_ptr = 1; // left
2424  } else {
2425  pred = 0;
2426  *dir_ptr = 1; // left
2427  }
2428 
2429  /* update predictor */
2430  *dc_val_ptr = &dc_val[0];
2431  return pred;
2432 }
2433  // Block group
2435 
2442 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2443  uint8_t **coded_block_ptr)
2444 {
2445  int xy, wrap, pred, a, b, c;
2446 
2447  xy = s->block_index[n];
2448  wrap = s->b8_stride;
2449 
2450  /* B C
2451  * A X
2452  */
2453  a = s->coded_block[xy - 1 ];
2454  b = s->coded_block[xy - 1 - wrap];
2455  c = s->coded_block[xy - wrap];
2456 
2457  if (b == c) {
2458  pred = a;
2459  } else {
2460  pred = c;
2461  }
2462 
2463  /* store value */
2464  *coded_block_ptr = &s->coded_block[xy];
2465 
2466  return pred;
2467 }
2468 
2478 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2479  int *value, int codingset)
2480 {
2481  GetBitContext *gb = &v->s.gb;
2482  int index, escape, run = 0, level = 0, lst = 0;
2483 
2484  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2485  if (index != ff_vc1_ac_sizes[codingset] - 1) {
2486  run = vc1_index_decode_table[codingset][index][0];
2487  level = vc1_index_decode_table[codingset][index][1];
2488  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2489  if (get_bits1(gb))
2490  level = -level;
2491  } else {
2492  escape = decode210(gb);
2493  if (escape != 2) {
2494  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2495  run = vc1_index_decode_table[codingset][index][0];
2496  level = vc1_index_decode_table[codingset][index][1];
2497  lst = index >= vc1_last_decode_table[codingset];
2498  if (escape == 0) {
2499  if (lst)
2500  level += vc1_last_delta_level_table[codingset][run];
2501  else
2502  level += vc1_delta_level_table[codingset][run];
2503  } else {
2504  if (lst)
2505  run += vc1_last_delta_run_table[codingset][level] + 1;
2506  else
2507  run += vc1_delta_run_table[codingset][level] + 1;
2508  }
2509  if (get_bits1(gb))
2510  level = -level;
2511  } else {
2512  int sign;
2513  lst = get_bits1(gb);
2514  if (v->s.esc3_level_length == 0) {
2515  if (v->pq < 8 || v->dquantfrm) { // table 59
2516  v->s.esc3_level_length = get_bits(gb, 3);
2517  if (!v->s.esc3_level_length)
2518  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2519  } else { // table 60
2520  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2521  }
2522  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2523  }
2524  run = get_bits(gb, v->s.esc3_run_length);
2525  sign = get_bits1(gb);
2526  level = get_bits(gb, v->s.esc3_level_length);
2527  if (sign)
2528  level = -level;
2529  }
2530  }
2531 
2532  *last = lst;
2533  *skip = run;
2534  *value = level;
2535 }
2536 
2544 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2545  int coded, int codingset)
2546 {
2547  GetBitContext *gb = &v->s.gb;
2548  MpegEncContext *s = &v->s;
2549  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2550  int i;
2551  int16_t *dc_val;
2552  int16_t *ac_val, *ac_val2;
2553  int dcdiff;
2554 
2555  /* Get DC differential */
2556  if (n < 4) {
2558  } else {
2560  }
2561  if (dcdiff < 0) {
2562  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2563  return -1;
2564  }
2565  if (dcdiff) {
2566  if (dcdiff == 119 /* ESC index value */) {
2567  /* TODO: Optimize */
2568  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2569  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2570  else dcdiff = get_bits(gb, 8);
2571  } else {
2572  if (v->pq == 1)
2573  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2574  else if (v->pq == 2)
2575  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2576  }
2577  if (get_bits1(gb))
2578  dcdiff = -dcdiff;
2579  }
2580 
2581  /* Prediction */
2582  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2583  *dc_val = dcdiff;
2584 
2585  /* Store the quantized DC coeff, used for prediction */
2586  if (n < 4) {
2587  block[0] = dcdiff * s->y_dc_scale;
2588  } else {
2589  block[0] = dcdiff * s->c_dc_scale;
2590  }
2591  /* Skip ? */
2592  if (!coded) {
2593  goto not_coded;
2594  }
2595 
2596  // AC Decoding
2597  i = 1;
2598 
2599  {
2600  int last = 0, skip, value;
2601  const uint8_t *zz_table;
2602  int scale;
2603  int k;
2604 
2605  scale = v->pq * 2 + v->halfpq;
2606 
2607  if (v->s.ac_pred) {
2608  if (!dc_pred_dir)
2609  zz_table = v->zz_8x8[2];
2610  else
2611  zz_table = v->zz_8x8[3];
2612  } else
2613  zz_table = v->zz_8x8[1];
2614 
2615  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2616  ac_val2 = ac_val;
2617  if (dc_pred_dir) // left
2618  ac_val -= 16;
2619  else // top
2620  ac_val -= 16 * s->block_wrap[n];
2621 
2622  while (!last) {
2623  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2624  i += skip;
2625  if (i > 63)
2626  break;
2627  block[zz_table[i++]] = value;
2628  }
2629 
2630  /* apply AC prediction if needed */
2631  if (s->ac_pred) {
2632  if (dc_pred_dir) { // left
2633  for (k = 1; k < 8; k++)
2634  block[k << v->left_blk_sh] += ac_val[k];
2635  } else { // top
2636  for (k = 1; k < 8; k++)
2637  block[k << v->top_blk_sh] += ac_val[k + 8];
2638  }
2639  }
2640  /* save AC coeffs for further prediction */
2641  for (k = 1; k < 8; k++) {
2642  ac_val2[k] = block[k << v->left_blk_sh];
2643  ac_val2[k + 8] = block[k << v->top_blk_sh];
2644  }
2645 
2646  /* scale AC coeffs */
2647  for (k = 1; k < 64; k++)
2648  if (block[k]) {
2649  block[k] *= scale;
2650  if (!v->pquantizer)
2651  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2652  }
2653 
2654  if (s->ac_pred) i = 63;
2655  }
2656 
2657 not_coded:
2658  if (!coded) {
2659  int k, scale;
2660  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2661  ac_val2 = ac_val;
2662 
2663  i = 0;
2664  scale = v->pq * 2 + v->halfpq;
2665  memset(ac_val2, 0, 16 * 2);
2666  if (dc_pred_dir) { // left
2667  ac_val -= 16;
2668  if (s->ac_pred)
2669  memcpy(ac_val2, ac_val, 8 * 2);
2670  } else { // top
2671  ac_val -= 16 * s->block_wrap[n];
2672  if (s->ac_pred)
2673  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2674  }
2675 
2676  /* apply AC prediction if needed */
2677  if (s->ac_pred) {
2678  if (dc_pred_dir) { //left
2679  for (k = 1; k < 8; k++) {
2680  block[k << v->left_blk_sh] = ac_val[k] * scale;
2681  if (!v->pquantizer && block[k << v->left_blk_sh])
2682  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2683  }
2684  } else { // top
2685  for (k = 1; k < 8; k++) {
2686  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2687  if (!v->pquantizer && block[k << v->top_blk_sh])
2688  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2689  }
2690  }
2691  i = 63;
2692  }
2693  }
2694  s->block_last_index[n] = i;
2695 
2696  return 0;
2697 }
2698 
2708  int coded, int codingset, int mquant)
2709 {
2710  GetBitContext *gb = &v->s.gb;
2711  MpegEncContext *s = &v->s;
2712  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2713  int i;
2714  int16_t *dc_val;
2715  int16_t *ac_val, *ac_val2;
2716  int dcdiff;
2717  int a_avail = v->a_avail, c_avail = v->c_avail;
2718  int use_pred = s->ac_pred;
2719  int scale;
2720  int q1, q2 = 0;
2721  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2722 
2723  /* Get DC differential */
2724  if (n < 4) {
2726  } else {
2728  }
2729  if (dcdiff < 0) {
2730  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2731  return -1;
2732  }
2733  if (dcdiff) {
2734  if (dcdiff == 119 /* ESC index value */) {
2735  /* TODO: Optimize */
2736  if (mquant == 1) dcdiff = get_bits(gb, 10);
2737  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2738  else dcdiff = get_bits(gb, 8);
2739  } else {
2740  if (mquant == 1)
2741  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2742  else if (mquant == 2)
2743  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2744  }
2745  if (get_bits1(gb))
2746  dcdiff = -dcdiff;
2747  }
2748 
2749  /* Prediction */
2750  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2751  *dc_val = dcdiff;
2752 
2753  /* Store the quantized DC coeff, used for prediction */
2754  if (n < 4) {
2755  block[0] = dcdiff * s->y_dc_scale;
2756  } else {
2757  block[0] = dcdiff * s->c_dc_scale;
2758  }
2759 
2760  //AC Decoding
2761  i = 1;
2762 
2763  /* check if AC is needed at all */
2764  if (!a_avail && !c_avail)
2765  use_pred = 0;
2766  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2767  ac_val2 = ac_val;
2768 
2769  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2770 
2771  if (dc_pred_dir) // left
2772  ac_val -= 16;
2773  else // top
2774  ac_val -= 16 * s->block_wrap[n];
2775 
2776  q1 = s->current_picture.f.qscale_table[mb_pos];
2777  if ( dc_pred_dir && c_avail && mb_pos)
2778  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2779  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2780  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2781  if ( dc_pred_dir && n == 1)
2782  q2 = q1;
2783  if (!dc_pred_dir && n == 2)
2784  q2 = q1;
2785  if (n == 3)
2786  q2 = q1;
2787 
2788  if (coded) {
2789  int last = 0, skip, value;
2790  const uint8_t *zz_table;
2791  int k;
2792 
2793  if (v->s.ac_pred) {
2794  if (!use_pred && v->fcm == ILACE_FRAME) {
2795  zz_table = v->zzi_8x8;
2796  } else {
2797  if (!dc_pred_dir) // top
2798  zz_table = v->zz_8x8[2];
2799  else // left
2800  zz_table = v->zz_8x8[3];
2801  }
2802  } else {
2803  if (v->fcm != ILACE_FRAME)
2804  zz_table = v->zz_8x8[1];
2805  else
2806  zz_table = v->zzi_8x8;
2807  }
2808 
2809  while (!last) {
2810  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2811  i += skip;
2812  if (i > 63)
2813  break;
2814  block[zz_table[i++]] = value;
2815  }
2816 
2817  /* apply AC prediction if needed */
2818  if (use_pred) {
2819  /* scale predictors if needed*/
2820  if (q2 && q1 != q2) {
2821  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2822  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2823 
2824  if (q1 < 1)
2825  return AVERROR_INVALIDDATA;
2826  if (dc_pred_dir) { // left
2827  for (k = 1; k < 8; k++)
2828  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2829  } else { // top
2830  for (k = 1; k < 8; k++)
2831  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2832  }
2833  } else {
2834  if (dc_pred_dir) { //left
2835  for (k = 1; k < 8; k++)
2836  block[k << v->left_blk_sh] += ac_val[k];
2837  } else { //top
2838  for (k = 1; k < 8; k++)
2839  block[k << v->top_blk_sh] += ac_val[k + 8];
2840  }
2841  }
2842  }
2843  /* save AC coeffs for further prediction */
2844  for (k = 1; k < 8; k++) {
2845  ac_val2[k ] = block[k << v->left_blk_sh];
2846  ac_val2[k + 8] = block[k << v->top_blk_sh];
2847  }
2848 
2849  /* scale AC coeffs */
2850  for (k = 1; k < 64; k++)
2851  if (block[k]) {
2852  block[k] *= scale;
2853  if (!v->pquantizer)
2854  block[k] += (block[k] < 0) ? -mquant : mquant;
2855  }
2856 
2857  if (use_pred) i = 63;
2858  } else { // no AC coeffs
2859  int k;
2860 
2861  memset(ac_val2, 0, 16 * 2);
2862  if (dc_pred_dir) { // left
2863  if (use_pred) {
2864  memcpy(ac_val2, ac_val, 8 * 2);
2865  if (q2 && q1 != q2) {
2866  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2867  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2868  if (q1 < 1)
2869  return AVERROR_INVALIDDATA;
2870  for (k = 1; k < 8; k++)
2871  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2872  }
2873  }
2874  } else { // top
2875  if (use_pred) {
2876  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2877  if (q2 && q1 != q2) {
2878  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2879  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2880  if (q1 < 1)
2881  return AVERROR_INVALIDDATA;
2882  for (k = 1; k < 8; k++)
2883  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2884  }
2885  }
2886  }
2887 
2888  /* apply AC prediction if needed */
2889  if (use_pred) {
2890  if (dc_pred_dir) { // left
2891  for (k = 1; k < 8; k++) {
2892  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2893  if (!v->pquantizer && block[k << v->left_blk_sh])
2894  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2895  }
2896  } else { // top
2897  for (k = 1; k < 8; k++) {
2898  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2899  if (!v->pquantizer && block[k << v->top_blk_sh])
2900  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2901  }
2902  }
2903  i = 63;
2904  }
2905  }
2906  s->block_last_index[n] = i;
2907 
2908  return 0;
2909 }
2910 
2920  int coded, int mquant, int codingset)
2921 {
2922  GetBitContext *gb = &v->s.gb;
2923  MpegEncContext *s = &v->s;
2924  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2925  int i;
2926  int16_t *dc_val;
2927  int16_t *ac_val, *ac_val2;
2928  int dcdiff;
2929  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2930  int a_avail = v->a_avail, c_avail = v->c_avail;
2931  int use_pred = s->ac_pred;
2932  int scale;
2933  int q1, q2 = 0;
2934 
2935  s->dsp.clear_block(block);
2936 
2937  /* XXX: Guard against dumb values of mquant */
2938  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
2939 
2940  /* Set DC scale - y and c use the same */
2941  s->y_dc_scale = s->y_dc_scale_table[mquant];
2942  s->c_dc_scale = s->c_dc_scale_table[mquant];
2943 
2944  /* Get DC differential */
2945  if (n < 4) {
2947  } else {
2949  }
2950  if (dcdiff < 0) {
2951  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2952  return -1;
2953  }
2954  if (dcdiff) {
2955  if (dcdiff == 119 /* ESC index value */) {
2956  /* TODO: Optimize */
2957  if (mquant == 1) dcdiff = get_bits(gb, 10);
2958  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2959  else dcdiff = get_bits(gb, 8);
2960  } else {
2961  if (mquant == 1)
2962  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2963  else if (mquant == 2)
2964  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2965  }
2966  if (get_bits1(gb))
2967  dcdiff = -dcdiff;
2968  }
2969 
2970  /* Prediction */
2971  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2972  *dc_val = dcdiff;
2973 
2974  /* Store the quantized DC coeff, used for prediction */
2975 
2976  if (n < 4) {
2977  block[0] = dcdiff * s->y_dc_scale;
2978  } else {
2979  block[0] = dcdiff * s->c_dc_scale;
2980  }
2981 
2982  //AC Decoding
2983  i = 1;
2984 
2985  /* check if AC is needed at all and adjust direction if needed */
2986  if (!a_avail) dc_pred_dir = 1;
2987  if (!c_avail) dc_pred_dir = 0;
2988  if (!a_avail && !c_avail) use_pred = 0;
2989  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2990  ac_val2 = ac_val;
2991 
2992  scale = mquant * 2 + v->halfpq;
2993 
2994  if (dc_pred_dir) //left
2995  ac_val -= 16;
2996  else //top
2997  ac_val -= 16 * s->block_wrap[n];
2998 
2999  q1 = s->current_picture.f.qscale_table[mb_pos];
3000  if (dc_pred_dir && c_avail && mb_pos)
3001  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3002  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3003  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3004  if ( dc_pred_dir && n == 1)
3005  q2 = q1;
3006  if (!dc_pred_dir && n == 2)
3007  q2 = q1;
3008  if (n == 3) q2 = q1;
3009 
3010  if (coded) {
3011  int last = 0, skip, value;
3012  int k;
3013 
3014  while (!last) {
3015  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3016  i += skip;
3017  if (i > 63)
3018  break;
3019  if (v->fcm == PROGRESSIVE)
3020  block[v->zz_8x8[0][i++]] = value;
3021  else {
3022  if (use_pred && (v->fcm == ILACE_FRAME)) {
3023  if (!dc_pred_dir) // top
3024  block[v->zz_8x8[2][i++]] = value;
3025  else // left
3026  block[v->zz_8x8[3][i++]] = value;
3027  } else {
3028  block[v->zzi_8x8[i++]] = value;
3029  }
3030  }
3031  }
3032 
3033  /* apply AC prediction if needed */
3034  if (use_pred) {
3035  /* scale predictors if needed*/
3036  if (q2 && q1 != q2) {
3037  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3038  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3039 
3040  if (q1 < 1)
3041  return AVERROR_INVALIDDATA;
3042  if (dc_pred_dir) { // left
3043  for (k = 1; k < 8; k++)
3044  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3045  } else { //top
3046  for (k = 1; k < 8; k++)
3047  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3048  }
3049  } else {
3050  if (dc_pred_dir) { // left
3051  for (k = 1; k < 8; k++)
3052  block[k << v->left_blk_sh] += ac_val[k];
3053  } else { // top
3054  for (k = 1; k < 8; k++)
3055  block[k << v->top_blk_sh] += ac_val[k + 8];
3056  }
3057  }
3058  }
3059  /* save AC coeffs for further prediction */
3060  for (k = 1; k < 8; k++) {
3061  ac_val2[k ] = block[k << v->left_blk_sh];
3062  ac_val2[k + 8] = block[k << v->top_blk_sh];
3063  }
3064 
3065  /* scale AC coeffs */
3066  for (k = 1; k < 64; k++)
3067  if (block[k]) {
3068  block[k] *= scale;
3069  if (!v->pquantizer)
3070  block[k] += (block[k] < 0) ? -mquant : mquant;
3071  }
3072 
3073  if (use_pred) i = 63;
3074  } else { // no AC coeffs
3075  int k;
3076 
3077  memset(ac_val2, 0, 16 * 2);
3078  if (dc_pred_dir) { // left
3079  if (use_pred) {
3080  memcpy(ac_val2, ac_val, 8 * 2);
3081  if (q2 && q1 != q2) {
3082  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3083  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3084  if (q1 < 1)
3085  return AVERROR_INVALIDDATA;
3086  for (k = 1; k < 8; k++)
3087  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3088  }
3089  }
3090  } else { // top
3091  if (use_pred) {
3092  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3093  if (q2 && q1 != q2) {
3094  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3095  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3096  if (q1 < 1)
3097  return AVERROR_INVALIDDATA;
3098  for (k = 1; k < 8; k++)
3099  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3100  }
3101  }
3102  }
3103 
3104  /* apply AC prediction if needed */
3105  if (use_pred) {
3106  if (dc_pred_dir) { // left
3107  for (k = 1; k < 8; k++) {
3108  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3109  if (!v->pquantizer && block[k << v->left_blk_sh])
3110  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3111  }
3112  } else { // top
3113  for (k = 1; k < 8; k++) {
3114  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3115  if (!v->pquantizer && block[k << v->top_blk_sh])
3116  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3117  }
3118  }
3119  i = 63;
3120  }
3121  }
3122  s->block_last_index[n] = i;
3123 
3124  return 0;
3125 }
3126 
3129 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3130  int mquant, int ttmb, int first_block,
3131  uint8_t *dst, int linesize, int skip_block,
3132  int *ttmb_out)
3133 {
3134  MpegEncContext *s = &v->s;
3135  GetBitContext *gb = &s->gb;
3136  int i, j;
3137  int subblkpat = 0;
3138  int scale, off, idx, last, skip, value;
3139  int ttblk = ttmb & 7;
3140  int pat = 0;
3141 
3142  s->dsp.clear_block(block);
3143 
3144  if (ttmb == -1) {
3146  }
3147  if (ttblk == TT_4X4) {
3148  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3149  }
3150  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3151  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3152  || (!v->res_rtm_flag && !first_block))) {
3153  subblkpat = decode012(gb);
3154  if (subblkpat)
3155  subblkpat ^= 3; // swap decoded pattern bits
3156  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3157  ttblk = TT_8X4;
3158  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3159  ttblk = TT_4X8;
3160  }
3161  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3162 
3163  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3164  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3165  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3166  ttblk = TT_8X4;
3167  }
3168  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3169  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3170  ttblk = TT_4X8;
3171  }
3172  switch (ttblk) {
3173  case TT_8X8:
3174  pat = 0xF;
3175  i = 0;
3176  last = 0;
3177  while (!last) {
3178  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3179  i += skip;
3180  if (i > 63)
3181  break;
3182  if (!v->fcm)
3183  idx = v->zz_8x8[0][i++];
3184  else
3185  idx = v->zzi_8x8[i++];
3186  block[idx] = value * scale;
3187  if (!v->pquantizer)
3188  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3189  }
3190  if (!skip_block) {
3191  if (i == 1)
3192  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3193  else {
3194  v->vc1dsp.vc1_inv_trans_8x8(block);
3195  s->dsp.add_pixels_clamped(block, dst, linesize);
3196  }
3197  }
3198  break;
3199  case TT_4X4:
3200  pat = ~subblkpat & 0xF;
3201  for (j = 0; j < 4; j++) {
3202  last = subblkpat & (1 << (3 - j));
3203  i = 0;
3204  off = (j & 1) * 4 + (j & 2) * 16;
3205  while (!last) {
3206  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3207  i += skip;
3208  if (i > 15)
3209  break;
3210  if (!v->fcm)
3212  else
3213  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3214  block[idx + off] = value * scale;
3215  if (!v->pquantizer)
3216  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3217  }
3218  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3219  if (i == 1)
3220  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3221  else
3222  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3223  }
3224  }
3225  break;
3226  case TT_8X4:
3227  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3228  for (j = 0; j < 2; j++) {
3229  last = subblkpat & (1 << (1 - j));
3230  i = 0;
3231  off = j * 32;
3232  while (!last) {
3233  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3234  i += skip;
3235  if (i > 31)
3236  break;
3237  if (!v->fcm)
3238  idx = v->zz_8x4[i++] + off;
3239  else
3240  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3241  block[idx] = value * scale;
3242  if (!v->pquantizer)
3243  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3244  }
3245  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3246  if (i == 1)
3247  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3248  else
3249  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3250  }
3251  }
3252  break;
3253  case TT_4X8:
3254  pat = ~(subblkpat * 5) & 0xF;
3255  for (j = 0; j < 2; j++) {
3256  last = subblkpat & (1 << (1 - j));
3257  i = 0;
3258  off = j * 4;
3259  while (!last) {
3260  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3261  i += skip;
3262  if (i > 31)
3263  break;
3264  if (!v->fcm)
3265  idx = v->zz_4x8[i++] + off;
3266  else
3267  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3268  block[idx] = value * scale;
3269  if (!v->pquantizer)
3270  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3271  }
3272  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3273  if (i == 1)
3274  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3275  else
3276  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3277  }
3278  }
3279  break;
3280  }
3281  if (ttmb_out)
3282  *ttmb_out |= ttblk << (n * 4);
3283  return pat;
3284 }
3285  // Macroblock group
3287 
3288 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3289 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3290 
3292 {
3293  MpegEncContext *s = &v->s;
3294  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3295  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3296  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3297  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3298  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3299  uint8_t *dst;
3300 
3301  if (block_num > 3) {
3302  dst = s->dest[block_num - 3];
3303  } else {
3304  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3305  }
3306  if (s->mb_y != s->end_mb_y || block_num < 2) {
3307  int16_t (*mv)[2];
3308  int mv_stride;
3309 
3310  if (block_num > 3) {
3311  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3312  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3313  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3314  mv_stride = s->mb_stride;
3315  } else {
3316  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3317  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3318  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3319  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3320  mv_stride = s->b8_stride;
3321  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3322  }
3323 
3324  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3325  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3326  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3327  } else {
3328  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3329  if (idx == 3) {
3330  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3331  } else if (idx) {
3332  if (idx == 1)
3333  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3334  else
3335  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3336  }
3337  }
3338  }
3339 
3340  dst -= 4 * linesize;
3341  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3342  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3343  idx = (block_cbp | (block_cbp >> 2)) & 3;
3344  if (idx == 3) {
3345  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3346  } else if (idx) {
3347  if (idx == 1)
3348  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3349  else
3350  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3351  }
3352  }
3353 }
3354 
3356 {
3357  MpegEncContext *s = &v->s;
3358  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3359  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3360  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3361  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3362  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3363  uint8_t *dst;
3364 
3365  if (block_num > 3) {
3366  dst = s->dest[block_num - 3] - 8 * linesize;
3367  } else {
3368  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3369  }
3370 
3371  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3372  int16_t (*mv)[2];
3373 
3374  if (block_num > 3) {
3375  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3376  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3377  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3378  } else {
3379  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3380  : (mb_cbp >> ((block_num + 1) * 4));
3381  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3382  : (mb_is_intra >> ((block_num + 1) * 4));
3383  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3384  }
3385  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3386  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3387  } else {
3388  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3389  if (idx == 5) {
3390  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3391  } else if (idx) {
3392  if (idx == 1)
3393  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3394  else
3395  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3396  }
3397  }
3398  }
3399 
3400  dst -= 4;
3401  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3402  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3403  idx = (block_cbp | (block_cbp >> 1)) & 5;
3404  if (idx == 5) {
3405  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3406  } else if (idx) {
3407  if (idx == 1)
3408  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3409  else
3410  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3411  }
3412  }
3413 }
3414 
3416 {
3417  MpegEncContext *s = &v->s;
3418  int i;
3419 
3420  for (i = 0; i < 6; i++) {
3422  }
3423 
3424  /* V always precedes H, therefore we run H one MB before V;
3425  * at the end of a row, we catch up to complete the row */
3426  if (s->mb_x) {
3427  for (i = 0; i < 6; i++) {
3429  }
3430  if (s->mb_x == s->mb_width - 1) {
3431  s->mb_x++;
3433  for (i = 0; i < 6; i++) {
3435  }
3436  }
3437  }
3438 }
3439 
3443 {
3444  MpegEncContext *s = &v->s;
3445  GetBitContext *gb = &s->gb;
3446  int i, j;
3447  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3448  int cbp; /* cbp decoding stuff */
3449  int mqdiff, mquant; /* MB quantization */
3450  int ttmb = v->ttfrm; /* MB Transform type */
3451 
3452  int mb_has_coeffs = 1; /* last_flag */
3453  int dmv_x, dmv_y; /* Differential MV components */
3454  int index, index1; /* LUT indexes */
3455  int val, sign; /* temp values */
3456  int first_block = 1;
3457  int dst_idx, off;
3458  int skipped, fourmv;
3459  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3460 
3461  mquant = v->pq; /* lossy initialization */
3462 
3463  if (v->mv_type_is_raw)
3464  fourmv = get_bits1(gb);
3465  else
3466  fourmv = v->mv_type_mb_plane[mb_pos];
3467  if (v->skip_is_raw)
3468  skipped = get_bits1(gb);
3469  else
3470  skipped = v->s.mbskip_table[mb_pos];
3471 
3472  if (!fourmv) { /* 1MV mode */
3473  if (!skipped) {
3474  GET_MVDATA(dmv_x, dmv_y);
3475 
3476  if (s->mb_intra) {
3477  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3478  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3479  }
3481  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3482 
3483  /* FIXME Set DC val for inter block ? */
3484  if (s->mb_intra && !mb_has_coeffs) {
3485  GET_MQUANT();
3486  s->ac_pred = get_bits1(gb);
3487  cbp = 0;
3488  } else if (mb_has_coeffs) {
3489  if (s->mb_intra)
3490  s->ac_pred = get_bits1(gb);
3491  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3492  GET_MQUANT();
3493  } else {
3494  mquant = v->pq;
3495  cbp = 0;
3496  }
3497  s->current_picture.f.qscale_table[mb_pos] = mquant;
3498 
3499  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3500  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3501  VC1_TTMB_VLC_BITS, 2);
3502  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3503  dst_idx = 0;
3504  for (i = 0; i < 6; i++) {
3505  s->dc_val[0][s->block_index[i]] = 0;
3506  dst_idx += i >> 2;
3507  val = ((cbp >> (5 - i)) & 1);
3508  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3509  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3510  if (s->mb_intra) {
3511  /* check if prediction blocks A and C are available */
3512  v->a_avail = v->c_avail = 0;
3513  if (i == 2 || i == 3 || !s->first_slice_line)
3514  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3515  if (i == 1 || i == 3 || s->mb_x)
3516  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3517 
3518  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3519  (i & 4) ? v->codingset2 : v->codingset);
3520  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3521  continue;
3522  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3523  if (v->rangeredfrm)
3524  for (j = 0; j < 64; j++)
3525  s->block[i][j] <<= 1;
3526  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3527  if (v->pq >= 9 && v->overlap) {
3528  if (v->c_avail)
3529  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3530  if (v->a_avail)
3531  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3532  }
3533  block_cbp |= 0xF << (i << 2);
3534  block_intra |= 1 << i;
3535  } else if (val) {
3536  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3537  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3538  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3539  block_cbp |= pat << (i << 2);
3540  if (!v->ttmbf && ttmb < 8)
3541  ttmb = -1;
3542  first_block = 0;
3543  }
3544  }
3545  } else { // skipped
3546  s->mb_intra = 0;
3547  for (i = 0; i < 6; i++) {
3548  v->mb_type[0][s->block_index[i]] = 0;
3549  s->dc_val[0][s->block_index[i]] = 0;
3550  }
3551  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3552  s->current_picture.f.qscale_table[mb_pos] = 0;
3553  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3554  vc1_mc_1mv(v, 0);
3555  }
3556  } else { // 4MV mode
3557  if (!skipped /* unskipped MB */) {
3558  int intra_count = 0, coded_inter = 0;
3559  int is_intra[6], is_coded[6];
3560  /* Get CBPCY */
3561  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3562  for (i = 0; i < 6; i++) {
3563  val = ((cbp >> (5 - i)) & 1);
3564  s->dc_val[0][s->block_index[i]] = 0;
3565  s->mb_intra = 0;
3566  if (i < 4) {
3567  dmv_x = dmv_y = 0;
3568  s->mb_intra = 0;
3569  mb_has_coeffs = 0;
3570  if (val) {
3571  GET_MVDATA(dmv_x, dmv_y);
3572  }
3573  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3574  if (!s->mb_intra)
3575  vc1_mc_4mv_luma(v, i, 0);
3576  intra_count += s->mb_intra;
3577  is_intra[i] = s->mb_intra;
3578  is_coded[i] = mb_has_coeffs;
3579  }
3580  if (i & 4) {
3581  is_intra[i] = (intra_count >= 3);
3582  is_coded[i] = val;
3583  }
3584  if (i == 4)
3585  vc1_mc_4mv_chroma(v, 0);
3586  v->mb_type[0][s->block_index[i]] = is_intra[i];
3587  if (!coded_inter)
3588  coded_inter = !is_intra[i] & is_coded[i];
3589  }
3590  // if there are no coded blocks then don't do anything more
3591  dst_idx = 0;
3592  if (!intra_count && !coded_inter)
3593  goto end;
3594  GET_MQUANT();
3595  s->current_picture.f.qscale_table[mb_pos] = mquant;
3596  /* test if block is intra and has pred */
3597  {
3598  int intrapred = 0;
3599  for (i = 0; i < 6; i++)
3600  if (is_intra[i]) {
3601  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3602  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3603  intrapred = 1;
3604  break;
3605  }
3606  }
3607  if (intrapred)
3608  s->ac_pred = get_bits1(gb);
3609  else
3610  s->ac_pred = 0;
3611  }
3612  if (!v->ttmbf && coded_inter)
3614  for (i = 0; i < 6; i++) {
3615  dst_idx += i >> 2;
3616  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3617  s->mb_intra = is_intra[i];
3618  if (is_intra[i]) {
3619  /* check if prediction blocks A and C are available */
3620  v->a_avail = v->c_avail = 0;
3621  if (i == 2 || i == 3 || !s->first_slice_line)
3622  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3623  if (i == 1 || i == 3 || s->mb_x)
3624  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3625 
3626  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3627  (i & 4) ? v->codingset2 : v->codingset);
3628  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3629  continue;
3630  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3631  if (v->rangeredfrm)
3632  for (j = 0; j < 64; j++)
3633  s->block[i][j] <<= 1;
3634  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3635  (i & 4) ? s->uvlinesize : s->linesize);
3636  if (v->pq >= 9 && v->overlap) {
3637  if (v->c_avail)
3638  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3639  if (v->a_avail)
3640  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3641  }
3642  block_cbp |= 0xF << (i << 2);
3643  block_intra |= 1 << i;
3644  } else if (is_coded[i]) {
3645  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3646  first_block, s->dest[dst_idx] + off,
3647  (i & 4) ? s->uvlinesize : s->linesize,
3648  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3649  &block_tt);
3650  block_cbp |= pat << (i << 2);
3651  if (!v->ttmbf && ttmb < 8)
3652  ttmb = -1;
3653  first_block = 0;
3654  }
3655  }
3656  } else { // skipped MB
3657  s->mb_intra = 0;
3658  s->current_picture.f.qscale_table[mb_pos] = 0;
3659  for (i = 0; i < 6; i++) {
3660  v->mb_type[0][s->block_index[i]] = 0;
3661  s->dc_val[0][s->block_index[i]] = 0;
3662  }
3663  for (i = 0; i < 4; i++) {
3664  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3665  vc1_mc_4mv_luma(v, i, 0);
3666  }
3667  vc1_mc_4mv_chroma(v, 0);
3668  s->current_picture.f.qscale_table[mb_pos] = 0;
3669  }
3670  }
3671 end:
3672  v->cbp[s->mb_x] = block_cbp;
3673  v->ttblk[s->mb_x] = block_tt;
3674  v->is_intra[s->mb_x] = block_intra;
3675 
3676  return 0;
3677 }
3678 
3679 /* Decode one macroblock in an interlaced frame p picture */
3680 
3682 {
3683  MpegEncContext *s = &v->s;
3684  GetBitContext *gb = &s->gb;
3685  int i;
3686  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3687  int cbp = 0; /* cbp decoding stuff */
3688  int mqdiff, mquant; /* MB quantization */
3689  int ttmb = v->ttfrm; /* MB Transform type */
3690 
3691  int mb_has_coeffs = 1; /* last_flag */
3692  int dmv_x, dmv_y; /* Differential MV components */
3693  int val; /* temp value */
3694  int first_block = 1;
3695  int dst_idx, off;
3696  int skipped, fourmv = 0, twomv = 0;
3697  int block_cbp = 0, pat, block_tt = 0;
3698  int idx_mbmode = 0, mvbp;
3699  int stride_y, fieldtx;
3700 
3701  mquant = v->pq; /* Loosy initialization */
3702 
3703  if (v->skip_is_raw)
3704  skipped = get_bits1(gb);
3705  else
3706  skipped = v->s.mbskip_table[mb_pos];
3707  if (!skipped) {
3708  if (v->fourmvswitch)
3709  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3710  else
3711  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3712  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3713  /* store the motion vector type in a flag (useful later) */
3714  case MV_PMODE_INTFR_4MV:
3715  fourmv = 1;
3716  v->blk_mv_type[s->block_index[0]] = 0;
3717  v->blk_mv_type[s->block_index[1]] = 0;
3718  v->blk_mv_type[s->block_index[2]] = 0;
3719  v->blk_mv_type[s->block_index[3]] = 0;
3720  break;
3722  fourmv = 1;
3723  v->blk_mv_type[s->block_index[0]] = 1;
3724  v->blk_mv_type[s->block_index[1]] = 1;
3725  v->blk_mv_type[s->block_index[2]] = 1;
3726  v->blk_mv_type[s->block_index[3]] = 1;
3727  break;
3729  twomv = 1;
3730  v->blk_mv_type[s->block_index[0]] = 1;
3731  v->blk_mv_type[s->block_index[1]] = 1;
3732  v->blk_mv_type[s->block_index[2]] = 1;
3733  v->blk_mv_type[s->block_index[3]] = 1;
3734  break;
3735  case MV_PMODE_INTFR_1MV:
3736  v->blk_mv_type[s->block_index[0]] = 0;
3737  v->blk_mv_type[s->block_index[1]] = 0;
3738  v->blk_mv_type[s->block_index[2]] = 0;
3739  v->blk_mv_type[s->block_index[3]] = 0;
3740  break;
3741  }
3742  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3743  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3744  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3745  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3746  s->mb_intra = v->is_intra[s->mb_x] = 1;
3747  for (i = 0; i < 6; i++)
3748  v->mb_type[0][s->block_index[i]] = 1;
3749  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3750  mb_has_coeffs = get_bits1(gb);
3751  if (mb_has_coeffs)
3752  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3753  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3754  GET_MQUANT();
3755  s->current_picture.f.qscale_table[mb_pos] = mquant;
3756  /* Set DC scale - y and c use the same (not sure if necessary here) */
3757  s->y_dc_scale = s->y_dc_scale_table[mquant];
3758  s->c_dc_scale = s->c_dc_scale_table[mquant];
3759  dst_idx = 0;
3760  for (i = 0; i < 6; i++) {
3761  s->dc_val[0][s->block_index[i]] = 0;
3762  dst_idx += i >> 2;
3763  val = ((cbp >> (5 - i)) & 1);
3764  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3765  v->a_avail = v->c_avail = 0;
3766  if (i == 2 || i == 3 || !s->first_slice_line)
3767  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3768  if (i == 1 || i == 3 || s->mb_x)
3769  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3770 
3771  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3772  (i & 4) ? v->codingset2 : v->codingset);
3773  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3774  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3775  if (i < 4) {
3776  stride_y = s->linesize << fieldtx;
3777  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3778  } else {
3779  stride_y = s->uvlinesize;
3780  off = 0;
3781  }
3782  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3783  //TODO: loop filter
3784  }
3785 
3786  } else { // inter MB
3787  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3788  if (mb_has_coeffs)
3789  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3790  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3792  } else {
3793  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3794  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3796  }
3797  }
3798  s->mb_intra = v->is_intra[s->mb_x] = 0;
3799  for (i = 0; i < 6; i++)
3800  v->mb_type[0][s->block_index[i]] = 0;
3801  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3802  /* for all motion vector read MVDATA and motion compensate each block */
3803  dst_idx = 0;
3804  if (fourmv) {
3805  mvbp = v->fourmvbp;
3806  for (i = 0; i < 6; i++) {
3807  if (i < 4) {
3808  dmv_x = dmv_y = 0;
3809  val = ((mvbp >> (3 - i)) & 1);
3810  if (val) {
3811  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3812  }
3813  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3814  vc1_mc_4mv_luma(v, i, 0);
3815  } else if (i == 4) {
3816  vc1_mc_4mv_chroma4(v);
3817  }
3818  }
3819  } else if (twomv) {
3820  mvbp = v->twomvbp;
3821  dmv_x = dmv_y = 0;
3822  if (mvbp & 2) {
3823  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3824  }
3825  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3826  vc1_mc_4mv_luma(v, 0, 0);
3827  vc1_mc_4mv_luma(v, 1, 0);
3828  dmv_x = dmv_y = 0;
3829  if (mvbp & 1) {
3830  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3831  }
3832  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3833  vc1_mc_4mv_luma(v, 2, 0);
3834  vc1_mc_4mv_luma(v, 3, 0);
3835  vc1_mc_4mv_chroma4(v);
3836  } else {
3837  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3838  if (mvbp) {
3839  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3840  }
3841  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3842  vc1_mc_1mv(v, 0);
3843  }
3844  if (cbp)
3845  GET_MQUANT(); // p. 227
3846  s->current_picture.f.qscale_table[mb_pos] = mquant;
3847  if (!v->ttmbf && cbp)
3849  for (i = 0; i < 6; i++) {
3850  s->dc_val[0][s->block_index[i]] = 0;
3851  dst_idx += i >> 2;
3852  val = ((cbp >> (5 - i)) & 1);
3853  if (!fieldtx)
3854  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3855  else
3856  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3857  if (val) {
3858  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3859  first_block, s->dest[dst_idx] + off,
3860  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3861  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3862  block_cbp |= pat << (i << 2);
3863  if (!v->ttmbf && ttmb < 8)
3864  ttmb = -1;
3865  first_block = 0;
3866  }
3867  }
3868  }
3869  } else { // skipped
3870  s->mb_intra = v->is_intra[s->mb_x] = 0;
3871  for (i = 0; i < 6; i++) {
3872  v->mb_type[0][s->block_index[i]] = 0;
3873  s->dc_val[0][s->block_index[i]] = 0;
3874  }
3875  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3876  s->current_picture.f.qscale_table[mb_pos] = 0;
3877  v->blk_mv_type[s->block_index[0]] = 0;
3878  v->blk_mv_type[s->block_index[1]] = 0;
3879  v->blk_mv_type[s->block_index[2]] = 0;
3880  v->blk_mv_type[s->block_index[3]] = 0;
3881  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3882  vc1_mc_1mv(v, 0);
3883  }
3884  if (s->mb_x == s->mb_width - 1)
3885  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3886  return 0;
3887 }
3888 
3890 {
3891  MpegEncContext *s = &v->s;
3892  GetBitContext *gb = &s->gb;
3893  int i;
3894  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3895  int cbp = 0; /* cbp decoding stuff */
3896  int mqdiff, mquant; /* MB quantization */
3897  int ttmb = v->ttfrm; /* MB Transform type */
3898 
3899  int mb_has_coeffs = 1; /* last_flag */
3900  int dmv_x, dmv_y; /* Differential MV components */
3901  int val; /* temp values */
3902  int first_block = 1;
3903  int dst_idx, off;
3904  int pred_flag;
3905  int block_cbp = 0, pat, block_tt = 0;
3906  int idx_mbmode = 0;
3907 
3908  mquant = v->pq; /* Loosy initialization */
3909 
3910  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3911  if (idx_mbmode <= 1) { // intra MB
3912  s->mb_intra = v->is_intra[s->mb_x] = 1;
3913  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
3914  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
3915  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
3916  GET_MQUANT();
3917  s->current_picture.f.qscale_table[mb_pos] = mquant;
3918  /* Set DC scale - y and c use the same (not sure if necessary here) */
3919  s->y_dc_scale = s->y_dc_scale_table[mquant];
3920  s->c_dc_scale = s->c_dc_scale_table[mquant];
3921  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3922  mb_has_coeffs = idx_mbmode & 1;
3923  if (mb_has_coeffs)
3924  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
3925  dst_idx = 0;
3926  for (i = 0; i < 6; i++) {
3927  s->dc_val[0][s->block_index[i]] = 0;
3928  v->mb_type[0][s->block_index[i]] = 1;
3929  dst_idx += i >> 2;
3930  val = ((cbp >> (5 - i)) & 1);
3931  v->a_avail = v->c_avail = 0;
3932  if (i == 2 || i == 3 || !s->first_slice_line)
3933  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3934  if (i == 1 || i == 3 || s->mb_x)
3935  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3936 
3937  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3938  (i & 4) ? v->codingset2 : v->codingset);
3939  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3940  continue;
3941  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3942  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3943  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
3944  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
3945  // TODO: loop filter
3946  }
3947  } else {
3948  s->mb_intra = v->is_intra[s->mb_x] = 0;
3949  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
3950  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
3951  if (idx_mbmode <= 5) { // 1-MV
3952  dmv_x = dmv_y = 0;
3953  if (idx_mbmode & 1) {
3954  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3955  }
3956  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3957  vc1_mc_1mv(v, 0);
3958  mb_has_coeffs = !(idx_mbmode & 2);
3959  } else { // 4-MV
3961  for (i = 0; i < 6; i++) {
3962  if (i < 4) {
3963  dmv_x = dmv_y = pred_flag = 0;
3964  val = ((v->fourmvbp >> (3 - i)) & 1);
3965  if (val) {
3966  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3967  }
3968  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3969  vc1_mc_4mv_luma(v, i, 0);
3970  } else if (i == 4)
3971  vc1_mc_4mv_chroma(v, 0);
3972  }
3973  mb_has_coeffs = idx_mbmode & 1;
3974  }
3975  if (mb_has_coeffs)
3976  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3977  if (cbp) {
3978  GET_MQUANT();
3979  }
3980  s->current_picture.f.qscale_table[mb_pos] = mquant;
3981  if (!v->ttmbf && cbp) {
3983  }
3984  dst_idx = 0;
3985  for (i = 0; i < 6; i++) {
3986  s->dc_val[0][s->block_index[i]] = 0;
3987  dst_idx += i >> 2;
3988  val = ((cbp >> (5 - i)) & 1);
3989  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
3990  if (v->cur_field_type)
3991  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
3992  if (val) {
3993  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3994  first_block, s->dest[dst_idx] + off,
3995  (i & 4) ? s->uvlinesize : s->linesize,
3996  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3997  &block_tt);
3998  block_cbp |= pat << (i << 2);
3999  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4000  first_block = 0;
4001  }
4002  }
4003  }
4004  if (s->mb_x == s->mb_width - 1)
4005  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4006  return 0;
4007 }
4008 
4012 {
4013  MpegEncContext *s = &v->s;
4014  GetBitContext *gb = &s->gb;
4015  int i, j;
4016  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4017  int cbp = 0; /* cbp decoding stuff */
4018  int mqdiff, mquant; /* MB quantization */
4019  int ttmb = v->ttfrm; /* MB Transform type */
4020  int mb_has_coeffs = 0; /* last_flag */
4021  int index, index1; /* LUT indexes */
4022  int val, sign; /* temp values */
4023  int first_block = 1;
4024  int dst_idx, off;
4025  int skipped, direct;
4026  int dmv_x[2], dmv_y[2];
4027  int bmvtype = BMV_TYPE_BACKWARD;
4028 
4029  mquant = v->pq; /* lossy initialization */
4030  s->mb_intra = 0;
4031 
4032  if (v->dmb_is_raw)
4033  direct = get_bits1(gb);
4034  else
4035  direct = v->direct_mb_plane[mb_pos];
4036  if (v->skip_is_raw)
4037  skipped = get_bits1(gb);
4038  else
4039  skipped = v->s.mbskip_table[mb_pos];
4040 
4041  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4042  for (i = 0; i < 6; i++) {
4043  v->mb_type[0][s->block_index[i]] = 0;
4044  s->dc_val[0][s->block_index[i]] = 0;
4045  }
4046  s->current_picture.f.qscale_table[mb_pos] = 0;
4047 
4048  if (!direct) {
4049  if (!skipped) {
4050  GET_MVDATA(dmv_x[0], dmv_y[0]);
4051  dmv_x[1] = dmv_x[0];
4052  dmv_y[1] = dmv_y[0];
4053  }
4054  if (skipped || !s->mb_intra) {
4055  bmvtype = decode012(gb);
4056  switch (bmvtype) {
4057  case 0:
4058  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4059  break;
4060  case 1:
4061  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4062  break;
4063  case 2:
4064  bmvtype = BMV_TYPE_INTERPOLATED;
4065  dmv_x[0] = dmv_y[0] = 0;
4066  }
4067  }
4068  }
4069  for (i = 0; i < 6; i++)
4070  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4071 
4072  if (skipped) {
4073  if (direct)
4074  bmvtype = BMV_TYPE_INTERPOLATED;
4075  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4076  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4077  return;
4078  }
4079  if (direct) {
4080  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4081  GET_MQUANT();
4082  s->mb_intra = 0;
4083  s->current_picture.f.qscale_table[mb_pos] = mquant;
4084  if (!v->ttmbf)
4086  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4087  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4088  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4089  } else {
4090  if (!mb_has_coeffs && !s->mb_intra) {
4091  /* no coded blocks - effectively skipped */
4092  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4093  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4094  return;
4095  }
4096  if (s->mb_intra && !mb_has_coeffs) {
4097  GET_MQUANT();
4098  s->current_picture.f.qscale_table[mb_pos] = mquant;
4099  s->ac_pred = get_bits1(gb);
4100  cbp = 0;
4101  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4102  } else {
4103  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4104  GET_MVDATA(dmv_x[0], dmv_y[0]);
4105  if (!mb_has_coeffs) {
4106  /* interpolated skipped block */
4107  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4108  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4109  return;
4110  }
4111  }
4112  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4113  if (!s->mb_intra) {
4114  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4115  }
4116  if (s->mb_intra)
4117  s->ac_pred = get_bits1(gb);
4118  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4119  GET_MQUANT();
4120  s->current_picture.f.qscale_table[mb_pos] = mquant;
4121  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4123  }
4124  }
4125  dst_idx = 0;
4126  for (i = 0; i < 6; i++) {
4127  s->dc_val[0][s->block_index[i]] = 0;
4128  dst_idx += i >> 2;
4129  val = ((cbp >> (5 - i)) & 1);
4130  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4131  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4132  if (s->mb_intra) {
4133  /* check if prediction blocks A and C are available */
4134  v->a_avail = v->c_avail = 0;
4135  if (i == 2 || i == 3 || !s->first_slice_line)
4136  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4137  if (i == 1 || i == 3 || s->mb_x)
4138  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4139 
4140  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4141  (i & 4) ? v->codingset2 : v->codingset);
4142  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4143  continue;
4144  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4145  if (v->rangeredfrm)
4146  for (j = 0; j < 64; j++)
4147  s->block[i][j] <<= 1;
4148  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4149  } else if (val) {
4150  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4151  first_block, s->dest[dst_idx] + off,
4152  (i & 4) ? s->uvlinesize : s->linesize,
4153  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4154  if (!v->ttmbf && ttmb < 8)
4155  ttmb = -1;
4156  first_block = 0;
4157  }
4158  }
4159 }
4160 
4164 {
4165  MpegEncContext *s = &v->s;
4166  GetBitContext *gb = &s->gb;
4167  int i, j;
4168  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4169  int cbp = 0; /* cbp decoding stuff */
4170  int mqdiff, mquant; /* MB quantization */
4171  int ttmb = v->ttfrm; /* MB Transform type */
4172  int mb_has_coeffs = 0; /* last_flag */
4173  int val; /* temp value */
4174  int first_block = 1;
4175  int dst_idx, off;
4176  int fwd;
4177  int dmv_x[2], dmv_y[2], pred_flag[2];
4178  int bmvtype = BMV_TYPE_BACKWARD;
4179  int idx_mbmode, interpmvp;
4180 
4181  mquant = v->pq; /* Loosy initialization */
4182  s->mb_intra = 0;
4183 
4184  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4185  if (idx_mbmode <= 1) { // intra MB
4186  s->mb_intra = v->is_intra[s->mb_x] = 1;
4187  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4188  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4189  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4190  GET_MQUANT();
4191  s->current_picture.f.qscale_table[mb_pos] = mquant;
4192  /* Set DC scale - y and c use the same (not sure if necessary here) */
4193  s->y_dc_scale = s->y_dc_scale_table[mquant];
4194  s->c_dc_scale = s->c_dc_scale_table[mquant];
4195  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4196  mb_has_coeffs = idx_mbmode & 1;
4197  if (mb_has_coeffs)
4198  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4199  dst_idx = 0;
4200  for (i = 0; i < 6; i++) {
4201  s->dc_val[0][s->block_index[i]] = 0;
4202  dst_idx += i >> 2;
4203  val = ((cbp >> (5 - i)) & 1);
4204  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4205  v->a_avail = v->c_avail = 0;
4206  if (i == 2 || i == 3 || !s->first_slice_line)
4207  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4208  if (i == 1 || i == 3 || s->mb_x)
4209  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4210 
4211  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4212  (i & 4) ? v->codingset2 : v->codingset);
4213  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4214  continue;
4215  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4216  if (v->rangeredfrm)
4217  for (j = 0; j < 64; j++)
4218  s->block[i][j] <<= 1;
4219  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4220  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4221  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4222  // TODO: yet to perform loop filter
4223  }
4224  } else {
4225  s->mb_intra = v->is_intra[s->mb_x] = 0;
4226  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4227  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4228  if (v->fmb_is_raw)
4229  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4230  else
4231  fwd = v->forward_mb_plane[mb_pos];
4232  if (idx_mbmode <= 5) { // 1-MV
4233  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4234  pred_flag[0] = pred_flag[1] = 0;
4235  if (fwd)
4236  bmvtype = BMV_TYPE_FORWARD;
4237  else {
4238  bmvtype = decode012(gb);
4239  switch (bmvtype) {
4240  case 0:
4241  bmvtype = BMV_TYPE_BACKWARD;
4242  break;
4243  case 1:
4244  bmvtype = BMV_TYPE_DIRECT;
4245  break;
4246  case 2:
4247  bmvtype = BMV_TYPE_INTERPOLATED;
4248  interpmvp = get_bits1(gb);
4249  }
4250  }
4251  v->bmvtype = bmvtype;
4252  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4253  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4254  }
4255  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4256  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4257  }
4258  if (bmvtype == BMV_TYPE_DIRECT) {
4259  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4260  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4261  }
4262  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4263  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4264  mb_has_coeffs = !(idx_mbmode & 2);
4265  } else { // 4-MV
4266  if (fwd)
4267  bmvtype = BMV_TYPE_FORWARD;
4268  v->bmvtype = bmvtype;
4270  for (i = 0; i < 6; i++) {
4271  if (i < 4) {
4272  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4273  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4274  val = ((v->fourmvbp >> (3 - i)) & 1);
4275  if (val) {
4276  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4277  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4278  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4279  }
4280  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4281  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4282  } else if (i == 4)
4283  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4284  }
4285  mb_has_coeffs = idx_mbmode & 1;
4286  }
4287  if (mb_has_coeffs)
4288  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4289  if (cbp) {
4290  GET_MQUANT();
4291  }
4292  s->current_picture.f.qscale_table[mb_pos] = mquant;
4293  if (!v->ttmbf && cbp) {
4295  }
4296  dst_idx = 0;
4297  for (i = 0; i < 6; i++) {
4298  s->dc_val[0][s->block_index[i]] = 0;
4299  dst_idx += i >> 2;
4300  val = ((cbp >> (5 - i)) & 1);
4301  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4302  if (v->cur_field_type)
4303  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4304  if (val) {
4305  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4306  first_block, s->dest[dst_idx] + off,
4307  (i & 4) ? s->uvlinesize : s->linesize,
4308  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4309  if (!v->ttmbf && ttmb < 8)
4310  ttmb = -1;
4311  first_block = 0;
4312  }
4313  }
4314  }
4315 }
4316 
4320 {
4321  int k, j;
4322  MpegEncContext *s = &v->s;
4323  int cbp, val;
4324  uint8_t *coded_val;
4325  int mb_pos;
4326 
4327  /* select codingmode used for VLC tables selection */
4328  switch (v->y_ac_table_index) {
4329  case 0:
4331  break;
4332  case 1:
4334  break;
4335  case 2:
4337  break;
4338  }
4339 
4340  switch (v->c_ac_table_index) {
4341  case 0:
4343  break;
4344  case 1:
4346  break;
4347  case 2:
4349  break;
4350  }
4351 
4352  /* Set DC scale - y and c use the same */
4353  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4354  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4355 
4356  //do frame decode
4357  s->mb_x = s->mb_y = 0;
4358  s->mb_intra = 1;
4359  s->first_slice_line = 1;
4360  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4361  s->mb_x = 0;
4363  for (; s->mb_x < s->mb_width; s->mb_x++) {
4364  uint8_t *dst[6];
4366  dst[0] = s->dest[0];
4367  dst[1] = dst[0] + 8;
4368  dst[2] = s->dest[0] + s->linesize * 8;
4369  dst[3] = dst[2] + 8;
4370  dst[4] = s->dest[1];
4371  dst[5] = s->dest[2];
4372  s->dsp.clear_blocks(s->block[0]);
4373  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4374  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4375  s->current_picture.f.qscale_table[mb_pos] = v->pq;
4376  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4377  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4378 
4379  // do actual MB decoding and displaying
4381  v->s.ac_pred = get_bits1(&v->s.gb);
4382 
4383  for (k = 0; k < 6; k++) {
4384  val = ((cbp >> (5 - k)) & 1);
4385 
4386  if (k < 4) {
4387  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4388  val = val ^ pred;
4389  *coded_val = val;
4390  }
4391  cbp |= val << (5 - k);
4392 
4393  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4394 
4395  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4396  continue;
4397  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4398  if (v->pq >= 9 && v->overlap) {
4399  if (v->rangeredfrm)
4400  for (j = 0; j < 64; j++)
4401  s->block[k][j] <<= 1;
4402  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4403  } else {
4404  if (v->rangeredfrm)
4405  for (j = 0; j < 64; j++)
4406  s->block[k][j] = (s->block[k][j] - 64) << 1;
4407  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4408  }
4409  }
4410 
4411  if (v->pq >= 9 && v->overlap) {
4412  if (s->mb_x) {
4413  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4414  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4415  if (!(s->flags & CODEC_FLAG_GRAY)) {
4416  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4417  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4418  }
4419  }
4420  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4421  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4422  if (!s->first_slice_line) {
4423  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4424  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4425  if (!(s->flags & CODEC_FLAG_GRAY)) {
4426  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4427  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4428  }
4429  }
4430  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4431  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4432  }
4433  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4434 
4435  if (get_bits_count(&s->gb) > v->bits) {
4436  ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4437  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4438  get_bits_count(&s->gb), v->bits);
4439  return;
4440  }
4441  }
4442  if (!v->s.loop_filter)
4443  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4444  else if (s->mb_y)
4445  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4446 
4447  s->first_slice_line = 0;
4448  }
4449  if (v->s.loop_filter)
4450  ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4451  ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4452 }
4453 
4457 {
4458  int k;
4459  MpegEncContext *s = &v->s;
4460  int cbp, val;
4461  uint8_t *coded_val;
4462  int mb_pos;
4463  int mquant = v->pq;
4464  int mqdiff;
4465  GetBitContext *gb = &s->gb;
4466 
4467  /* select codingmode used for VLC tables selection */
4468  switch (v->y_ac_table_index) {
4469  case 0:
4471  break;
4472  case 1:
4474  break;
4475  case 2:
4477  break;
4478  }
4479 
4480  switch (v->c_ac_table_index) {
4481  case 0:
4483  break;
4484  case 1:
4486  break;
4487  case 2:
4489  break;
4490  }
4491 
4492  // do frame decode
4493  s->mb_x = s->mb_y = 0;
4494  s->mb_intra = 1;
4495  s->first_slice_line = 1;
4496  s->mb_y = s->start_mb_y;
4497  if (s->start_mb_y) {
4498  s->mb_x = 0;
4500  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4501  (1 + s->b8_stride) * sizeof(*s->coded_block));
4502  }
4503  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4504  s->mb_x = 0;
4506  for (;s->mb_x < s->mb_width; s->mb_x++) {
4507  DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4509  s->dsp.clear_blocks(block[0]);
4510  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4511  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4512  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4513  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4514 
4515  // do actual MB decoding and displaying
4516  if (v->fieldtx_is_raw)
4517  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4519  if ( v->acpred_is_raw)
4520  v->s.ac_pred = get_bits1(&v->s.gb);
4521  else
4522  v->s.ac_pred = v->acpred_plane[mb_pos];
4523 
4524  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4525  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4526 
4527  GET_MQUANT();
4528 
4529  s->current_picture.f.qscale_table[mb_pos] = mquant;
4530  /* Set DC scale - y and c use the same */
4531  s->y_dc_scale = s->y_dc_scale_table[mquant];
4532  s->c_dc_scale = s->c_dc_scale_table[mquant];
4533 
4534  for (k = 0; k < 6; k++) {
4535  val = ((cbp >> (5 - k)) & 1);
4536 
4537  if (k < 4) {
4538  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4539  val = val ^ pred;
4540  *coded_val = val;
4541  }
4542  cbp |= val << (5 - k);
4543 
4544  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4545  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4546 
4547  vc1_decode_i_block_adv(v, block[k], k, val,
4548  (k < 4) ? v->codingset : v->codingset2, mquant);
4549 
4550  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4551  continue;
4553  }
4554 
4558 
4559  if (get_bits_count(&s->gb) > v->bits) {
4560  // TODO: may need modification to handle slice coding
4561  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4562  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4563  get_bits_count(&s->gb), v->bits);
4564  return;
4565  }
4566  }
4567  if (!v->s.loop_filter)
4568  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4569  else if (s->mb_y)
4570  ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4571  s->first_slice_line = 0;
4572  }
4573 
4574  /* raw bottom MB row */
4575  s->mb_x = 0;
4577  for (;s->mb_x < s->mb_width; s->mb_x++) {
4580  if (v->s.loop_filter)
4582  }
4583  if (v->s.loop_filter)
4584  ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4585  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4586  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4587 }
4588 
4590 {
4591  MpegEncContext *s = &v->s;
4592  int apply_loop_filter;
4593 
4594  /* select codingmode used for VLC tables selection */
4595  switch (v->c_ac_table_index) {
4596  case 0:
4598  break;
4599  case 1:
4601  break;
4602  case 2:
4604  break;
4605  }
4606 
4607  switch (v->c_ac_table_index) {
4608  case 0:
4610  break;
4611  case 1:
4613  break;
4614  case 2:
4616  break;
4617  }
4618 
4619  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4620  s->first_slice_line = 1;
4621  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4622  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4623  s->mb_x = 0;
4625  for (; s->mb_x < s->mb_width; s->mb_x++) {
4627 
4628  if (v->fcm == ILACE_FIELD)
4630  else if (v->fcm == ILACE_FRAME)
4632  else vc1_decode_p_mb(v);
4633  if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
4635  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4636  // TODO: may need modification to handle slice coding
4637  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4638  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4639  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4640  return;
4641  }
4642  }
4643  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4644  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4645  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4646  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4647  if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4648  s->first_slice_line = 0;
4649  }
4650  if (apply_loop_filter) {
4651  s->mb_x = 0;
4653  for (; s->mb_x < s->mb_width; s->mb_x++) {
4656  }
4657  }
4658  if (s->end_mb_y >= s->start_mb_y)
4659  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4660  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4661  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4662 }
4663 
4665 {
4666  MpegEncContext *s = &v->s;
4667 
4668  /* select codingmode used for VLC tables selection */
4669  switch (v->c_ac_table_index) {
4670  case 0:
4672  break;
4673  case 1:
4675  break;
4676  case 2:
4678  break;
4679  }
4680 
4681  switch (v->c_ac_table_index) {
4682  case 0:
4684  break;
4685  case 1:
4687  break;
4688  case 2:
4690  break;
4691  }
4692 
4693  s->first_slice_line = 1;
4694  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4695  s->mb_x = 0;
4697  for (; s->mb_x < s->mb_width; s->mb_x++) {
4699 
4700  if (v->fcm == ILACE_FIELD)
4702  else
4703  vc1_decode_b_mb(v);
4704  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4705  // TODO: may need modification to handle slice coding
4706  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4707  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4708  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4709  return;
4710  }
4711  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4712  }
4713  if (!v->s.loop_filter)
4714  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4715  else if (s->mb_y)
4716  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4717  s->first_slice_line = 0;
4718  }
4719  if (v->s.loop_filter)
4720  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4721  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4722  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4723 }
4724 
4726 {
4727  MpegEncContext *s = &v->s;
4728 
4729  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4730  s->first_slice_line = 1;
4731  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4732  s->mb_x = 0;
4735  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4736  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4737  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4738  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4739  s->first_slice_line = 0;
4740  }
4742 }
4743 
4745 {
4746 
4747  v->s.esc3_level_length = 0;
4748  if (v->x8_type) {
4749  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
4750  } else {
4751  v->cur_blk_idx = 0;
4752  v->left_blk_idx = -1;
4753  v->topleft_blk_idx = 1;
4754  v->top_blk_idx = 2;
4755  switch (v->s.pict_type) {
4756  case AV_PICTURE_TYPE_I:
4757  if (v->profile == PROFILE_ADVANCED)
4759  else
4761  break;
4762  case AV_PICTURE_TYPE_P:
4763  if (v->p_frame_skipped)
4765  else
4767  break;
4768  case AV_PICTURE_TYPE_B:
4769  if (v->bi_type) {
4770  if (v->profile == PROFILE_ADVANCED)
4772  else
4774  } else
4776  break;
4777  }
4778  }
4779 }
4780 
4781 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4782 
4783 typedef struct {
4795  int coefs[2][7];
4796 
4797  int effect_type, effect_flag;
4798  int effect_pcount1, effect_pcount2;
4799  int effect_params1[15], effect_params2[10];
4800 } SpriteData;
4801 
4802 static inline int get_fp_val(GetBitContext* gb)
4803 {
4804  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
4805 }
4806 
4807 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4808 {
4809  c[1] = c[3] = 0;
4810 
4811  switch (get_bits(gb, 2)) {
4812  case 0:
4813  c[0] = 1 << 16;
4814  c[2] = get_fp_val(gb);
4815  c[4] = 1 << 16;
4816  break;
4817  case 1:
4818  c[0] = c[4] = get_fp_val(gb);
4819  c[2] = get_fp_val(gb);
4820  break;
4821  case 2:
4822  c[0] = get_fp_val(gb);
4823  c[2] = get_fp_val(gb);
4824  c[4] = get_fp_val(gb);
4825  break;
4826  case 3:
4827  c[0] = get_fp_val(gb);
4828  c[1] = get_fp_val(gb);
4829  c[2] = get_fp_val(gb);
4830  c[3] = get_fp_val(gb);
4831  c[4] = get_fp_val(gb);
4832  break;
4833  }
4834  c[5] = get_fp_val(gb);
4835  if (get_bits1(gb))
4836  c[6] = get_fp_val(gb);
4837  else
4838  c[6] = 1 << 16;
4839 }
4840 
4841 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4842 {
4843  AVCodecContext *avctx = v->s.avctx;
4844  int sprite, i;
4845 
4846  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4847  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
4848  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4849  av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4850  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4851  for (i = 0; i < 7; i++)
4852  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4853  sd->coefs[sprite][i] / (1<<16),
4854  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4855  av_log(avctx, AV_LOG_DEBUG, "\n");
4856  }
4857 
4858  skip_bits(gb, 2);
4859  if (sd->effect_type = get_bits_long(gb, 30)) {
4860  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4861  case 7:
4862  vc1_sprite_parse_transform(gb, sd->effect_params1);
4863  break;
4864  case 14:
4865  vc1_sprite_parse_transform(gb, sd->effect_params1);
4866  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4867  break;
4868  default:
4869  for (i = 0; i < sd->effect_pcount1; i++)
4870  sd->effect_params1[i] = get_fp_val(gb);
4871  }
4872  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4873  // effect 13 is simple alpha blending and matches the opacity above
4874  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
4875  for (i = 0; i < sd->effect_pcount1; i++)
4876  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4877  sd->effect_params1[i] / (1 << 16),
4878  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4879  av_log(avctx, AV_LOG_DEBUG, "\n");
4880  }
4881 
4882  sd->effect_pcount2 = get_bits(gb, 16);
4883  if (sd->effect_pcount2 > 10) {
4884  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
4885  return;
4886  } else if (sd->effect_pcount2) {
4887  i = -1;
4888  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
4889  while (++i < sd->effect_pcount2) {
4890  sd->effect_params2[i] = get_fp_val(gb);
4891  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4892  sd->effect_params2[i] / (1 << 16),
4893  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
4894  }
4895  av_log(avctx, AV_LOG_DEBUG, "\n");
4896  }
4897  }
4898  if (sd->effect_flag = get_bits1(gb))
4899  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
4900 
4901  if (get_bits_count(gb) >= gb->size_in_bits +
4902  (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
4903  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
4904  if (get_bits_count(gb) < gb->size_in_bits - 8)
4905  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
4906 }
4907 
4908 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
4909 {
4910  int i, plane, row, sprite;
4911  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
4912  uint8_t* src_h[2][2];
4913  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
4914  int ysub[2];
4915  MpegEncContext *s = &v->s;
4916 
4917  for (i = 0; i < 2; i++) {
4918  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
4919  xadv[i] = sd->coefs[i][0];
4920  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
4921  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
4922 
4923  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
4924  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
4925  }
4926  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
4927 
4928  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
4929  int width = v->output_width>>!!plane;
4930 
4931  for (row = 0; row < v->output_height>>!!plane; row++) {
4932  uint8_t *dst = v->sprite_output_frame.data[plane] +
4933  v->sprite_output_frame.linesize[plane] * row;
4934 
4935  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4936  uint8_t *iplane = s->current_picture.f.data[plane];
4937  int iline = s->current_picture.f.linesize[plane];
4938  int ycoord = yoff[sprite] + yadv[sprite] * row;
4939  int yline = ycoord >> 16;
4940  ysub[sprite] = ycoord & 0xFFFF;
4941  if (sprite) {
4942  iplane = s->last_picture.f.data[plane];
4943  iline = s->last_picture.f.linesize[plane];
4944  }
4945  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
4946  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
4947  if (ysub[sprite])
4948  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
4949  } else {
4950  if (sr_cache[sprite][0] != yline) {
4951  if (sr_cache[sprite][1] == yline) {
4952  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
4953  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
4954  } else {
4955  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
4956  sr_cache[sprite][0] = yline;
4957  }
4958  }
4959  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
4960  v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
4961  sr_cache[sprite][1] = yline + 1;
4962  }
4963  src_h[sprite][0] = v->sr_rows[sprite][0];
4964  src_h[sprite][1] = v->sr_rows[sprite][1];
4965  }
4966  }
4967 
4968  if (!v->two_sprites) {
4969  if (ysub[0]) {
4970  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
4971  } else {
4972  memcpy(dst, src_h[0][0], width);
4973  }
4974  } else {
4975  if (ysub[0] && ysub[1]) {
4976  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
4977  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
4978  } else if (ysub[0]) {
4979  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
4980  src_h[1][0], alpha, width);
4981  } else if (ysub[1]) {
4982  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
4983  src_h[0][0], (1<<16)-1-alpha, width);
4984  } else {
4985  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
4986  }
4987  }
4988  }
4989 
4990  if (!plane) {
4991  for (i = 0; i < 2; i++) {
4992  xoff[i] >>= 1;
4993  yoff[i] >>= 1;
4994  }
4995  }
4996 
4997  }
4998 }
4999 
5000 
5001 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5002 {
5003  MpegEncContext *s = &v->s;
5004  AVCodecContext *avctx = s->avctx;
5005  SpriteData sd;
5006 
5007  vc1_parse_sprites(v, gb, &sd);
5008 
5009  if (!s->current_picture.f.data[0]) {
5010  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5011  return -1;
5012  }
5013 
5014  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5015  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5016  v->two_sprites = 0;
5017  }
5018 
5019  if (v->sprite_output_frame.data[0])
5020  avctx->release_buffer(avctx, &v->sprite_output_frame);
5021 
5024  if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5025  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5026  return -1;
5027  }
5028 
5029  vc1_draw_sprites(v, &sd);
5030 
5031  return 0;
5032 }
5033 
5034 static void vc1_sprite_flush(AVCodecContext *avctx)
5035 {
5036  VC1Context *v = avctx->priv_data;
5037  MpegEncContext *s = &v->s;
5038  AVFrame *f = &s->current_picture.f;
5039  int plane, i;
5040 
5041  /* Windows Media Image codecs have a convergence interval of two keyframes.
5042  Since we can't enforce it, clear to black the missing sprite. This is
5043  wrong but it looks better than doing nothing. */
5044 
5045  if (f->data[0])
5046  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5047  for (i = 0; i < v->sprite_height>>!!plane; i++)
5048  memset(f->data[plane] + i * f->linesize[plane],
5049  plane ? 128 : 0, f->linesize[plane]);
5050 }
5051 
5052 #endif
5053 
5055 {
5056  MpegEncContext *s = &v->s;
5057  int i;
5058 
5059  /* Allocate mb bitplanes */
5064  v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5066 
5067  v->n_allocated_blks = s->mb_width + 2;
5068  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5069  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5070  v->cbp = v->cbp_base + s->mb_stride;
5071  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5072  v->ttblk = v->ttblk_base + s->mb_stride;
5073  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5074  v->is_intra = v->is_intra_base + s->mb_stride;
5075  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5076  v->luma_mv = v->luma_mv_base + s->mb_stride;
5077 
5078  /* allocate block type info in that way so it could be used with s->block_index[] */
5079  v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5080  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5081  v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5082  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5083 
5084  /* allocate memory to store block level MV info */
5085  v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5086  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5087  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5088  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5089  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5090  v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5091  v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5092  v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5093  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5094  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5095  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5096 
5097  /* Init coded blocks info */
5098  if (v->profile == PROFILE_ADVANCED) {
5099 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5100 // return -1;
5101 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5102 // return -1;
5103  }
5104 
5105  ff_intrax8_common_init(&v->x8,s);
5106 
5108  for (i = 0; i < 4; i++)
5109  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5110  }
5111 
5112  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5113  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5114  !v->mb_type_base)
5115  return -1;
5116 
5117  return 0;
5118 }
5119 
5125 {
5126  VC1Context *v = avctx->priv_data;
5127  MpegEncContext *s = &v->s;
5128  GetBitContext gb;
5129  int i;
5130 
5131  /* save the container output size for WMImage */
5132  v->output_width = avctx->width;
5133  v->output_height = avctx->height;
5134 
5135  if (!avctx->extradata_size || !avctx->extradata)
5136  return -1;
5137  if (!(avctx->flags & CODEC_FLAG_GRAY))
5138  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5139  else
5140  avctx->pix_fmt = PIX_FMT_GRAY8;
5141  avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5142  v->s.avctx = avctx;
5143  avctx->flags |= CODEC_FLAG_EMU_EDGE;
5144  v->s.flags |= CODEC_FLAG_EMU_EDGE;
5145 
5146  if (avctx->idct_algo == FF_IDCT_AUTO) {
5147  avctx->idct_algo = FF_IDCT_WMV2;
5148  }
5149 
5150  if (ff_vc1_init_common(v) < 0)
5151  return -1;
5152  ff_vc1dsp_init(&v->vc1dsp);
5153 
5154  if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5155  int count = 0;
5156 
5157  // looks like WMV3 has a sequence header stored in the extradata
5158  // advanced sequence header may be before the first frame
5159  // the last byte of the extradata is a version number, 1 for the
5160  // samples we can decode
5161 
5162  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5163 
5164  if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
5165  return -1;
5166 
5167  count = avctx->extradata_size*8 - get_bits_count(&gb);
5168  if (count > 0) {
5169  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5170  count, get_bits(&gb, count));
5171  } else if (count < 0) {
5172  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5173  }
5174  } else { // VC1/WVC1/WVP2
5175  const uint8_t *start = avctx->extradata;
5176  uint8_t *end = avctx->extradata + avctx->extradata_size;
5177  const uint8_t *next;
5178  int size, buf2_size;
5179  uint8_t *buf2 = NULL;
5180  int seq_initialized = 0, ep_initialized = 0;
5181 
5182  if (avctx->extradata_size < 16) {
5183  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5184  return -1;
5185  }
5186 
5188  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5189  next = start;
5190  for (; next < end; start = next) {
5191  next = find_next_marker(start + 4, end);
5192  size = next - start - 4;
5193  if (size <= 0)
5194  continue;
5195  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5196  init_get_bits(&gb, buf2, buf2_size * 8);
5197  switch (AV_RB32(start)) {
5198  case VC1_CODE_SEQHDR:
5199  if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5200  av_free(buf2);
5201  return -1;
5202  }
5203  seq_initialized = 1;
5204  break;
5205  case VC1_CODE_ENTRYPOINT:
5206  if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
5207  av_free(buf2);
5208  return -1;
5209  }
5210  ep_initialized = 1;
5211  break;
5212  }
5213  }
5214  av_free(buf2);
5215  if (!seq_initialized || !ep_initialized) {
5216  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5217  return -1;
5218  }
5219  v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5220  }
5221 
5222  avctx->profile = v->profile;
5223  if (v->profile == PROFILE_ADVANCED)
5224  avctx->level = v->level;
5225 
5226  avctx->has_b_frames = !!avctx->max_b_frames;
5227 
5228  s->mb_width = (avctx->coded_width + 15) >> 4;
5229  s->mb_height = (avctx->coded_height + 15) >> 4;
5230 
5231  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5232  for (i = 0; i < 64; i++) {
5233 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5234  v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5235  v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5236  v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5237  v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
5239  }
5240  v->left_blk_sh = 0;
5241  v->top_blk_sh = 3;
5242  } else {
5243  memcpy(v->zz_8x8, wmv1_scantable, 4*64);
5244  v->left_blk_sh = 3;
5245  v->top_blk_sh = 0;
5246  }
5247 
5248  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5249  v->sprite_width = avctx->coded_width;
5250  v->sprite_height = avctx->coded_height;
5251 
5252  avctx->coded_width = avctx->width = v->output_width;
5253  avctx->coded_height = avctx->height = v->output_height;
5254 
5255  // prevent 16.16 overflows
5256  if (v->sprite_width > 1 << 14 ||
5257  v->sprite_height > 1 << 14 ||
5258  v->output_width > 1 << 14 ||
5259  v->output_height > 1 << 14) return -1;
5260  }
5261  return 0;
5262 }
5263 
5268 {
5269  VC1Context *v = avctx->priv_data;
5270  int i;
5271 
5272  if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5273  && v->sprite_output_frame.data[0])
5274  avctx->release_buffer(avctx, &v->sprite_output_frame);
5275  for (i = 0; i < 4; i++)
5276  av_freep(&v->sr_rows[i >> 1][i & 1]);
5277  av_freep(&v->hrd_rate);
5278  av_freep(&v->hrd_buffer);
5279  MPV_common_end(&v->s);
5283  av_freep(&v->fieldtx_plane);
5284  av_freep(&v->acpred_plane);
5286  av_freep(&v->mb_type_base);
5288  av_freep(&v->mv_f_base);
5289  av_freep(&v->mv_f_last_base);
5290  av_freep(&v->mv_f_next_base);
5291  av_freep(&v->block);
5292  av_freep(&v->cbp_base);
5293  av_freep(&v->ttblk_base);
5294  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5295  av_freep(&v->luma_mv_base);
5297  return 0;
5298 }
5299 
5300 
5304 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5305  int *data_size, AVPacket *avpkt)
5306 {
5307  const uint8_t *buf = avpkt->data;
5308  int buf_size = avpkt->size, n_slices = 0, i;
5309  VC1Context *v = avctx->priv_data;
5310  MpegEncContext *s = &v->s;
5311  AVFrame *pict = data;
5312  uint8_t *buf2 = NULL;
5313  const uint8_t *buf_start = buf;
5314  int mb_height, n_slices1;
5315  struct {
5316  uint8_t *buf;
5317  GetBitContext gb;
5318  int mby_start;
5319  } *slices = NULL, *tmp;
5320 
5321  /* no supplementary picture */
5322  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5323  /* special case for last picture */
5324  if (s->low_delay == 0 && s->next_picture_ptr) {
5325  *pict = *(AVFrame*)s->next_picture_ptr;
5326  s->next_picture_ptr = NULL;
5327 
5328  *data_size = sizeof(AVFrame);
5329  }
5330 
5331  return 0;
5332  }
5333 
5335  if (v->profile < PROFILE_ADVANCED)
5336  avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5337  else
5338  avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5339  }
5340 
5341  //for advanced profile we may need to parse and unescape data
5342  if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5343  int buf_size2 = 0;
5344  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5345 
5346  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5347  const uint8_t *start, *end, *next;
5348  int size;
5349 
5350  next = buf;
5351  for (start = buf, end = buf + buf_size; next < end; start = next) {
5352  next = find_next_marker(start + 4, end);
5353  size = next - start - 4;
5354  if (size <= 0) continue;
5355  switch (AV_RB32(start)) {
5356  case VC1_CODE_FRAME:
5357  if (avctx->hwaccel ||
5359  buf_start = start;
5360  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5361  break;
5362  case VC1_CODE_FIELD: {
5363  int buf_size3;
5364  slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5365  if (!slices)
5366  goto err;
5367  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5368  if (!slices[n_slices].buf)
5369  goto err;
5370  buf_size3 = vc1_unescape_buffer(start + 4, size,
5371  slices[n_slices].buf);
5372  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5373  buf_size3 << 3);
5374  /* assuming that the field marker is at the exact middle,
5375  hope it's correct */
5376  slices[n_slices].mby_start = s->mb_height >> 1;
5377  n_slices1 = n_slices - 1; // index of the last slice of the first field
5378  n_slices++;
5379  break;
5380  }
5381  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5382  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5383  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5384  vc1_decode_entry_point(avctx, v, &s->gb);
5385  break;
5386  case VC1_CODE_SLICE: {
5387  int buf_size3;
5388  slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5389  if (!slices)
5390  goto err;
5391  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5392  if (!slices[n_slices].buf)
5393  goto err;
5394  buf_size3 = vc1_unescape_buffer(start + 4, size,
5395  slices[n_slices].buf);
5396  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5397  buf_size3 << 3);
5398  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5399  n_slices++;
5400  break;
5401  }
5402  }
5403  }
5404  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5405  const uint8_t *divider;
5406  int buf_size3;
5407 
5408  divider = find_next_marker(buf, buf + buf_size);
5409  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5410  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5411  goto err;
5412  } else { // found field marker, unescape second field
5413  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5414  if (!tmp)
5415  goto err;
5416  slices = tmp;
5417  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5418  if (!slices[n_slices].buf)
5419  goto err;
5420  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5421  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5422  buf_size3 << 3);
5423  slices[n_slices].mby_start = s->mb_height >> 1;
5424  n_slices1 = n_slices - 1;
5425  n_slices++;
5426  }
5427  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5428  } else {
5429  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5430  }
5431  init_get_bits(&s->gb, buf2, buf_size2*8);
5432  } else
5433  init_get_bits(&s->gb, buf, buf_size*8);
5434 
5435  if (v->res_sprite) {
5436  v->new_sprite = !get_bits1(&s->gb);
5437  v->two_sprites = get_bits1(&s->gb);
5438  /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5439  we're using the sprite compositor. These are intentionally kept separate
5440  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5441  the vc1 one for WVP2 */
5442  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5443  if (v->new_sprite) {
5444  // switch AVCodecContext parameters to those of the sprites
5445  avctx->width = avctx->coded_width = v->sprite_width;
5446  avctx->height = avctx->coded_height = v->sprite_height;
5447  } else {
5448  goto image;
5449  }
5450  }
5451  }
5452 
5453  if (s->context_initialized &&
5454  (s->width != avctx->coded_width ||
5455  s->height != avctx->coded_height)) {
5456  vc1_decode_end(avctx);
5457  }
5458 
5459  if (!s->context_initialized) {
5460  if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5461  return -1;
5462 
5463  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5464 
5465  if (v->profile == PROFILE_ADVANCED) {
5466  s->h_edge_pos = avctx->coded_width;
5467  s->v_edge_pos = avctx->coded_height;
5468  }
5469  }
5470 
5471  /* We need to set current_picture_ptr before reading the header,
5472  * otherwise we cannot store anything in there. */
5473  if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5474  int i = ff_find_unused_picture(s, 0);
5475  if (i < 0)
5476  goto err;
5477  s->current_picture_ptr = &s->picture[i];
5478  }
5479 
5480  // do parse frame header
5481  v->pic_header_flag = 0;
5482  if (v->profile < PROFILE_ADVANCED) {
5483  if (vc1_parse_frame_header(v, &s->gb) == -1) {
5484  goto err;
5485  }
5486  } else {
5487  if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
5488  goto err;
5489  }
5490  }
5491 
5492  if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5493  && s->pict_type != AV_PICTURE_TYPE_I) {
5494  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5495  goto err;
5496  }
5497 
5498  // process pulldown flags
5500  // Pulldown flags are only valid when 'broadcast' has been set.
5501  // So ticks_per_frame will be 2
5502  if (v->rff) {
5503  // repeat field
5505  } else if (v->rptfrm) {
5506  // repeat frames
5507  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5508  }
5509 
5510  // for skipping the frame
5513 
5514  /* skip B-frames if we don't have reference frames */
5515  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
5516  goto err;
5517  }
5518  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5519  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5520  avctx->skip_frame >= AVDISCARD_ALL) {
5521  goto end;
5522  }
5523 
5524  if (s->next_p_frame_damaged) {
5525  if (s->pict_type == AV_PICTURE_TYPE_B)
5526  goto end;
5527  else
5528  s->next_p_frame_damaged = 0;
5529  }
5530 
5531  if (MPV_frame_start(s, avctx) < 0) {
5532  goto err;
5533  }
5534 
5537 
5540  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5541  else if (avctx->hwaccel) {
5542  if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5543  goto err;
5544  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5545  goto err;
5546  if (avctx->hwaccel->end_frame(avctx) < 0)
5547  goto err;
5548  } else {
5549  ff_er_frame_start(s);
5550 
5551  v->bits = buf_size * 8;
5552  if (v->field_mode) {
5553  uint8_t *tmp[2];
5554  s->current_picture.f.linesize[0] <<= 1;
5555  s->current_picture.f.linesize[1] <<= 1;
5556  s->current_picture.f.linesize[2] <<= 1;
5557  s->linesize <<= 1;
5558  s->uvlinesize <<= 1;
5559  tmp[0] = v->mv_f_last[0];
5560  tmp[1] = v->mv_f_last[1];
5561  v->mv_f_last[0] = v->mv_f_next[0];
5562  v->mv_f_last[1] = v->mv_f_next[1];
5563  v->mv_f_next[0] = v->mv_f[0];
5564  v->mv_f_next[1] = v->mv_f[1];
5565  v->mv_f[0] = tmp[0];
5566  v->mv_f[1] = tmp[1];
5567  }
5568  mb_height = s->mb_height >> v->field_mode;
5569  for (i = 0; i <= n_slices; i++) {
5570  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5571  if (v->field_mode <= 0) {
5572  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
5573  "picture boundary (%d >= %d)\n", i,
5574  slices[i - 1].mby_start, mb_height);
5575  continue;
5576  }
5577  v->second_field = 1;
5578  v->blocks_off = s->mb_width * s->mb_height << 1;
5579  v->mb_off = s->mb_stride * s->mb_height >> 1;
5580  } else {
5581  v->second_field = 0;
5582  v->blocks_off = 0;
5583  v->mb_off = 0;
5584  }
5585  if (i) {
5586  v->pic_header_flag = 0;
5587  if (v->field_mode && i == n_slices1 + 2)
5589  else if (get_bits1(&s->gb)) {
5590  v->pic_header_flag = 1;
5592  }
5593  }
5594  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5595  if (!v->field_mode || v->second_field)
5596  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5597  else
5598  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5599  vc1_decode_blocks(v);
5600  if (i != n_slices)
5601  s->gb = slices[i].gb;
5602  }
5603  if (v->field_mode) {
5604  v->second_field = 0;
5605  if (s->pict_type == AV_PICTURE_TYPE_B) {
5606  memcpy(v->mv_f_base, v->mv_f_next_base,
5607  2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5608  }
5609  s->current_picture.f.linesize[0] >>= 1;
5610  s->current_picture.f.linesize[1] >>= 1;
5611  s->current_picture.f.linesize[2] >>= 1;
5612  s->linesize >>= 1;
5613  s->uvlinesize >>= 1;
5614  }
5615 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5616 // if (get_bits_count(&s->gb) > buf_size * 8)
5617 // return -1;
5618  ff_er_frame_end(s);
5619  }
5620 
5621  MPV_frame_end(s);
5622 
5623  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5624 image:
5625  avctx->width = avctx->coded_width = v->output_width;
5626  avctx->height = avctx->coded_height = v->output_height;
5627  if (avctx->skip_frame >= AVDISCARD_NONREF)
5628  goto end;
5629 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5630  if (vc1_decode_sprites(v, &s->gb))
5631  goto err;
5632 #endif
5633  *pict = v->sprite_output_frame;
5634  *data_size = sizeof(AVFrame);
5635  } else {
5636  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5637  *pict = *(AVFrame*)s->current_picture_ptr;
5638  } else if (s->last_picture_ptr != NULL) {
5639  *pict = *(AVFrame*)s->last_picture_ptr;
5640  }
5641  if (s->last_picture_ptr || s->low_delay) {
5642  *data_size = sizeof(AVFrame);
5643  ff_print_debug_info(s, pict);
5644  }
5645  }
5646 
5647 end:
5648  av_free(buf2);
5649  for (i = 0; i < n_slices; i++)
5650  av_free(slices[i].buf);
5651  av_free(slices);
5652  return buf_size;
5653 
5654 err:
5655  av_free(buf2);
5656  for (i = 0; i < n_slices; i++)
5657  av_free(slices[i].buf);
5658  av_free(slices);
5659  return -1;
5660 }
5661 
5662 
5663 static const AVProfile profiles[] = {
5664  { FF_PROFILE_VC1_SIMPLE, "Simple" },
5665  { FF_PROFILE_VC1_MAIN, "Main" },
5666  { FF_PROFILE_VC1_COMPLEX, "Complex" },
5667  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5668  { FF_PROFILE_UNKNOWN },
5669 };
5670 
5672  .name = "vc1",
5673  .type = AVMEDIA_TYPE_VIDEO,
5674  .id = CODEC_ID_VC1,
5675  .priv_data_size = sizeof(VC1Context),
5676  .init = vc1_decode_init,
5677  .close = vc1_decode_end,
5679  .flush = ff_mpeg_flush,
5680  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5681  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5682  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5683  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5684 };
5685 
5686 #if CONFIG_WMV3_DECODER
5687 AVCodec ff_wmv3_decoder = {
5688  .name = "wmv3",
5689  .type = AVMEDIA_TYPE_VIDEO,
5690  .id = CODEC_ID_WMV3,
5691  .priv_data_size = sizeof(VC1Context),
5692  .init = vc1_decode_init,
5693  .close = vc1_decode_end,
5695  .flush = ff_mpeg_flush,
5696  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5697  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5698  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5699  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5700 };
5701 #endif
5702 
5703 #if CONFIG_WMV3_VDPAU_DECODER
5704 AVCodec ff_wmv3_vdpau_decoder = {
5705  .name = "wmv3_vdpau",
5706  .type = AVMEDIA_TYPE_VIDEO,
5707  .id = CODEC_ID_WMV3,
5708  .priv_data_size = sizeof(VC1Context),
5709  .init = vc1_decode_init,
5710  .close = vc1_decode_end,
5713  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5714  .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5715  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5716 };
5717 #endif
5718 
5719 #if CONFIG_VC1_VDPAU_DECODER
5720 AVCodec ff_vc1_vdpau_decoder = {
5721  .name = "vc1_vdpau",
5722  .type = AVMEDIA_TYPE_VIDEO,
5723  .id = CODEC_ID_VC1,
5724  .priv_data_size = sizeof(VC1Context),
5725  .init = vc1_decode_init,
5726  .close = vc1_decode_end,
5729  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5730  .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5731  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5732 };
5733 #endif
5734 
5735 #if CONFIG_WMV3IMAGE_DECODER
5736 AVCodec ff_wmv3image_decoder = {
5737  .name = "wmv3image",
5738  .type = AVMEDIA_TYPE_VIDEO,
5739  .id = CODEC_ID_WMV3IMAGE,
5740  .priv_data_size = sizeof(VC1Context),
5741  .init = vc1_decode_init,
5742  .close = vc1_decode_end,
5744  .capabilities = CODEC_CAP_DR1,
5745  .flush = vc1_sprite_flush,
5746  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5747  .pix_fmts = ff_pixfmt_list_420
5748 };
5749 #endif
5750 
5751 #if CONFIG_VC1IMAGE_DECODER
5752 AVCodec ff_vc1image_decoder = {
5753  .name = "vc1image",
5754  .type = AVMEDIA_TYPE_VIDEO,
5755  .id = CODEC_ID_VC1IMAGE,
5756  .priv_data_size = sizeof(VC1Context),
5757  .init = vc1_decode_init,
5758  .close = vc1_decode_end,
5760  .capabilities = CODEC_CAP_DR1,
5761  .flush = vc1_sprite_flush,
5762  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5763  .pix_fmts = ff_pixfmt_list_420
5764 };
5765 #endif