30 #ifndef AVCODEC_DSPUTIL_H
31 #define AVCODEC_DSPUTIL_H
/**
 * Declare the C entry points of the H.264 inverse transforms for one
 * bit depth. Expanding H264_IDCT(d) emits prototypes for the 4x4/8x8
 * add and DC-only add kernels, the multi-block add16/add16intra/add4/
 * add8 variants, and the luma/chroma DC dequant IDCTs, all suffixed
 * _<d>_c.
 */
#define H264_IDCT(bd) \
void ff_h264_idct8_add_ ## bd ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct_add_ ## bd ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct8_dc_add_ ## bd ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct_dc_add_ ## bd ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct_add16_ ## bd ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add16intra_ ## bd ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct8_add4_ ## bd ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add8_422_ ## bd ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add8_ ## bd ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_luma_dc_dequant_idct_ ## bd ## _c(DCTELEM *output, DCTELEM *input, int qmul);\
void ff_h264_chroma422_dc_dequant_idct_ ## bd ## _c(DCTELEM *block, int qmul);\
void ff_h264_chroma_dc_dequant_idct_ ## bd ## _c(DCTELEM *block, int qmul);
/* Size of the guard band in the pixel-clipping look-up tables: indices
 * up to MAX_NEG_CROP below 0 (and above the maximum pixel value) can be
 * used without running off the table.
 * NOTE(review): presumably matches the crop-table allocation defined
 * elsewhere — confirm against its definition. */
#define MAX_NEG_CROP 1024
/**
 * Declare the bit-depth-specific whole-block copy/average helpers:
 * "put" copies an 8x8 or 16x16 block, "avg" combines it with the
 * destination; the prototypes are suffixed _<bd>_c.
 */
#define PUTAVG_PIXELS(bd)\
void ff_put_pixels8x8_ ## bd ## _c(uint8_t *dst, uint8_t *src, int stride);\
void ff_avg_pixels8x8_ ## bd ## _c(uint8_t *dst, uint8_t *src, int stride);\
void ff_put_pixels16x16_ ## bd ## _c(uint8_t *dst, uint8_t *src, int stride);\
void ff_avg_pixels16x16_ ## bd ## _c(uint8_t *dst, uint8_t *src, int stride);
/* Bit-depth-generic aliases: the unsuffixed names resolve to the 8-bit
 * implementations declared via PUTAVG_PIXELS(8). */
#define ff_put_pixels8x8_c ff_put_pixels8x8_8_c
#define ff_avg_pixels8x8_c ff_avg_pixels8x8_8_c
#define ff_put_pixels16x16_c ff_put_pixels16x16_8_c
#define ff_avg_pixels16x16_c ff_avg_pixels16x16_8_c
116 int dxx,
int dxy,
int dyx,
int dyy,
int shift,
int r,
int width,
int height);
/**
 * Declare the legacy quarter-pel motion-compensation entry points for a
 * given function suffix: plain put, put without rounding, and avg.
 */
#define DEF_OLD_QPEL(fn)\
void ff_put_ ## fn (uint8_t *dst, uint8_t *src, int stride);\
void ff_put_no_rnd_ ## fn (uint8_t *dst, uint8_t *src, int stride);\
void ff_avg_ ## fn (uint8_t *dst, uint8_t *src, int stride);
167 #define CALL_2X_PIXELS(a, b, n)\
168 static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
169 b(block , pixels , line_size, h);\
170 b(block+n, pixels+n, line_size, h);\
189 int idct_permutation_type);
217 int dxx,
int dxy,
int dyx,
int dyy,
int shift,
int r,
int width,
int height);
391 const float *src1,
int len);
/* Coefficient-permutation identifiers: which input reordering an IDCT
 * implementation expects (stored in the idct_permutation_type field/
 * parameter visible elsewhere in this header).
 * NOTE(review): the list starts at 1 — value 0 is presumably an "auto"
 * selector defined on an elided line; confirm. */
#define FF_NO_IDCT_PERM 1
#define FF_LIBMPEG2_IDCT_PERM 2
#define FF_SIMPLE_IDCT_PERM 3
#define FF_TRANSPOSE_IDCT_PERM 4
#define FF_PARTTRANS_IDCT_PERM 5
#define FF_SSE2_IDCT_PERM 6
/* Fixed-point shift amounts used by the transform/reconstruction
 * helpers. NOTE(review): exact semantics live with their users, which
 * are not visible in this chunk — confirm there. */
#define BASIS_SHIFT 16
#define RECON_SHIFT 6

/* Width in pixels of the replicated border drawn around reference
 * frames. */
#define EDGE_WIDTH 16
/* Edge-selection flag for the edge-drawing calls; looks like a bitmask
 * companion to an EDGE_TOP (presumably 1) defined on an elided line —
 * confirm. */
#define EDGE_BOTTOM 2
471 const int16_t *window,
unsigned int len);
/* Broadcast a small constant across SIMD-in-a-register lanes:
 * BYTE_VEC32 replicates c into all four 8-bit lanes of a 32-bit word;
 * BYTE_VEC64 replicates c into the four 16-bit lanes of a 64-bit word.
 * NOTE(review): the 16-bit lane width of BYTE_VEC64 appears intentional
 * (matches packing two-byte high-bit-depth pixels four to a uint64_t,
 * as used by the rnd_avg64 helpers below) — confirm against callers. */
#define BYTE_VEC32(c) ((c)*0x01010101UL)
#define BYTE_VEC64(c) ((c)*0x0001000100010001UL)
521 return (a | b) - (((a ^
b) & ~
BYTE_VEC32(0x01)) >> 1);
526 return (a & b) + (((a ^
b) & ~
BYTE_VEC32(0x01)) >> 1);
531 return (a | b) - (((a ^
b) & ~
BYTE_VEC64(0x01)) >> 1);
536 return (a & b) + (((a ^
b) & ~
BYTE_VEC64(0x01)) >> 1);
545 return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
547 return (4*lambda)>>(FF_LAMBDA_SHIFT);
549 return (2*lambda)>>(FF_LAMBDA_SHIFT);
552 return (2*lambda)>>FF_LAMBDA_SHIFT;
557 return lambda2>>FF_LAMBDA_SHIFT;
573 #if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMX
574 # define STRIDE_ALIGN 16
576 # define STRIDE_ALIGN 8
/* Declare a local array of type t with run-time-enforced alignment:
 * over-allocate a raw byte buffer by 'a' bytes, then bind v to the
 * first a-aligned address inside it via FFALIGN. Fallback for targets
 * where compiler-enforced stack alignment is unavailable (selected by
 * the HAVE_LOCAL_ALIGNED_* conditionals elsewhere in this header). */
#define LOCAL_ALIGNED_A(a, t, v, s, o, ...) \
    uint8_t la_##v[sizeof(t s o) + (a)]; \
    t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
587 #define LOCAL_ALIGNED_D(a, t, v, s, o, ...) \
588 DECLARE_ALIGNED(a, t, la_##v) s o; \
/* Generic aligned local declaration: appends two empty arguments so the
 * s/o parameters of LOCAL_ALIGNED_A may be omitted by the caller; E()
 * (presumably an identity macro defined on an elided line — confirm)
 * forces an extra expansion pass over the argument list. */
#define LOCAL_ALIGNED(a, t, v, ...) E(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
593 #if HAVE_LOCAL_ALIGNED_8
594 # define LOCAL_ALIGNED_8(t, v, ...) E(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
596 # define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
599 #if HAVE_LOCAL_ALIGNED_16
600 # define LOCAL_ALIGNED_16(t, v, ...) E(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
602 # define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
605 #define WRAPPER8_16_SQ(name8, name16)\
606 static int name16(void *s, uint8_t *dst, uint8_t *src, int stride, int h){\
608 score +=name8(s, dst , src , stride, 8);\
609 score +=name8(s, dst+8 , src+8 , stride, 8);\
613 score +=name8(s, dst , src , stride, 8);\
614 score +=name8(s, dst+8 , src+8 , stride, 8);\