comparison src/ffmpeg/libavcodec/dsputil.h @ 817:b3b7a4e480b2 trunk

[svn] - make this compile again
author nenolod
date Mon, 12 Mar 2007 13:14:40 -0700
parents 23a5aa2c545c
children
comparing 816:87b58fcb96c8 with 817:b3b7a4e480b2
@@ -1 +1 @@
 /*
  * DSP utils
  * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
  *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */

 /**
  * @file dsputil.h
  * DSP utils.
@@ -33 +35 @@


 //#define DEBUG
 /* dct code */
 typedef short DCTELEM;
+typedef int DWTELEM;

 void fdct_ifast (DCTELEM *data);
 void fdct_ifast248 (DCTELEM *data);
 void ff_jpeg_fdct_islow (DCTELEM *data);
 void ff_fdct248_islow (DCTELEM *data);

 void j_rev_dct (DCTELEM *data);
+void j_rev_dct4 (DCTELEM *data);
+void j_rev_dct2 (DCTELEM *data);
+void j_rev_dct1 (DCTELEM *data);

 void ff_fdct_mmx(DCTELEM *block);
 void ff_fdct_mmx2(DCTELEM *block);
 void ff_fdct_sse2(DCTELEM *block);
+
+void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_idct_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_idct8_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
+void ff_h264_lowres_idct_add_c(uint8_t *dst, int stride, DCTELEM *block);
+void ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block);
+
+void ff_vector_fmul_add_add_c(float *dst, const float *src0, const float *src1,
+                              const float *src2, int src3, int blocksize, int step);
+void ff_float_to_int16_c(int16_t *dst, const float *src, int len);

 /* encoding scans */
 extern const uint8_t ff_alternate_horizontal_scan[64];
 extern const uint8_t ff_alternate_vertical_scan[64];
 extern const uint8_t ff_zigzag_direct[64];
 extern const uint8_t ff_zigzag248_direct[64];

 /* pixel operations */
-#define MAX_NEG_CROP 384
+#define MAX_NEG_CROP 1024

 /* temporary */
 extern uint32_t squareTbl[512];
 extern uint8_t cropTbl[256 + 2 * MAX_NEG_CROP];

+/* VP3 DSP functions */
+void ff_vp3_idct_c(DCTELEM *block/* align 16*/);
+void ff_vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+void ff_vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+
+/* 1/2^n downscaling functions from imgconvert.c */
+void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+
+void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
+              int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);

 /* minimum alignment rules ;)
 if u notice errors in the align stuff, need more alignment for some asm code for some cpu
 or need to use a function with less aligned data then send a mail to the ffmpeg-dev list, ...

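The cropTbl/MAX_NEG_CROP pair above is the usual branch-free clamping device in this code: the table is laid out so that cropTbl[MAX_NEG_CROP + x] equals x clipped to 0..255, so IDCT output can be clipped with a single lookup. A minimal sketch of the pattern (the function and the cm pointer are illustrative, not declarations from this header):

/* Clamp an 8x8 block of IDCT output to 0..255 by table lookup.
 * cropTbl[0 .. MAX_NEG_CROP-1]              == 0
 * cropTbl[MAX_NEG_CROP .. MAX_NEG_CROP+255] == 0 .. 255
 * cropTbl[MAX_NEG_CROP+256 .. end]          == 255
 */
static void put_block_clamped(uint8_t *dest, const DCTELEM *block, int line_size)
{
    const uint8_t *cm = cropTbl + MAX_NEG_CROP;   /* re-centre the table on 0 */
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            dest[j] = cm[block[i * 8 + j]];
        dest += line_size;
    }
}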
@@ -84 +114 @@
 //h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller then 4
 typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h);
 typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h);
 typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
 typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
+typedef void (*h264_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int offset);
+typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset);

 #define DEF_OLD_QPEL(name)\
 void ff_put_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
 void ff_put_no_rnd_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
 void ff_avg_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
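op_pixels_func above is the basic copy/average primitive behind the pixels_tab arrays further down: it processes a block 8 or 16 pixels wide and h rows high, walking both pointers with the same line_size. As a point of reference, a plain C model of the 8-wide "put" case could look like the following (illustrative name; the real implementations use word tricks or SIMD):

#include <string.h>   /* memcpy; uint8_t and friends come from the usual stdint/common headers */

/* Reference behaviour of an op_pixels_func for width 8: copy h rows of 8 pixels. */
static void put_pixels8_ref(uint8_t *block, const uint8_t *pixels,
                            int line_size, int h)
{
    int i;
    for (i = 0; i < h; i++) {
        memcpy(block, pixels, 8);   /* an "avg" variant would average into block instead */
        block  += line_size;
        pixels += line_size;
    }
}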
@@ -115 +147 @@
 // h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller then 2
 // allthough currently h<4 is not used as functions with width <8 are not used and neither implemented
 typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/;


+// for snow slices
+typedef struct slice_buffer_s slice_buffer;
+
 /**
  * DSPContext.
  */
 typedef struct DSPContext {
     /* pixel ops : interface with DCT */
     void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size);
     void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride);
     void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
+    void (*put_signed_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
     void (*add_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
+    void (*add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size);
+    void (*add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size);
     /**
      * translational global motion compensation.
      */
     void (*gmc1)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x16, int y16, int rounder);
     /**
      * global motion compensation.
      */
     void (*gmc )(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int ox, int oy,
                  int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
     void (*clear_blocks)(DCTELEM *blocks/*align 16*/);
     int (*pix_sum)(uint8_t * pix, int line_size);
     int (*pix_norm1)(uint8_t * pix, int line_size);
     // 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4

     me_cmp_func sad[5]; /* identical to pix_absAxA except additional void * */
     me_cmp_func sse[5];
     me_cmp_func hadamard8_diff[5];
     me_cmp_func dct_sad[5];
     me_cmp_func quant_psnr[5];
     me_cmp_func bit[5];
     me_cmp_func rd[5];
     me_cmp_func vsad[5];
     me_cmp_func vsse[5];
+    me_cmp_func nsse[5];
+    me_cmp_func w53[5];
+    me_cmp_func w97[5];
+    me_cmp_func dct_max[5];
+    me_cmp_func dct264_sad[5];

     me_cmp_func me_pre_cmp[5];
     me_cmp_func me_cmp[5];
     me_cmp_func me_sub_cmp[5];
     me_cmp_func mb_cmp[5];
     me_cmp_func ildct_cmp[5]; //only width 16 used
+    me_cmp_func frame_skip_cmp[5]; //only width 8 used

     /**
      * Halfpel motion compensation with rounding (a+b+1)>>1.
      * this is an array[4][4] of motion compensation funcions for 4
      * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
      * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
      * @param block destination where the result is stored
      * @param pixels source
      * @param line_size number of bytes in a horizontal line of block
@@ -168 +212 @@
      */
     op_pixels_func put_pixels_tab[4][4];

     /**
      * Halfpel motion compensation with rounding (a+b+1)>>1.
      * This is an array[4][4] of motion compensation functions for 4
      * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
      * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
      * @param block destination into which the result is averaged (a+b+1)>>1
      * @param pixels source
      * @param line_size number of bytes in a horizontal line of block
@@ -180 +224 @@
      */
     op_pixels_func avg_pixels_tab[4][4];

     /**
      * Halfpel motion compensation with no rounding (a+b)>>1.
      * this is an array[2][4] of motion compensation funcions for 2
      * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
      * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
      * @param block destination where the result is stored
      * @param pixels source
      * @param line_size number of bytes in a horizontal line of block
      * @param h height
      */
-    op_pixels_func put_no_rnd_pixels_tab[2][4];
+    op_pixels_func put_no_rnd_pixels_tab[4][4];

     /**
      * Halfpel motion compensation with no rounding (a+b)>>1.
      * this is an array[2][4] of motion compensation funcions for 2
      * horizontal blocksizes (8,16) and the 4 halfpel positions<br>
      * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
      * @param block destination into which the result is averaged (a+b)>>1
      * @param pixels source
      * @param line_size number of bytes in a horizontal line of block
      * @param h height
      */
-    op_pixels_func avg_no_rnd_pixels_tab[2][4];
+    op_pixels_func avg_no_rnd_pixels_tab[4][4];

+    void (*put_no_rnd_pixels_l2[2])(uint8_t *block/*align width (8 or 16)*/, const uint8_t *a/*align 1*/, const uint8_t *b/*align 1*/, int line_size, int h);
+
     /**
      * Thirdpel motion compensation with rounding (a+b+1)>>1.
      * this is an array[12] of motion compensation funcions for the 9 thirdpel positions<br>
      * *pixels_tab[ xthirdpel + 4*ythirdpel ]
      * @param block destination where the result is stored
@@ -219 +265 @@
     qpel_mc_func put_qpel_pixels_tab[2][16];
     qpel_mc_func avg_qpel_pixels_tab[2][16];
     qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
     qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16];
     qpel_mc_func put_mspel_pixels_tab[8];

     /**
      * h264 Chram MC
      */
     h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
+    /* This is really one func used in VC-1 decoding */
+    h264_chroma_mc_func put_no_rnd_h264_chroma_pixels_tab[3];
     h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];

-    qpel_mc_func put_h264_qpel_pixels_tab[3][16];
-    qpel_mc_func avg_h264_qpel_pixels_tab[3][16];
+    qpel_mc_func put_h264_qpel_pixels_tab[4][16];
+    qpel_mc_func avg_h264_qpel_pixels_tab[4][16];

+    qpel_mc_func put_2tap_qpel_pixels_tab[4][16];
+    qpel_mc_func avg_2tap_qpel_pixels_tab[4][16];
+
+    h264_weight_func weight_h264_pixels_tab[10];
+    h264_biweight_func biweight_h264_pixels_tab[10];
+
+    /* AVS specific */
+    qpel_mc_func put_cavs_qpel_pixels_tab[2][16];
+    qpel_mc_func avg_cavs_qpel_pixels_tab[2][16];
+    void (*cavs_filter_lv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+    void (*cavs_filter_lh)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+    void (*cavs_filter_cv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+    void (*cavs_filter_ch)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
+    void (*cavs_idct8_add)(uint8_t *dst, DCTELEM *block, int stride);
+
     me_cmp_func pix_abs[2][4];

     /* huffyuv specific */
     void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w);
     void (*diff_bytes)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 1*/,int w);
     /**
      * subtract huffyuv's variant of median prediction
      * note, this might read from src1[-1], src2[-1]
      */
     void (*sub_hfyu_median_prediction)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top);
     void (*bswap_buf)(uint32_t *dst, uint32_t *src, int w);

+    void (*h264_v_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
+    void (*h264_h_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
+    void (*h264_v_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
+    void (*h264_h_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
+    void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta);
+    void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta);
+    // h264_loop_filter_strength: simd only. the C version is inlined in h264.c
+    void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
+                                      int bidir, int edges, int step, int mask_mv0, int mask_mv1);
+
     void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
     void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);
+
+    void (*h261_loop_filter)(uint8_t *src, int stride);
+
+    /* assume len is a multiple of 4, and arrays are 16-byte aligned */
+    void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);
+    /* assume len is a multiple of 8, and arrays are 16-byte aligned */
+    void (*vector_fmul)(float *dst, const float *src, int len);
+    void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
+    /* assume len is a multiple of 8, and src arrays are 16-byte aligned */
+    void (*vector_fmul_add_add)(float *dst, const float *src0, const float *src1, const float *src2, int src3, int len, int step);
+
+    /* C version: convert floats from the range [384.0,386.0] to ints in [-32768,32767]
+     * simd versions: convert floats from [-32768.0,32767.0] without rescaling and arrays are 16byte aligned */
+    void (*float_to_int16)(int16_t *dst, const float *src, int len);

     /* (I)DCT */
     void (*fdct)(DCTELEM *block/* align 16*/);
     void (*fdct248)(DCTELEM *block/* align 16*/);

     /* IDCT really*/
     void (*idct)(DCTELEM *block/* align 16*/);

     /**
      * block -> idct -> clip to unsigned 8 bit -> dest.
      * (-1392, 0, 0, ...) -> idct -> (-174, -174, ...) -> put -> (0, 0, ...)
      * @param line_size size in bytes of a horizotal line of dest
      */
     void (*idct_put)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);

     /**
      * block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
      * @param line_size size in bytes of a horizotal line of dest
      */
     void (*idct_add)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);

     /**
      * idct input permutation.
      * several optimized IDCTs need a permutated input (relative to the normal order of the reference
      * IDCT)
      * this permutation must be performed before the idct_put/add, note, normally this can be merged
@@ -282 +369 @@
     int idct_permutation_type;
 #define FF_NO_IDCT_PERM 1
 #define FF_LIBMPEG2_IDCT_PERM 2
 #define FF_SIMPLE_IDCT_PERM 3
 #define FF_TRANSPOSE_IDCT_PERM 4
+#define FF_PARTTRANS_IDCT_PERM 5

     int (*try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale);
     void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale);
 #define BASIS_SHIFT 16
 #define RECON_SHIFT 6

+    void (*h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride);
+    void (*h264_idct8_add)(uint8_t *dst, DCTELEM *block, int stride);
+    void (*h264_idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
+    void (*h264_idct8_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
+
+    /* snow wavelet */
+    void (*vertical_compose97i)(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
+    void (*horizontal_compose97i)(DWTELEM *b, int width);
+    void (*inner_add_yblock)(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
+
+    void (*prefetch)(void *mem, int stride, int h);
+
+    void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+
+    /* vc1 functions */
+    void (*vc1_inv_trans_8x8)(DCTELEM *b);
+    void (*vc1_inv_trans_8x4)(DCTELEM *b, int n);
+    void (*vc1_inv_trans_4x8)(DCTELEM *b, int n);
+    void (*vc1_inv_trans_4x4)(DCTELEM *b, int n);
+    void (*vc1_v_overlap)(uint8_t* src, int stride, int rnd);
+    void (*vc1_h_overlap)(uint8_t* src, int stride, int rnd);
+    /* put 8x8 block with bicubic interpolation and quarterpel precision
+     * last argument is actually round value instead of height
+     */
+    op_pixels_func put_vc1_mspel_pixels_tab[16];
 } DSPContext;

 void dsputil_static_init(void);
 void dsputil_init(DSPContext* p, AVCodecContext *avctx);

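Codecs do not call these routines directly: they fill a DSPContext once with dsputil_init(), and the architecture-specific init functions further down overwrite individual pointers with optimized versions. A hedged sketch of the calling pattern, including the halfpel put_pixels_tab indexing documented inside the struct ([0] for 16-wide blocks, [1] for 8-wide, [xhalfpel + 2*yhalfpel] for the subpel position); all helper and variable names here are illustrative:

/* Sketch: initialise a DSPContext and do 16x16 halfpel motion compensation. */
static void mc_16x16_halfpel(AVCodecContext *avctx, uint8_t *dst,
                             const uint8_t *ref, int stride, int mx, int my)
{
    DSPContext dsp;
    dsputil_init(&dsp, avctx);          /* installs C or SIMD implementations */

    {
        int dx = mx & 1, dy = my & 1;   /* halfpel fractions of the motion vector */
        /* [0] selects the 16-pixel-wide table, [dx + 2*dy] the halfpel position */
        dsp.put_pixels_tab[0][dx + 2 * dy](dst,
                                           ref + (mx >> 1) + (my >> 1) * stride,
                                           stride, 16);
    }
}

In real code the DSPContext is of course filled once per codec context, not once per call; the sketch only shows the dispatch through the table.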
@@ -301 +414 @@
  */
 void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);

 void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type);

 #define BYTE_VEC32(c) ((c)*0x01010101UL)

 static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
 {
     return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
 }

 static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
 {
     return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
+}
+
+static inline int get_penalty_factor(int lambda, int lambda2, int type){
+    switch(type&0xFF){
+    default:
+    case FF_CMP_SAD:
+        return lambda>>FF_LAMBDA_SHIFT;
+    case FF_CMP_DCT:
+        return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
+    case FF_CMP_W53:
+        return (4*lambda)>>(FF_LAMBDA_SHIFT);
+    case FF_CMP_W97:
+        return (2*lambda)>>(FF_LAMBDA_SHIFT);
+    case FF_CMP_SATD:
+    case FF_CMP_DCT264:
+        return (2*lambda)>>FF_LAMBDA_SHIFT;
+    case FF_CMP_RD:
+    case FF_CMP_PSNR:
+    case FF_CMP_SSE:
+    case FF_CMP_NSSE:
+        return lambda2>>FF_LAMBDA_SHIFT;
+    case FF_CMP_BIT:
+        return 1;
+    }
 }

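rnd_avg32() and no_rnd_avg32() above average four packed bytes at once: (a|b) - (((a^b) & ~0x01010101) >> 1) is the per-byte rounded average (x+y+1)>>1, (a&b) plus the same term is the truncated (x+y)>>1, and clearing bit 0 of every byte before the shift keeps the shifted bit from leaking into the neighbouring byte lane. A small worked check (a hypothetical standalone test, assuming this header and its dependencies are includable):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint32_t a = 0x01050A10;   /* bytes 0x01 0x05 0x0A 0x10 */
    uint32_t b = 0x02060B11;   /* bytes 0x02 0x06 0x0B 0x11 */

    /* rounded:   (0x01+0x02+1)>>1 = 0x02, ..., (0x10+0x11+1)>>1 = 0x11 */
    /* truncated: (0x01+0x02)>>1   = 0x01, ..., (0x10+0x11)>>1   = 0x10 */
    printf("%08"PRIX32" %08"PRIX32"\n", rnd_avg32(a, b), no_rnd_avg32(a, b));
    /* prints: 02060B11 01050A10 */
    return 0;
}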
 /**
  * Empty mmx state.
  * this must be called between any dsp function and float/double code.
@@ -324 +461 @@

 /* should be defined by architectures supporting
    one or more MultiMedia extension */
 int mm_support(void);

+#ifdef __GNUC__
+#define DECLARE_ALIGNED_16(t,v) t v __attribute__ ((aligned (16)))
+#else
+#define DECLARE_ALIGNED_16(t,v) __declspec(align(16)) t v
+#endif
+
 #if defined(HAVE_MMX)

 #undef emms_c

 #define MM_MMX    0x0001 /* standard MMX */
 #define MM_3DNOW  0x0004 /* AMD 3DNOW */
 #define MM_MMXEXT 0x0002 /* SSE integer functions or AMD MMX ext */
 #define MM_SSE    0x0008 /* SSE functions */
 #define MM_SSE2   0x0010 /* PIV SSE2 functions */
+#define MM_3DNOWEXT 0x0020 /* AMD 3DNowExt */
+#define MM_SSE3   0x0040 /* Prescott SSE3 functions */

 extern int mm_flags;

 void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
 void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
+void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);

 static inline void emms(void)
 {
     __asm __volatile ("emms;":::"memory");
 }
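mm_flags holds the MM_* bits reported by mm_support(), and the x86 init code checks them before installing SIMD function pointers; emms_c()/emms() then restore the FPU state after MMX code has run. The assignments below are only a sketch of that gating pattern, not a copy of the real dsputil_init_mmx():

/* Sketch: gate SIMD function pointers on the detected CPU features. */
static void init_x86_sketch(DSPContext *c)
{
    mm_flags = mm_support();              /* detect MMX/SSE/3DNow! once */

    if (mm_flags & MM_MMX) {
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
    }
    if (mm_flags & MM_SSE2)
        c->fdct = ff_fdct_sse2;           /* declared near the top of this header */
}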
@@ -351 +497 @@
 {\
     if (mm_flags & MM_MMX)\
         emms();\
 }

-#define __align8 __attribute__ ((aligned (8)))
+#ifdef __GNUC__
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#else
+#define DECLARE_ALIGNED_8(t,v) __declspec(align(8)) t v
+#endif
+
+#define STRIDE_ALIGN 8

 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
 void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);

 #elif defined(ARCH_ARMV4L)

 /* This is to use 4 bytes read to the IDCT pointers for some 'zero'
-   line ptimizations */
-#define __align8 __attribute__ ((aligned (4)))
+   line optimizations */
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (4)))
+#define STRIDE_ALIGN 4
+
+#define MM_IWMMXT 0x0100 /* XScale IWMMXT */
+
+extern int mm_flags;

 void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx);

 #elif defined(HAVE_MLIB)

 /* SPARC/VIS IDCT needs 8-byte aligned DCT blocks */
-#define __align8 __attribute__ ((aligned (8)))
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#define STRIDE_ALIGN 8

 void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx);

+#elif defined(ARCH_SPARC)
+
+/* SPARC/VIS IDCT needs 8-byte aligned DCT blocks */
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#define STRIDE_ALIGN 8
+void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);
+
 #elif defined(ARCH_ALPHA)

-#define __align8 __attribute__ ((aligned (8)))
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#define STRIDE_ALIGN 8

 void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);

 #elif defined(ARCH_POWERPC)

@@ -389 +555 @@
 #define pixel altivec_pixel
 #include <altivec.h>
 #undef pixel
 #endif

-#define __align8 __attribute__ ((aligned (16)))
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (16)))
+#define STRIDE_ALIGN 16

 void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);

 #elif defined(HAVE_MMI)

-#define __align8 __attribute__ ((aligned (16)))
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (16)))
+#define STRIDE_ALIGN 16

 void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);

 #elif defined(ARCH_SH4)

-#define __align8 __attribute__ ((aligned (8)))
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#define STRIDE_ALIGN 8

 void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);

+#elif defined(ARCH_BFIN)
+
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#define STRIDE_ALIGN 8
+
+void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
+
 #else

-#define __align8
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#define STRIDE_ALIGN 8

 #endif

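Every branch of the #if/#elif chain above now defines the same two things: DECLARE_ALIGNED_8(t,v), which declares a variable with whatever alignment that architecture's DSP code needs (4, 8 or 16 bytes, despite the 8 in the name), and STRIDE_ALIGN, the alignment expected of line strides. A sketch of typical use, with illustrative names:

/* Buffers handed to the DSP functions are declared with the chosen alignment. */
DECLARE_ALIGNED_16(DCTELEM, block[64]);          /* a DCT block for fdct/idct */
DECLARE_ALIGNED_8(uint8_t, mc_scratch[16 * 16]); /* temporary motion-compensation block */

/* Line strides are rounded up so every row keeps the required alignment. */
static int padded_stride(int width)
{
    return (width + STRIDE_ALIGN - 1) & ~(STRIDE_ALIGN - 1);
}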
 #ifdef __GNUC__

@@ -421 +598 @@

 #define LD16(a) (((const struct unaligned_16 *) (a))->l)
 #define LD32(a) (((const struct unaligned_32 *) (a))->l)
 #define LD64(a) (((const struct unaligned_64 *) (a))->l)

+#define ST16(a, b) (((struct unaligned_16 *) (a))->l) = (b)
 #define ST32(a, b) (((struct unaligned_32 *) (a))->l) = (b)

 #else /* __GNUC__ */

 #define LD16(a) (*((uint16_t*)(a)))
 #define LD32(a) (*((uint32_t*)(a)))
 #define LD64(a) (*((uint64_t*)(a)))

+#define ST16(a, b) *((uint16_t*)(a)) = (b)
 #define ST32(a, b) *((uint32_t*)(a)) = (b)

 #endif /* !__GNUC__ */

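LD16/LD32/LD64 and ST16/ST32 are the portable unaligned load/store helpers: under GCC they go through the packed struct unaligned_* types (defined in the part of the header elided above) so the compiler emits a safe unaligned access, while the fallback build plainly casts the pointer. Intended use, as a sketch with an illustrative helper name:

/* Copy four bytes that may live at any alignment, e.g. when pasting pixels
 * at a position derived from the bitstream. Never dereference a misaligned
 * uint32_t pointer directly on strict-alignment CPUs. */
static void copy4_unaligned(uint8_t *dst, const uint8_t *src)
{
    ST32(dst, LD32(src));
}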
 /* PSNR */
@@ -443 +622 @@
 /* FFT computation */

 /* NOTE: soon integer code will be added, so you must use the
    FFTSample type */
 typedef float FFTSample;
+
+struct MDCTContext;

 typedef struct FFTComplex {
     FFTSample re, im;
 } FFTComplex;

@@ -455 +636 @@
     int inverse;
     uint16_t *revtab;
     FFTComplex *exptab;
     FFTComplex *exptab1; /* only used by SSE code */
     void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
+    void (*imdct_calc)(struct MDCTContext *s, FFTSample *output,
+                       const FFTSample *input, FFTSample *tmp);
 } FFTContext;

-int fft_inits(FFTContext *s, int nbits, int inverse);
-void fft_permute(FFTContext *s, FFTComplex *z);
-void fft_calc_c(FFTContext *s, FFTComplex *z);
-void fft_calc_sse(FFTContext *s, FFTComplex *z);
-void fft_calc_altivec(FFTContext *s, FFTComplex *z);
-
-static inline void fft_calc(FFTContext *s, FFTComplex *z)
+int ff_fft_init(FFTContext *s, int nbits, int inverse);
+void ff_fft_permute(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
+
+static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
 {
     s->fft_calc(s, z);
 }
-void fft_end(FFTContext *s);
+void ff_fft_end(FFTContext *s);

 /* MDCT computation */

 typedef struct MDCTContext {
     int n; /* size of MDCT (i.e. number of input data * 2) */
@@ -483 +668 @@
 } MDCTContext;

 int ff_mdct_init(MDCTContext *s, int nbits, int inverse);
 void ff_imdct_calc(MDCTContext *s, FFTSample *output,
                    const FFTSample *input, FFTSample *tmp);
+void ff_imdct_calc_3dn2(MDCTContext *s, FFTSample *output,
+                        const FFTSample *input, FFTSample *tmp);
+void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output,
+                       const FFTSample *input, FFTSample *tmp);
 void ff_mdct_calc(MDCTContext *s, FFTSample *out,
                   const FFTSample *input, FFTSample *tmp);
 void ff_mdct_end(MDCTContext *s);

 #define WARPER8_16(name8, name16)\
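Most of the changes in this last part are the ff_ prefix the FFT entry points gained (fft_inits becomes ff_fft_init, and so on). For orientation, a hedged sketch of how the FFT side of this API is driven; error handling, windowing and scaling conventions are left out, and the helper name is illustrative:

/* Sketch: run an in-place 512-point forward FFT with the API declared above.
 * z must hold 1 << nbits FFTComplex values. */
static int run_fft_512(FFTComplex *z)
{
    FFTContext fft;

    if (ff_fft_init(&fft, 9, 0) < 0)   /* nbits = 9 -> 512 points, inverse = 0 */
        return -1;
    ff_fft_permute(&fft, z);           /* reorder input into the order fft_calc expects */
    ff_fft_calc(&fft, z);              /* dispatches to the C/SSE/3DNow!/AltiVec version */
    ff_fft_end(&fft);
    return 0;
}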