Mercurial > libpostproc.hg
annotate postprocess_altivec_template.c @ 26:300e4c8b39ab

Move sign macro to libavutil.

author:   diego
date:     Tue, 10 Oct 2006 07:49:10 +0000
parents:  da3bfee1fa67
children: b55400a067f0
/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef CONFIG_DARWIN
#define AVV(x...) (x)
#else
#define AVV(x...) {x}
#endif
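/* AVV papers over a compiler difference in AltiVec vector-literal syntax:
   Apple's gcc on Darwin expects parenthesized literals, while FSF gcc
   expects braces, so e.g. (vector unsigned char)AVV(1, 2, ...) expands to
   whichever form the compiler at hand accepts. */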

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do { \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh (src_a, src_e); \
        tempB1 = vec_mergel (src_a, src_e); \
        tempC1 = vec_mergeh (src_b, src_f); \
        tempD1 = vec_mergel (src_b, src_f); \
        tempE1 = vec_mergeh (src_c, src_g); \
        tempF1 = vec_mergel (src_c, src_g); \
        tempG1 = vec_mergeh (src_d, src_h); \
        tempH1 = vec_mergel (src_d, src_h); \
        tempA2 = vec_mergeh (tempA1, tempE1); \
        tempB2 = vec_mergel (tempA1, tempE1); \
        tempC2 = vec_mergeh (tempB1, tempF1); \
        tempD2 = vec_mergel (tempB1, tempF1); \
        tempE2 = vec_mergeh (tempC1, tempG1); \
        tempF2 = vec_mergel (tempC1, tempG1); \
        tempG2 = vec_mergeh (tempD1, tempH1); \
        tempH2 = vec_mergel (tempD1, tempH1); \
        src_a = vec_mergeh (tempA2, tempE2); \
        src_b = vec_mergel (tempA2, tempE2); \
        src_c = vec_mergeh (tempB2, tempF2); \
        src_d = vec_mergel (tempB2, tempF2); \
        src_e = vec_mergeh (tempC2, tempG2); \
        src_f = vec_mergel (tempC2, tempG2); \
        src_g = vec_mergeh (tempD2, tempH2); \
        src_h = vec_mergel (tempD2, tempH2); \
    } while (0)
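/* The macro above is, in effect, the classic log2(8) = 3-pass butterfly
   transpose: each round of vec_mergeh/vec_mergel interleaves rows whose
   distance halves (4, then 2, then 1), leaving the 8x8 matrix of 16-bit
   elements transposed in place across the eight vector registers. */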


static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0, but unfortunately this is not always true.
    */
    register int y;
    short __attribute__ ((aligned(16))) data[8];
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);

    data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    data[1] = data[0] * 2 + 1;
    data[2] = c->QP * 2;
    data[3] = c->QP * 4;
    vector signed short v_data = vec_ld(0, data);
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;

#define LOAD_LINE(i) \
    register int j##i = i * stride; \
    vector unsigned char perm##i = vec_lvsl(j##i, src2); \
    const vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
    vector unsigned char v_srcA2##i; \
    if (two_vectors) \
        v_srcA2##i = vec_ld(j##i + 16, src2); \
    const vector unsigned char v_srcA##i = \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)

#define LOAD_LINE_ALIGNED(i) \
    register int j##i = i * stride; \
    const vector unsigned char v_srcA##i = vec_ld(j##i, src2); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)

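/* LOAD_LINE is the standard AltiVec unaligned-load idiom: vec_lvsl builds
   a permute vector from the low four address bits, and vec_perm splices
   the two aligned 16-byte loads straddling the target address into the 16
   bytes actually wanted. The vec_mergeh with zero then widens the low 8
   bytes to 16 bits. Roughly, per line:
       for (x = 0; x < 8; x++)
           line_i[x] = (short)src2[i*stride + x];   // zero-extended
*/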
    // special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

#define ITER(i, j) \
    const vector signed short v_diff##i = \
        vec_sub(v_srcAss##i, v_srcAss##j); \
    const vector signed short v_sum##i = \
        vec_add(v_diff##i, v_dcOffset); \
    const vector signed short v_comp##i = \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold); \
    const vector signed short v_part##i = vec_and(mask, v_comp##i); \
    v_numEq = vec_sum4s(v_part##i, v_numEq);

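/* ITER is a branchless |a - b| <= dcOffset test: with
   dcThreshold = 2*dcOffset + 1, the unsigned comparison
   (unsigned)(diff + dcOffset) < dcThreshold holds exactly when
   -dcOffset <= diff <= dcOffset. Each lane adds 1 to v_numEq when two
   vertically adjacent pixels are "equal" in that sense; the scalar
   equivalent is roughly:
       numEq += (unsigned)(l[i] - l[j] + dcOffset) < 2*dcOffset + 1;
*/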
    ITER(0, 1);
    ITER(1, 2);
    ITER(2, 3);
    ITER(3, 4);
    ITER(4, 5);
    ITER(5, 6);
    ITER(6, 7);
#undef ITER

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold)
    {
        const vector unsigned char mmoP1 = (const vector unsigned char)
            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
        const vector unsigned char mmoP2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

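        /* Same unsigned-compare trick as in ITER, at a coarser scale:
           (unsigned)(diff + 2*QP) > 4*QP is true exactly when
           |diff| > 2*QP, so the block is rejected as soon as any sampled
           pixel pair differs by more than twice the quantizer. */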
        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0, but unfortunately this is not always true.
      Quite a lot of loads/stores could be removed by assuming proper
      alignment of src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    short __attribute__ ((aligned(16))) qp[8];
    qp[0] = c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

    src2 += stride*3;

    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
    vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;

#define LOAD_LINE(i) \
    const vector unsigned char perml##i = \
        vec_lvsl(i * stride, src2); \
    vbA##i = vec_ld(i * stride, src2); \
    vbB##i = vec_ld(i * stride + 16, src2); \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
    vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i) \
    register int j##i = i * stride; \
    vbT##i = vec_ld(j##i, src2); \
    vb##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)vbT##i)

    // special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

    const vector unsigned short v_1 = vec_splat_u16(1);
    const vector unsigned short v_2 = vec_splat_u16(2);
    const vector unsigned short v_4 = vec_splat_u16(4);

    const vector signed short v_diff01 = vec_sub(vb0, vb1);
    const vector unsigned short v_cmp01 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
    const vector signed short v_diff89 = vec_sub(vb8, vb9);
    const vector unsigned short v_cmp89 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
    const vector signed short temp02 = vec_add(vb2, vb3);
    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
    const vector signed short v_sumsB0 = vec_add(temp02, temp03);

    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
    const vector signed short v_sumsB1 = vec_add(temp11, vb4);

    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
    const vector signed short v_sumsB2 = vec_add(temp21, vb5);

    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
    const vector signed short v_sumsB3 = vec_add(temp31, vb6);

    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
    const vector signed short v_sumsB4 = vec_add(temp41, vb7);

    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
    const vector signed short v_sumsB5 = vec_add(temp51, vb8);

    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
    const vector signed short v_sumsB6 = vec_add(temp61, v_last);

    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
    const vector signed short v_sumsB7 = vec_add(temp71, v_last);

    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
    const vector signed short v_sumsB8 = vec_add(temp81, v_last);

    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
    const vector signed short v_sumsB9 = vec_add(temp91, v_last);

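/* v_sumsB0..v_sumsB9 form a sliding-window sum: the first window is
   4*first + vb1 + vb2 + vb3 + 4 (the +4 pre-biases for rounding), and
   each later window drops its oldest tap and adds the next line, with
   v_first/v_last padding beyond the block edges. COMPUTE_VR below then
   yields, per column, roughly
       vr[j] = (sumsB[j-1] + sumsB[j+1] + 2*vb[j]) >> 4
   i.e. the same low-pass kernel the scalar doVertLowPass applies. */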
#define COMPUTE_VR(i, j, k) \
    const vector signed short temps1##i = \
        vec_add(v_sumsB##i, v_sumsB##k); \
    const vector signed short temps2##i = \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)

    COMPUTE_VR(0, 1, 2);
    COMPUTE_VR(1, 2, 3);
    COMPUTE_VR(2, 3, 4);
    COMPUTE_VR(3, 4, 5);
    COMPUTE_VR(4, 5, 6);
    COMPUTE_VR(5, 6, 7);
    COMPUTE_VR(6, 7, 8);
    COMPUTE_VR(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(i) \
    const vector unsigned char perms##i = \
        vec_lvsr(i * stride, src2); \
    const vector unsigned char vf##i = \
        vec_packsu(vr##i, (vector signed short)zero); \
    const vector unsigned char vg##i = \
        vec_perm(vf##i, vbT##i, permHH); \
    const vector unsigned char mask##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i = \
        vec_perm(vg##i, vg##i, perms##i); \
    const vector unsigned char svA##i = \
        vec_sel(vbA##i, vg2##i, mask##i); \
    const vector unsigned char svB##i = \
        vec_sel(vg2##i, vbB##i, mask##i); \
    vec_st(svA##i, i * stride, src2); \
    vec_st(svB##i, i * stride + 16, src2)

#define PACK_AND_STORE_ALIGNED(i) \
    const vector unsigned char vf##i = \
        vec_packsu(vr##i, (vector signed short)zero); \
    const vector unsigned char vg##i = \
        vec_perm(vf##i, vbT##i, permHH); \
    vec_st(vg##i, i * stride, src2)

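/* PACK_AND_STORE is the unaligned-store counterpart of LOAD_LINE: the new
   8 bytes are packed, merged with the untouched upper half via permHH,
   rotated into place with vec_lvsr, and vec_sel with a 0x00/0xFF mask
   splices them into the two aligned blocks read earlier, so bytes outside
   the target span are rewritten unchanged (a read-modify-write store). */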
    // special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        PACK_AND_STORE_ALIGNED(1);
        PACK_AND_STORE_ALIGNED(2);
        PACK_AND_STORE_ALIGNED(3);
        PACK_AND_STORE_ALIGNED(4);
        PACK_AND_STORE_ALIGNED(5);
        PACK_AND_STORE_ALIGNED(6);
        PACK_AND_STORE_ALIGNED(7);
        PACK_AND_STORE_ALIGNED(8);
    } else {
        PACK_AND_STORE(1);
        PACK_AND_STORE(2);
        PACK_AND_STORE(3);
        PACK_AND_STORE(4);
        PACK_AND_STORE(5);
        PACK_AND_STORE(6);
        PACK_AND_STORE(7);
        PACK_AND_STORE(8);
    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}


static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0, but unfortunately this is not always true.
      Quite a lot of loads/stores could be removed by assuming proper
      alignment of src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    short __attribute__ ((aligned(16))) qp[8];
    qp[0] = 8*c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, src2); \
    const vector unsigned char vbA##i = \
        vec_ld(i * stride, src2); \
    const vector unsigned char vbB##i = \
        vec_ld(i * stride + 16, src2); \
    const vector unsigned char vbT##i = \
        vec_perm(vbA##i, vbB##i, perm##i); \
    const vector signed short vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

    src2 += stride*3;

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE

    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    /* d */
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
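    /* In scalar terms, per column, this is roughly:
           mE = 2*(l3 - l6) + 5*(l5 - l4)
           d  = (5 * max(|mE| - min(|lE|, |rE|), 0) + 32) >> 6
       with the sign of d then flipped to oppose mE, so the correction
       applied below moves l4 and l5 in opposite directions only where the
       middle "energy" dominates its neighbours. */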
    /* q */
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    /* clamp */
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);

    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define STORE(i) \
    const vector unsigned char perms##i = \
        vec_lvsr(i * stride, src2); \
    const vector unsigned char vg##i = \
        vec_perm(st##i, vbT##i, permHH); \
    const vector unsigned char mask##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i = \
        vec_perm(vg##i, vg##i, perms##i); \
    const vector unsigned char svA##i = \
        vec_sel(vbA##i, vg2##i, mask##i); \
    const vector unsigned char svB##i = \
        vec_sel(vg2##i, vbB##i, mask##i); \
    vec_st(svA##i, i * stride, src2); \
    vec_st(svB##i, i * stride + 16, src2)

    STORE(4);
    STORE(5);
}

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could avoid recomputing the perm vector by assuming
      (stride % 16) == 0, but unfortunately this is not always true.
      Quite a lot of loads/stores could be removed by assuming proper
      alignment of src & stride :-(
    */
    uint8_t *srcCopy = src;
    uint8_t __attribute__((aligned(16))) dt[16];
    const vector unsigned char vuint8_1 = vec_splat_u8(1);
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, srcCopy); \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    {
        const vector unsigned char trunc_perm = (vector unsigned char)
            AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
        const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
        const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
        const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
        const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do { \
    const vector unsigned char s##op##_1 = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s##op##_2 = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s##op##_6 = vec_##op(s##op##_1, s##op##_2); \
    const vector unsigned char s##op##_8h = vec_mergeh(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_8l = vec_mergel(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_9 = vec_##op(s##op##_8h, s##op##_8l); \
    const vector unsigned char s##op##_9h = vec_mergeh(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_9l = vec_mergel(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_10 = vec_##op(s##op##_9h, s##op##_9l); \
    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_11 = vec_##op(s##op##_10h, s##op##_10l); \
    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)

        vector unsigned char v_min;
        vector unsigned char v_max;
        EXTRACT(min);
        EXTRACT(max);
#undef EXTRACT

        if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
            return;

        v_avg = vec_avg(v_min, v_max);
    }
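    /* EXTRACT reduces the 8x8 interior of the block to a single min or
       max in log time: pairwise vec_min/vec_max across lines first, then
       successive mergeh/mergel + op rounds fold the survivors until every
       byte lane holds the block-wide extremum. If max - min stays below
       deringThreshold there is no ringing to remove and we bail out
       early; otherwise v_avg, the mid-point, classifies pixels below. */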

    signed int __attribute__((aligned(16))) S[8];
    {
        const vector unsigned short mask1 = (vector unsigned short)
            AVV(0x0001, 0x0002, 0x0004, 0x0008,
                0x0010, 0x0020, 0x0040, 0x0080);
        const vector unsigned short mask2 = (vector unsigned short)
            AVV(0x0100, 0x0200, 0x0000, 0x0000,
                0x0000, 0x0000, 0x0000, 0x0000);

        const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
        const vector unsigned int vuint32_1 = vec_splat_u32(1);

#define COMPARE(i) \
    vector signed int sum##i; \
    do { \
        const vector unsigned char cmp##i = \
            (vector unsigned char)vec_cmpgt(src##i, v_avg); \
        const vector unsigned short cmpHi##i = \
            (vector unsigned short)vec_mergeh(cmp##i, cmp##i); \
        const vector unsigned short cmpLi##i = \
            (vector unsigned short)vec_mergel(cmp##i, cmp##i); \
        const vector signed short cmpHf##i = \
            (vector signed short)vec_and(cmpHi##i, mask1); \
        const vector signed short cmpLf##i = \
            (vector signed short)vec_and(cmpLi##i, mask2); \
        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero); \
        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
        sum##i = vec_sums(sumq##i, zero); } while (0)

        COMPARE(0);
        COMPARE(1);
        COMPARE(2);
        COMPARE(3);
        COMPARE(4);
        COMPARE(5);
        COMPARE(6);
        COMPARE(7);
        COMPARE(8);
        COMPARE(9);
#undef COMPARE

        vector signed int sumA2;
        vector signed int sumB2;
        {
            const vector signed int sump02 = vec_mergel(sum0, sum2);
            const vector signed int sump13 = vec_mergel(sum1, sum3);
            const vector signed int sumA = vec_mergel(sump02, sump13);

            const vector signed int sump46 = vec_mergel(sum4, sum6);
            const vector signed int sump57 = vec_mergel(sum5, sum7);
            const vector signed int sumB = vec_mergel(sump46, sump57);

            const vector signed int sump8A = vec_mergel(sum8, zero);
            const vector signed int sump9B = vec_mergel(sum9, zero);
            const vector signed int sumC = vec_mergel(sump8A, sump9B);

            const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
            const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
            const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
            const vector signed int t2A = vec_or(sumA, tA);
            const vector signed int t2B = vec_or(sumB, tB);
            const vector signed int t2C = vec_or(sumC, tC);
            const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                                  vec_sl(t2A, vuint32_1));
            const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                                  vec_sl(t2B, vuint32_1));
            const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                                  vec_sl(t2C, vuint32_1));
            const vector signed int yA = vec_and(t2A, t3A);
            const vector signed int yB = vec_and(t2B, t3B);
            const vector signed int yC = vec_and(t2C, t3C);

            const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
            const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
            const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
            const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
            const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
            const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
            const vector signed int sumAp = vec_and(yA,
                                                    vec_and(sumAd4, sumAd8));
            const vector signed int sumBp = vec_and(yB,
                                                    vec_and(sumBd4, sumBd8));
            sumA2 = vec_or(sumAp,
                           vec_sra(sumAp,
                                   vuint32_16));
            sumB2 = vec_or(sumBp,
                           vec_sra(sumBp,
                                   vuint32_16));
        }
        vec_st(sumA2, 0, S);
        vec_st(sumB2, 16, S);
    }

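    /* In effect: each COMPARE packs one line into a bitmask (bit x set
       when pixel x is brighter than v_avg), and vec_nor plants the
       complemented mask in the upper half-word so runs of "below average"
       pixels are detected the same way. The shift-by-1 ANDs test
       horizontal neighbours, the perm-by-4/8 ANDs fold in the two lines
       below, and the final OR merges both polarities, so S[] flags pixels
       whose whole 3x3 neighbourhood lies on one side of the average -
       the candidates for the filtering further down. */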
    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

    int __attribute__((aligned(16))) tQP2[4];
    tQP2[0] = c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector unsigned char vuint8_2 = vec_splat_u8(2);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector unsigned char permA1 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
            0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA2 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
            0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA1inc = (vector unsigned char)
        AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char permA2inc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char magic = (vector unsigned char)
        AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char extractPerm = (vector unsigned char)
        AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
            0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
    const vector unsigned char extractPermInc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
            0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
    const vector unsigned char identity = vec_lvsl(0, (unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char eightLeft = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);

#define F_INIT(i) \
    vector unsigned char tenRightM##i = tenRight; \
    vector unsigned char permA1M##i = permA1; \
    vector unsigned char permA2M##i = permA2; \
    vector unsigned char extractPermM##i = extractPerm

#define F2(i, j, k, l) \
    if (S[i] & (1 << (l+1))) { \
        const vector unsigned char a_##j##_A##l = \
            vec_perm(src##i, src##j, permA1M##i); \
        const vector unsigned char a_##j##_B##l = \
            vec_perm(a_##j##_A##l, src##k, permA2M##i); \
        const vector signed int a_##j##_sump##l = \
            (vector signed int)vec_msum(a_##j##_B##l, magic, \
                                        (vector unsigned int)zero); \
        vector signed int F_##j##_##l = \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4); \
        F_##j##_##l = vec_splat(F_##j##_##l, 3); \
        const vector signed int p_##j##_##l = \
            (vector signed int)vec_perm(src##j, \
                                        (vector unsigned char)zero, \
                                        extractPermM##i); \
        const vector signed int sum_##j##_##l = vec_add(p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub(p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l; \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l)) \
            newpm_##j##_##l = sum_##j##_##l; \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l)) \
            newpm_##j##_##l = diff_##j##_##l; \
        else newpm_##j##_##l = F_##j##_##l; \
        const vector unsigned char newpm2_##j##_##l = \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15); \
        const vector unsigned char mask##j##l = vec_add(identity, \
                                                        tenRightM##i); \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l); \
    } \
    permA1M##i = vec_add(permA1M##i, permA1inc); \
    permA2M##i = vec_add(permA2M##i, permA2inc); \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft); \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)

#define ITER(i, j, k) \
    F_INIT(i); \
    F2(i, j, k, 0); \
    F2(i, j, k, 1); \
    F2(i, j, k, 2); \
    F2(i, j, k, 3); \
    F2(i, j, k, 4); \
    F2(i, j, k, 5); \
    F2(i, j, k, 6); \
    F2(i, j, k, 7)

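/* F2 handles one flagged pixel (bit l+1 of line i's mask in S[]): the
   permA1M/permA2M perms gather the pixel's 3x3 neighbourhood from lines
   i, j, k into one vector, vec_msum with `magic` applies the smoothing
   kernel (1 2 1 / 2 4 2 / 1 2 1), and F = (sum + 8) >> 4. The result is
   clamped to within QP/2 + 1 of the original pixel value, and the single
   new byte is spliced back into src##j through a mask built from
   `identity` plus the sliding `tenRightM` byte. The perm constants are
   then advanced one pixel for the next bit. */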
    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_LINE(i) \
    const vector unsigned char permST##i = \
        vec_lvsr(i * stride, srcCopy); \
    const vector unsigned char maskST##i = \
        vec_perm((vector unsigned char)zero, \
                 (vector unsigned char)neg1, permST##i); \
    src##i = vec_perm(src##i, src##i, permST##i); \
    sA##i = vec_sel(sA##i, src##i, maskST##i); \
    sB##i = vec_sel(src##i, sB##i, maskST##i); \
    vec_st(sA##i, i * stride, srcCopy); \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);

#undef STORE_LINE
#undef ITER
#undef F2
}

#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
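/* Presumably no dedicated AltiVec versions are needed here: these entry
   points fall back to the C implementations, since horizontal deblocking
   can instead go through the transpose helpers at the end of this file
   and reuse the vertical AltiVec filters (see the "(transposed)
   horizontal deblocks" comments above). */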

static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

    tempBluredPast[127] = maxNoise[0];
    tempBluredPast[128] = maxNoise[1];
    tempBluredPast[129] = maxNoise[2];

#define LOAD_LINE(src, i) \
    register int j##src##i = i * stride; \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i = \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
    vector signed short v_##src##Ass##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlured, 0);
    LOAD_LINE(tempBlured, 1);
    LOAD_LINE(tempBlured, 2);
    LOAD_LINE(tempBlured, 3);
    LOAD_LINE(tempBlured, 4);
    LOAD_LINE(tempBlured, 5);
    LOAD_LINE(tempBlured, 6);
    LOAD_LINE(tempBlured, 7);
#undef LOAD_LINE

#define ACCUMULATE_DIFFS(i) \
    vector signed short v_d##i = vec_sub(v_tempBluredAss##i, \
                                         v_srcAss##i); \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp); \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS

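    /* v_dp accumulates the sum of squared differences between the block
       and its temporally blurred history (vec_msums of the diff with
       itself), while v_sysdp keeps the plain signed sum of the diffs;
       the scalar d extracted below is thus the block's temporal noise
       estimate. */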
    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         + (*(tempBluredPast-256))
         + (*(tempBluredPast-1)) + (*(tempBluredPast+1))
         + (*(tempBluredPast+256))
         + 4) >> 3;

    *tempBluredPast = i;

    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBluredAss##i = vec_avg(v_tempBluredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBluredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i) \
            const vector signed short v_temp##i = \
                vec_mladd(v_tempBluredAss##i, \
                          vsint16_7, v_srcAss##i); \
            const vector signed short v_temp2##i = \
                vec_add(v_temp##i, vsint16_4); \
            v_tempBluredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i) \
            const vector signed short v_temp##i = \
                vec_mladd(v_tempBluredAss##i, \
                          vsint16_3, v_srcAss##i); \
            const vector signed short v_temp2##i = \
                vec_add(v_temp##i, vsint16_2); \
            v_tempBluredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

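    /* Blend weights by noise level: above maxNoise[2] the history is
       discarded (copy src), between maxNoise[1] and [2] it is averaged
       1:1 with src, below maxNoise[0] the history dominates 7:1, and in
       between it is blended 3:1 - the quieter the block, the more
       aggressively it is temporally smoothed. */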
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(src, i) \
    const vector unsigned char perms##src##i = \
        vec_lvsr(i * stride, src); \
    const vector unsigned char vf##src##i = \
        vec_packsu(v_tempBluredAss##i, (vector signed short)zero); \
    const vector unsigned char vg##src##i = \
        vec_perm(vf##src##i, v_##src##A##i, permHH); \
    const vector unsigned char mask##src##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
    const vector unsigned char vg2##src##i = \
        vec_perm(vg##src##i, vg##src##i, perms##src##i); \
    const vector unsigned char svA##src##i = \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i); \
    const vector unsigned char svB##src##i = \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i); \
    vec_st(svA##src##i, i * stride, src); \
    vec_st(svB##src##i, i * stride + 16, src)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlured, 0);
    PACK_AND_STORE(tempBlured, 1);
    PACK_AND_STORE(tempBlured, 2);
    PACK_AND_STORE(tempBlured, 3);
    PACK_AND_STORE(tempBlured, 4);
    PACK_AND_STORE(tempBlured, 5);
    PACK_AND_STORE(tempBlured, 6);
    PACK_AND_STORE(tempBlured, 7);
#undef PACK_AND_STORE
}

static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
    vector unsigned char srcA##i = vec_ld(i * stride, src); \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
    vector unsigned char srcC##i = vec_ld(j * stride, src); \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src); \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2 = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3 = vec_mergel(tempB, tempJ);
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6 = vec_mergeh(tempD, tempL);
    vector unsigned char temp7 = vec_mergel(tempD, tempL);
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);
    temp8 = vec_mergeh(tempE, tempM);
    temp9 = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0, 0, dst);
    vec_st(temp1, 16, dst);
    vec_st(temp2, 32, dst);
    vec_st(temp3, 48, dst);
    vec_st(temp4, 64, dst);
    vec_st(temp5, 80, dst);
    vec_st(temp6, 96, dst);
    vec_st(temp7, 112, dst);
    vec_st(temp8, 128, dst);
    vec_st(temp9, 144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}
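/* This helper and its inverse below are presumably what the "(transposed)
   horizontal deblocks" comments refer to: a 16x8 block is rotated into a
   16-byte-aligned packed scratch buffer, the vertical AltiVec filters run
   on it (hitting their aligned fast paths), and the result is rotated
   back with unaligned read-modify-write stores. */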

static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
    const vector unsigned char magic_perm = (const vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
            0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char src##i = vec_ld(i * 16, src); \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_DOUBLE_LINE(i, j) \
    vector unsigned char dstA##i = vec_ld(i * stride, dst); \
    vector unsigned char dstB##i = vec_ld(i * stride + 16, dst); \
    vector unsigned char dstA##j = vec_ld(j * stride, dst); \
    vector unsigned char dstB##j = vec_ld(j * stride + 16, dst); \
    vector unsigned char align##i = vec_lvsr(i * stride, dst); \
    vector unsigned char align##j = vec_lvsr(j * stride, dst); \
    vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
    vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
    vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
    vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
    vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
    vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
    vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
    vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
    vec_st(dstAF##i, i * stride, dst); \
    vec_st(dstBF##i, i * stride + 16, dst); \
    vec_st(dstAF##j, j * stride, dst); \
    vec_st(dstBF##j, j * stride + 16, dst)

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}
