GNU Radio Manual and C++ API Reference  3.7.4.1
volk_32fc_x2_dot_prod_32fc.h
#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdio.h>
#include <string.h>


#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_dot_prod_32fc_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  float* res = (float*) result;
  const float* in = (const float*) input;
  const float* tp = (const float*) taps;
  unsigned int n_2_ccomplex_blocks = num_points/2;
  unsigned int isodd = num_points & 1;

  float sum0[2] = {0,0};
  float sum1[2] = {0,0};
  unsigned int i = 0;

  // Process two complex samples per iteration, accumulating into two
  // independent partial sums.
  for(i = 0; i < n_2_ccomplex_blocks; ++i) {
    sum0[0] += in[0] * tp[0] - in[1] * tp[1];
    sum0[1] += in[0] * tp[1] + in[1] * tp[0];
    sum1[0] += in[2] * tp[2] - in[3] * tp[3];
    sum1[1] += in[2] * tp[3] + in[3] * tp[2];

    in += 4;
    tp += 4;
  }

  res[0] = sum0[0] + sum1[0];
  res[1] = sum0[1] + sum1[1];

  // Cleanup if we had an odd number of points
  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }
}

#endif /*LV_HAVE_GENERIC*/
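
/*
 * For reference, a minimal usage sketch of the generic kernel above (not part
 * of this header).  It assumes the kernel header is installed as
 * <volk/volk_32fc_x2_dot_prod_32fc.h> and defines LV_HAVE_GENERIC by hand so
 * the generic path is visible; real applications would normally call the
 * volk_32fc_x2_dot_prod_32fc dispatcher from <volk/volk.h> instead.  Note the
 * kernel computes a plain (unconjugated) complex dot product.
 *
 *   #define LV_HAVE_GENERIC
 *   #include <volk/volk_32fc_x2_dot_prod_32fc.h>
 *   #include <complex.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *     lv_32fc_t in[3]  = { 1.0f + 2.0f*I, 3.0f - 1.0f*I, 0.5f + 0.0f*I };
 *     lv_32fc_t tps[3] = { 2.0f + 0.0f*I, 0.0f + 1.0f*I, 4.0f - 4.0f*I };
 *     lv_32fc_t result;
 *
 *     // result = sum_i in[i] * tps[i] (no conjugation)
 *     volk_32fc_x2_dot_prod_32fc_generic(&result, in, tps, 3);
 *
 *     printf("%f%+fi\n", crealf(result), cimagf(result)); // 5.000000+5.000000i
 *     return 0;
 *   }
 */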


#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  asm
  (
    "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
    "# const float *taps, unsigned num_bytes)\n\t"
    "# float sum0 = 0;\n\t"
    "# float sum1 = 0;\n\t"
    "# float sum2 = 0;\n\t"
    "# float sum3 = 0;\n\t"
    "# do {\n\t"
    "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
    "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
    "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
    "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
    "# input += 4;\n\t"
    "# taps += 4; \n\t"
    "# } while (--n_2_ccomplex_blocks != 0);\n\t"
    "# result[0] = sum0 + sum2;\n\t"
    "# result[1] = sum1 + sum3;\n\t"
    "# TODO: prefetch and better scheduling\n\t"
    " xor %%r9, %%r9\n\t"
    " xor %%r10, %%r10\n\t"
    " movq %%rcx, %%rax\n\t"
    " movq %%rcx, %%r8\n\t"
    " movq %[rsi], %%r9\n\t"
    " movq %[rdx], %%r10\n\t"
    " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
    " movups 0(%%r9), %%xmm0\n\t"
    " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
    " movups 0(%%r10), %%xmm2\n\t"
    " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
    " shr $4, %%r8\n\t"
    " jmp .%=L1_test\n\t"
    " # 4 taps / loop\n\t"
    " # something like ?? cycles / loop\n\t"
    ".%=Loop1: \n\t"
    "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
    "# movups (%%r9), %%xmmA\n\t"
    "# movups (%%r10), %%xmmB\n\t"
    "# movups %%xmmA, %%xmmZ\n\t"
    "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
    "# mulps %%xmmB, %%xmmA\n\t"
    "# mulps %%xmmZ, %%xmmB\n\t"
    "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
    "# xorps %%xmmPN, %%xmmA\n\t"
    "# movups %%xmmA, %%xmmZ\n\t"
    "# unpcklps %%xmmB, %%xmmA\n\t"
    "# unpckhps %%xmmB, %%xmmZ\n\t"
    "# movups %%xmmZ, %%xmmY\n\t"
    "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
    "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
    "# addps %%xmmZ, %%xmmA\n\t"
    "# addps %%xmmA, %%xmmC\n\t"
    "# A=xmm0, B=xmm2, Z=xmm4\n\t"
    "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
    " movups 16(%%r9), %%xmm1\n\t"
    " movups %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " movups 16(%%r10), %%xmm3\n\t"
    " movups %%xmm1, %%xmm5\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm3, %%xmm1\n\t"
    " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
    " addps %%xmm1, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " movups 32(%%r9), %%xmm0\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    " mulps %%xmm5, %%xmm3\n\t"
    " add $32, %%r9\n\t"
    " movups 32(%%r10), %%xmm2\n\t"
    " addps %%xmm3, %%xmm7\n\t"
    " add $32, %%r10\n\t"
    ".%=L1_test:\n\t"
    " dec %%rax\n\t"
    " jge .%=Loop1\n\t"
    " # We've handled the bulk of multiplies up to here.\n\t"
    " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
    " # If so, we've got 2 more taps to do.\n\t"
    " and $1, %%r8\n\t"
    " je .%=Leven\n\t"
    " # The count was odd, do 2 more taps.\n\t"
    " # Note that we've already got mm0/mm2 preloaded\n\t"
    " # from the main loop.\n\t"
    " movups %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    ".%=Leven:\n\t"
    " # neg inversor\n\t"
    " xorps %%xmm1, %%xmm1\n\t"
    " mov $0x80000000, %%r9\n\t"
    " movd %%r9, %%xmm1\n\t"
    " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
    " # pfpnacc\n\t"
    " xorps %%xmm1, %%xmm6\n\t"
    " movups %%xmm6, %%xmm2\n\t"
    " unpcklps %%xmm7, %%xmm6\n\t"
    " unpckhps %%xmm7, %%xmm2\n\t"
    " movups %%xmm2, %%xmm3\n\t"
    " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
    " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
    " addps %%xmm2, %%xmm6\n\t"
    " # xmm6 = r1 i2 r3 i4\n\t"
    " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
    " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
    " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
    :
    : [rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
    : "rax", "r8", "r9", "r10"
  );


  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }

  return;

}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */



#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int halfPoints = num_points/2;
  unsigned int isodd = num_points & 1;

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_loadu_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
    y = _mm_loadu_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

    yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
    yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

    tmp1 = _mm_mul_ps(x,yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

    x = _mm_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br

    tmp2 = _mm_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

    z = _mm_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 2;
    b += 2;
  }

  __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

  _mm_storeu_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
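
/*
 * The SSE3 kernel above (and its aligned twin later in this file) leans on the
 * moveldup / movehdup / addsub decomposition of a complex multiply.  A scalar
 * sketch of the same lane arithmetic for a single pair, illustrative only and
 * not part of this header:
 *
 *   // x = ar + ai*i, y = br + bi*i, out = x*y, mirroring the SSE3 lane math:
 *   //   tmp1 = x * moveldup(y)       -> (ar*br, ai*br)
 *   //   tmp2 = swap(x) * movehdup(y) -> (ai*bi, ar*bi)
 *   //   addsub(tmp1, tmp2)           -> (ar*br - ai*bi, ai*br + ar*bi)
 *   static void complex_mul_scalar(const float x[2], const float y[2], float out[2])
 *   {
 *     float tmp1_re = x[0] * y[0];  // ar*br
 *     float tmp1_im = x[1] * y[0];  // ai*br
 *     float tmp2_re = x[1] * y[1];  // ai*bi (x swapped internally first)
 *     float tmp2_im = x[0] * y[1];  // ar*bi
 *     out[0] = tmp1_re - tmp2_re;   // addsub subtracts in the even lane
 *     out[1] = tmp1_im + tmp2_im;   // addsub adds in the odd lane
 *   }
 */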

#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;
  const unsigned int isodd = num_points & 3;

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64 *p_result;

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  // Sign bit in the lowest 32-bit lane only; used below to negate the
  // accumulated sum of imaginary*imaginary products (placed in lane 0).
  static const __m128i neg = {0x0000000080000000, 0};

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_loadu_ps(p_input);
    xmm1 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_loadu_ps(p_input);
    xmm3 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    //imaginary vector from input
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    //real vector from input
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    //imaginary vector from taps
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    //real vector from taps
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1); // sum(ar*br) -> lane 0
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1); // sum(ai*bi) -> lane 0

    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2); // sum(ar*bi) -> lane 1
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2); // sum(ai*br) -> lane 1

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  // Negate sum(ai*bi) so the real part becomes sum(ar*br) - sum(ai*bi)
  real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  im0 = _mm_add_ps(im0, real0);

  _mm_storel_pi(p_result, im0);

  // Cleanup for the 0-3 trailing points
  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/
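
/*
 * In the SSE4.1 kernels the _mm_dp_ps immediate controls both which lanes
 * enter the dot product (high nibble) and which result lane receives the sum
 * (low nibble): 0xf1 writes lane 0, 0xf2 writes lane 1, so the real and
 * imaginary accumulators can later be merged with a single add.  A standalone
 * sketch, not part of this header (compile with SSE4.1 enabled, e.g. -msse4.1):
 *
 *   #include <smmintrin.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *     __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
 *     __m128 b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
 *
 *     // 0xf1: multiply all four lanes, sum, place the result in lane 0 only
 *     __m128 d0 = _mm_dp_ps(a, b, 0xf1);
 *     // 0xf2: same sum, placed in lane 1; all other lanes are zeroed
 *     __m128 d1 = _mm_dp_ps(a, b, 0xf2);
 *
 *     float r0[4], r1[4];
 *     _mm_storeu_ps(r0, d0);
 *     _mm_storeu_ps(r1, d1);
 *     printf("lane0 = %f, lane1 = %f\n", r0[0], r1[1]); // both 70.000000
 *     return 0;
 *   }
 */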



#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H*/
#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdio.h>
#include <string.h>


#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_dot_prod_32fc_a_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;

  float* res = (float*) result;
  const float* in = (const float*) input;
  const float* tp = (const float*) taps;
  unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
  unsigned int isodd = num_points & 1;

  float sum0[2] = {0,0};
  float sum1[2] = {0,0};
  unsigned int i = 0;

  // Process two complex samples per iteration, accumulating into two
  // independent partial sums.
  for(i = 0; i < n_2_ccomplex_blocks; ++i) {
    sum0[0] += in[0] * tp[0] - in[1] * tp[1];
    sum0[1] += in[0] * tp[1] + in[1] * tp[0];
    sum1[0] += in[2] * tp[2] - in[3] * tp[3];
    sum1[1] += in[2] * tp[3] + in[3] * tp[2];

    in += 4;
    tp += 4;
  }

  res[0] = sum0[0] + sum1[0];
  res[1] = sum0[1] + sum1[1];

  // Cleanup if we had an odd number of points
  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }
}

#endif /*LV_HAVE_GENERIC*/


#if LV_HAVE_SSE && LV_HAVE_64


static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  asm
  (
    "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
    "# const float *taps, unsigned num_bytes)\n\t"
    "# float sum0 = 0;\n\t"
    "# float sum1 = 0;\n\t"
    "# float sum2 = 0;\n\t"
    "# float sum3 = 0;\n\t"
    "# do {\n\t"
    "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
    "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
    "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
    "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
    "# input += 4;\n\t"
    "# taps += 4; \n\t"
    "# } while (--n_2_ccomplex_blocks != 0);\n\t"
    "# result[0] = sum0 + sum2;\n\t"
    "# result[1] = sum1 + sum3;\n\t"
    "# TODO: prefetch and better scheduling\n\t"
    " xor %%r9, %%r9\n\t"
    " xor %%r10, %%r10\n\t"
    " movq %%rcx, %%rax\n\t"
    " movq %%rcx, %%r8\n\t"
    " movq %[rsi], %%r9\n\t"
    " movq %[rdx], %%r10\n\t"
    " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
    " movaps 0(%%r9), %%xmm0\n\t"
    " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
    " movaps 0(%%r10), %%xmm2\n\t"
    " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
    " shr $4, %%r8\n\t"
    " jmp .%=L1_test\n\t"
    " # 4 taps / loop\n\t"
    " # something like ?? cycles / loop\n\t"
    ".%=Loop1: \n\t"
    "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
    "# movaps (%%r9), %%xmmA\n\t"
    "# movaps (%%r10), %%xmmB\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
    "# mulps %%xmmB, %%xmmA\n\t"
    "# mulps %%xmmZ, %%xmmB\n\t"
    "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
    "# xorps %%xmmPN, %%xmmA\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# unpcklps %%xmmB, %%xmmA\n\t"
    "# unpckhps %%xmmB, %%xmmZ\n\t"
    "# movaps %%xmmZ, %%xmmY\n\t"
    "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
    "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
    "# addps %%xmmZ, %%xmmA\n\t"
    "# addps %%xmmA, %%xmmC\n\t"
    "# A=xmm0, B=xmm2, Z=xmm4\n\t"
    "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
    " movaps 16(%%r9), %%xmm1\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " movaps 16(%%r10), %%xmm3\n\t"
    " movaps %%xmm1, %%xmm5\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm3, %%xmm1\n\t"
    " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
    " addps %%xmm1, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " movaps 32(%%r9), %%xmm0\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    " mulps %%xmm5, %%xmm3\n\t"
    " add $32, %%r9\n\t"
    " movaps 32(%%r10), %%xmm2\n\t"
    " addps %%xmm3, %%xmm7\n\t"
    " add $32, %%r10\n\t"
    ".%=L1_test:\n\t"
    " dec %%rax\n\t"
    " jge .%=Loop1\n\t"
    " # We've handled the bulk of multiplies up to here.\n\t"
    " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
    " # If so, we've got 2 more taps to do.\n\t"
    " and $1, %%r8\n\t"
    " je .%=Leven\n\t"
    " # The count was odd, do 2 more taps.\n\t"
    " # Note that we've already got mm0/mm2 preloaded\n\t"
    " # from the main loop.\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    ".%=Leven:\n\t"
    " # neg inversor\n\t"
    " xorps %%xmm1, %%xmm1\n\t"
    " mov $0x80000000, %%r9\n\t"
    " movd %%r9, %%xmm1\n\t"
    " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
    " # pfpnacc\n\t"
    " xorps %%xmm1, %%xmm6\n\t"
    " movaps %%xmm6, %%xmm2\n\t"
    " unpcklps %%xmm7, %%xmm6\n\t"
    " unpckhps %%xmm7, %%xmm2\n\t"
    " movaps %%xmm2, %%xmm3\n\t"
    " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
    " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
    " addps %%xmm2, %%xmm6\n\t"
    " # xmm6 = r1 i2 r3 i4\n\t"
    " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
    " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
    " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
    :
    : [rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
    : "rax", "r8", "r9", "r10"
  );


  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }

  return;

}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */

#if LV_HAVE_SSE && LV_HAVE_32

static inline void volk_32fc_x2_dot_prod_32fc_a_sse_32(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  volk_32fc_x2_dot_prod_32fc_a_generic(result, input, taps, num_points);

#if 0
  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  asm volatile
  (
    " #pushl %%ebp\n\t"
    " #movl %%esp, %%ebp\n\t"
    " movl 12(%%ebp), %%eax # input\n\t"
    " movl 16(%%ebp), %%edx # taps\n\t"
    " movl 20(%%ebp), %%ecx # n_bytes\n\t"
    " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
    " movaps 0(%%eax), %%xmm0\n\t"
    " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
    " movaps 0(%%edx), %%xmm2\n\t"
    " shrl $5, %%ecx # ecx = n_2_ccomplex_blocks / 2\n\t"
    " jmp .%=L1_test\n\t"
    " # 4 taps / loop\n\t"
    " # something like ?? cycles / loop\n\t"
    ".%=Loop1: \n\t"
    "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
    "# movaps (%%eax), %%xmmA\n\t"
    "# movaps (%%edx), %%xmmB\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
    "# mulps %%xmmB, %%xmmA\n\t"
    "# mulps %%xmmZ, %%xmmB\n\t"
    "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
    "# xorps %%xmmPN, %%xmmA\n\t"
    "# movaps %%xmmA, %%xmmZ\n\t"
    "# unpcklps %%xmmB, %%xmmA\n\t"
    "# unpckhps %%xmmB, %%xmmZ\n\t"
    "# movaps %%xmmZ, %%xmmY\n\t"
    "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
    "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
    "# addps %%xmmZ, %%xmmA\n\t"
    "# addps %%xmmA, %%xmmC\n\t"
    "# A=xmm0, B=xmm2, Z=xmm4\n\t"
    "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
    " movaps 16(%%eax), %%xmm1\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " movaps 16(%%edx), %%xmm3\n\t"
    " movaps %%xmm1, %%xmm5\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm3, %%xmm1\n\t"
    " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
    " addps %%xmm1, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " movaps 32(%%eax), %%xmm0\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    " mulps %%xmm5, %%xmm3\n\t"
    " addl $32, %%eax\n\t"
    " movaps 32(%%edx), %%xmm2\n\t"
    " addps %%xmm3, %%xmm7\n\t"
    " addl $32, %%edx\n\t"
    ".%=L1_test:\n\t"
    " decl %%ecx\n\t"
    " jge .%=Loop1\n\t"
    " # We've handled the bulk of multiplies up to here.\n\t"
    " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
    " # If so, we've got 2 more taps to do.\n\t"
    " movl 20(%%ebp), %%ecx # n_2_ccomplex_blocks\n\t"
    " shrl $4, %%ecx\n\t"
    " andl $1, %%ecx\n\t"
    " je .%=Leven\n\t"
    " # The count was odd, do 2 more taps.\n\t"
    " # Note that we've already got mm0/mm2 preloaded\n\t"
    " # from the main loop.\n\t"
    " movaps %%xmm0, %%xmm4\n\t"
    " mulps %%xmm2, %%xmm0\n\t"
    " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
    " addps %%xmm0, %%xmm6\n\t"
    " mulps %%xmm4, %%xmm2\n\t"
    " addps %%xmm2, %%xmm7\n\t"
    ".%=Leven:\n\t"
    " # neg inversor\n\t"
    " movl 8(%%ebp), %%eax \n\t"
    " xorps %%xmm1, %%xmm1\n\t"
    " movl $0x80000000, (%%eax)\n\t"
    " movss (%%eax), %%xmm1\n\t"
    " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
    " # pfpnacc\n\t"
    " xorps %%xmm1, %%xmm6\n\t"
    " movaps %%xmm6, %%xmm2\n\t"
    " unpcklps %%xmm7, %%xmm6\n\t"
    " unpckhps %%xmm7, %%xmm2\n\t"
    " movaps %%xmm2, %%xmm3\n\t"
    " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
    " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
    " addps %%xmm2, %%xmm6\n\t"
    " # xmm6 = r1 i2 r3 i4\n\t"
    " #movl 8(%%ebp), %%eax # @result\n\t"
    " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
    " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
    " movlps %%xmm6, (%%eax) # store low 2x32 bits (complex) to memory\n\t"
    " #popl %%ebp\n\t"
    :
    :
    : "eax", "ecx", "edx"
  );


  int getem = num_bytes % 16;

  if(isodd) {
    *result += (input[num_points - 1] * taps[num_points - 1]);
  }

  return;
#endif
}

#endif /*LV_HAVE_SSE && LV_HAVE_32*/

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int halfPoints = num_bytes >> 4;

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_load_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
    y = _mm_load_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

    yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
    yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

    tmp1 = _mm_mul_ps(x,yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

    x = _mm_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br

    tmp2 = _mm_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

    z = _mm_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 2;
    b += 2;
  }

  __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

  _mm_store_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/

#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;
  const unsigned int isodd = num_points & 3;

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64 *p_result;

  // Sign bit in the lowest 32-bit lane only; used below to negate the
  // accumulated sum of imaginary*imaginary products (placed in lane 0).
  static const __m128i neg = {0x0000000080000000, 0};

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_load_ps(p_input);
    xmm1 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_load_ps(p_input);
    xmm3 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    //imaginary vector from input
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    //real vector from input
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    //imaginary vector from taps
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    //real vector from taps
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1); // sum(ar*br) -> lane 0
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1); // sum(ai*bi) -> lane 0

    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2); // sum(ar*bi) -> lane 1
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2); // sum(ai*br) -> lane 1

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  // Negate sum(ai*bi) so the real part becomes sum(ar*br) - sum(ai*bi)
  real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  im0 = _mm_add_ps(im0, real0);

  _mm_storel_pi(p_result, im0);

  // Cleanup for the 0-3 trailing points
  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/

#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H*/
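
/*
 * The _a_ kernels in the second half of this header require 16-byte aligned
 * buffers.  A hedged end-to-end sketch using the public dispatcher, which
 * picks an aligned or unaligned implementation based on the pointers it is
 * given (volk_malloc, volk_free and volk_get_alignment are assumed to be
 * available in this VOLK version):
 *
 *   #include <volk/volk.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *     const unsigned int N = 1024;
 *     const size_t alignment = volk_get_alignment();
 *
 *     lv_32fc_t* in   = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), alignment);
 *     lv_32fc_t* taps = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), alignment);
 *     lv_32fc_t result;
 *     unsigned int i;
 *
 *     for (i = 0; i < N; i++) {
 *       in[i]   = lv_cmake(1.0f, 0.0f);
 *       taps[i] = lv_cmake(0.0f, 1.0f);
 *     }
 *
 *     // Buffers from volk_malloc are aligned, so the dispatcher can use an
 *     // aligned (_a_) kernel here.
 *     volk_32fc_x2_dot_prod_32fc(&result, in, taps, N);
 *     printf("%f%+fi\n", lv_creal(result), lv_cimag(result)); // 0.000000+1024.000000i
 *
 *     volk_free(in);
 *     volk_free(taps);
 *     return 0;
 *   }
 */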