#ifndef INCLUDED_volk_32fc_x2_square_dist_32f_a_H
#define INCLUDED_volk_32fc_x2_square_dist_32f_a_H
12 static inline void volk_32fc_x2_square_dist_32f_a_sse3(
float* target,
lv_32fc_t* src0,
lv_32fc_t* points,
unsigned int num_points) {
14 const unsigned int num_bytes = num_points*8;
16 __m128 xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
20 int bound = num_bytes >> 5;
21 int leftovers0 = (num_bytes >> 4) & 1;
22 int leftovers1 = (num_bytes >> 3) & 1;
25 xmm1 = _mm_setzero_ps();
26 xmm1 = _mm_loadl_pi(xmm1, (__m64*)src0);
27 xmm2 = _mm_load_ps((
float*)&points[0]);
28 xmm1 = _mm_movelh_ps(xmm1, xmm1);
29 xmm3 = _mm_load_ps((
float*)&points[2]);
32 for(; i < bound - 1; ++i) {
33 xmm4 = _mm_sub_ps(xmm1, xmm2);
34 xmm5 = _mm_sub_ps(xmm1, xmm3);
36 xmm6 = _mm_mul_ps(xmm4, xmm4);
37 xmm7 = _mm_mul_ps(xmm5, xmm5);
39 xmm2 = _mm_load_ps((
float*)&points[0]);
41 xmm4 = _mm_hadd_ps(xmm6, xmm7);
43 xmm3 = _mm_load_ps((
float*)&points[2]);
45 _mm_store_ps(target, xmm4);
51 xmm4 = _mm_sub_ps(xmm1, xmm2);
52 xmm5 = _mm_sub_ps(xmm1, xmm3);
57 xmm6 = _mm_mul_ps(xmm4, xmm4);
58 xmm7 = _mm_mul_ps(xmm5, xmm5);
60 xmm4 = _mm_hadd_ps(xmm6, xmm7);
62 _mm_store_ps(target, xmm4);
66 for(i = 0; i < leftovers0; ++i) {
68 xmm2 = _mm_load_ps((
float*)&points[0]);
70 xmm4 = _mm_sub_ps(xmm1, xmm2);
74 xmm6 = _mm_mul_ps(xmm4, xmm4);
76 xmm4 = _mm_hadd_ps(xmm6, xmm6);
78 _mm_storeh_pi((__m64*)target, xmm4);
83 for(i = 0; i < leftovers1; ++i) {
85 diff = src0[0] - points[0];
95 #ifdef LV_HAVE_GENERIC
96 static inline void volk_32fc_x2_square_dist_32f_generic(
float* target,
lv_32fc_t* src0,
lv_32fc_t* points,
unsigned int num_points) {
98 const unsigned int num_bytes = num_points*8;
104 for(; i < num_bytes >> 3; ++i) {
105 diff = src0[0] - points[i];
/* lv_32fc_t (a typedef for `float complex`) and the lv_creal()/lv_cimag()
 * accessor macros are defined in volk/volk_complex.h. */