#ifndef INCLUDED_volk_16i_permute_and_scalar_add_a_H
#define INCLUDED_volk_16i_permute_and_scalar_add_a_H

#ifdef LV_HAVE_SSE2

#include <emmintrin.h>

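/*
 * volk_16i_permute_and_scalar_add
 *
 * For each output element i this kernel computes
 *
 *   target[i] = src0[permute_indexes[i]]
 *             + (cntl0[i] & scalars[0])
 *             + (cntl1[i] & scalars[1])
 *             + (cntl2[i] & scalars[2])
 *             + (cntl3[i] & scalars[3]);
 *
 * i.e. a permuted gather from src0 plus up to four masked scalar offsets,
 * where each cntlN buffer acts as a per-element bit mask selecting scalars[N].
 * The aligned loads/stores used by the SSE2 path below imply that target,
 * the cntlN buffers and scalars should be 16-byte aligned (the usual meaning
 * of VOLK's _a suffix).
 */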
static inline void volk_16i_permute_and_scalar_add_a_sse2(
    short* target,
    short* src0,
    short* permute_indexes,
    short* cntl0,
    short* cntl1,
    short* cntl2,
    short* cntl3,
    short* scalars,
    unsigned int num_points) {
    const unsigned int num_bytes = num_points * 2;
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    __m128i *p_target, *p_cntl0, *p_cntl1, *p_cntl2, *p_cntl3, *p_scalars;

    short* p_permute_indexes = permute_indexes;

    p_target = (__m128i*)target;
    p_cntl0 = (__m128i*)cntl0;
    p_cntl1 = (__m128i*)cntl1;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;
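    /* bound is the number of full 8-element (128-bit) vectors to process;
       leftovers is the number of trailing shorts (num_points % 8) handled
       by the scalar tail loop at the end. */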
    int i = 0;

    int bound = (num_bytes >> 4);
    int leftovers = (num_bytes >> 1) & 7;
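    /* Broadcast each of the four scalars across a full register:
       _mm_shufflelo_epi16 replicates scalars[k] into the low four 16-bit
       lanes, then _mm_shuffle_epi32(..., 0x00) copies dword 0 into every
       32-bit lane, so xmm1..xmm4 hold scalars[0..3] in all eight lanes. */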
    xmm0 = _mm_load_si128(p_scalars);

    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);
    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);
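    /* Main SSE2 loop: 8 output shorts per iteration. SSE2 has no 16-bit
       gather, so the permuted values are pulled in one at a time with
       _mm_insert_epi16, spread across four registers (xmm0, xmm5, xmm6,
       xmm7) that are summed afterwards rather than serializing all eight
       inserts on a single register. */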
    for(; i < bound; ++i) {

        xmm0 = _mm_setzero_si128();
        xmm5 = _mm_setzero_si128();
        xmm6 = _mm_setzero_si128();
        xmm7 = _mm_setzero_si128();
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[0]], 0);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[1]], 1);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[2]], 2);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[3]], 3);
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[4]], 4);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[5]], 5);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[6]], 6);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[7]], 7);
        xmm0 = _mm_add_epi16(xmm0, xmm5);
        xmm6 = _mm_add_epi16(xmm6, xmm7);

        p_permute_indexes += 8;

        xmm0 = _mm_add_epi16(xmm0, xmm6);
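        /* Load the four control vectors, mask each against its broadcast
           scalar, and accumulate the results into xmm0. */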
        xmm5 = _mm_load_si128(p_cntl0);
        xmm6 = _mm_load_si128(p_cntl1);
        xmm7 = _mm_load_si128(p_cntl2);

        xmm5 = _mm_and_si128(xmm5, xmm1);
        xmm6 = _mm_and_si128(xmm6, xmm2);
        xmm7 = _mm_and_si128(xmm7, xmm3);

        xmm0 = _mm_add_epi16(xmm0, xmm5);
        xmm5 = _mm_load_si128(p_cntl3);

        xmm6 = _mm_add_epi16(xmm6, xmm7);

        xmm5 = _mm_and_si128(xmm5, xmm4);

        xmm0 = _mm_add_epi16(xmm0, xmm6);

        xmm0 = _mm_add_epi16(xmm0, xmm5);

        _mm_store_si128(p_target, xmm0);

        /* Advance the block pointers for the next iteration. */
        p_cntl0++;
        p_cntl1++;
        p_cntl2++;
        p_cntl3++;
        p_target++;
    }
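    /* Scalar tail: finish the last (num_points % 8) elements that do not
       fill a whole vector. */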
    for(i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        target[i] = src0[permute_indexes[i]]
            + (cntl0[i] & scalars[0])
            + (cntl1[i] & scalars[1])
            + (cntl2[i] & scalars[2])
            + (cntl3[i] & scalars[3]);
    }
}
#endif /*LV_HAVE_SSE2*/

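/* Generic (plain C) fallback, used when no SIMD implementation is available. */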
#ifdef LV_HAVE_GENERIC

static inline void volk_16i_permute_and_scalar_add_generic(
    short* target,
    short* src0,
    short* permute_indexes,
    short* cntl0,
    short* cntl1,
    short* cntl2,
    short* cntl3,
    short* scalars,
    unsigned int num_points) {
    const unsigned int num_bytes = num_points * 2;

    int i = 0;

    int bound = num_bytes >> 1;

    for(i = 0; i < bound; ++i) {
        target[i] = src0[permute_indexes[i]]
            + (cntl0[i] & scalars[0])
            + (cntl1[i] & scalars[1])
            + (cntl2[i] & scalars[2])
            + (cntl3[i] & scalars[3]);