GNU Radio 3.7.3 C++ API
volk_16i_branch_4_state_8.h
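This VOLK kernel forms four result vectors of eight 16-bit values each (four branches by eight states, per the kernel name): every output is a permuted selection from src0 plus branch-dependent scalar terms and two control vectors masked by the remaining scalars. An aligned SSSE3 implementation and a generic C implementation are provided below.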
#ifndef INCLUDED_volk_16i_branch_4_state_8_a_H
#define INCLUDED_volk_16i_branch_4_state_8_a_H

#include <inttypes.h>
#include <stdio.h>


#ifdef LV_HAVE_SSSE3

#include <xmmintrin.h>
#include <emmintrin.h>
#include <tmmintrin.h>

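/* SSSE3 implementation.  Broadcasts the four 16-bit scalars across vector
   lanes, permutes one 128-bit block of src0 through each of the four byte
   shuffle masks in permuters, masks four blocks of cntl2 and cntl3 with the
   broadcast scalars[2] and scalars[3], and accumulates everything into four
   8-short result vectors written to target.  All buffers are accessed with
   aligned loads/stores, so they must be 16-byte aligned. */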
static inline void volk_16i_branch_4_state_8_a_ssse3(short* target, short* src0, char** permuters, short* cntl2, short* cntl3, short* scalars) {

  __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11;

  __m128i *p_target, *p_src0, *p_cntl2, *p_cntl3, *p_scalars;

  p_target = (__m128i*)target;
  p_src0 = (__m128i*)src0;
  p_cntl2 = (__m128i*)cntl2;
  p_cntl3 = (__m128i*)cntl3;
  p_scalars = (__m128i*)scalars;

  int i = 0;
  int bound = 1;

  /* broadcast scalars[0..3] into all eight 16-bit lanes of xmm1..xmm4 */
  xmm0 = _mm_load_si128(p_scalars);

  xmm1 = _mm_shufflelo_epi16(xmm0, 0);
  xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
  xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
  xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

  xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
  xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
  xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
  xmm4 = _mm_shuffle_epi32(xmm4, 0x00);

  /* load the four 16-byte shuffle masks */
  xmm0 = _mm_load_si128((__m128i*)permuters[0]);
  xmm6 = _mm_load_si128((__m128i*)permuters[1]);
  xmm8 = _mm_load_si128((__m128i*)permuters[2]);
  xmm10 = _mm_load_si128((__m128i*)permuters[3]);

  for(; i < bound; ++i) {

    xmm5 = _mm_load_si128(p_src0);

    /* permute the source block through each of the four masks */
    xmm0 = _mm_shuffle_epi8(xmm5, xmm0);
    xmm6 = _mm_shuffle_epi8(xmm5, xmm6);
    xmm8 = _mm_shuffle_epi8(xmm5, xmm8);
    xmm10 = _mm_shuffle_epi8(xmm5, xmm10);

    p_src0 += 4;

    /* branch-dependent scalar terms: result 0 gets scalars[0] + scalars[1],
       result 1 gets scalars[1], result 2 gets scalars[0], result 3 gets neither */
    xmm5 = _mm_add_epi16(xmm1, xmm2);

    xmm6 = _mm_add_epi16(xmm2, xmm6);
    xmm8 = _mm_add_epi16(xmm1, xmm8);

    /* each result also accumulates its cntl2 block masked by scalars[2]
       and its cntl3 block masked by scalars[3] */
    xmm7 = _mm_load_si128(p_cntl2);
    xmm9 = _mm_load_si128(p_cntl3);

    xmm0 = _mm_add_epi16(xmm5, xmm0);

    xmm7 = _mm_and_si128(xmm7, xmm3);
    xmm9 = _mm_and_si128(xmm9, xmm4);

    xmm5 = _mm_load_si128(&p_cntl2[1]);
    xmm11 = _mm_load_si128(&p_cntl3[1]);

    xmm7 = _mm_add_epi16(xmm7, xmm9);

    xmm5 = _mm_and_si128(xmm5, xmm3);
    xmm11 = _mm_and_si128(xmm11, xmm4);

    xmm0 = _mm_add_epi16(xmm0, xmm7);

    xmm7 = _mm_load_si128(&p_cntl2[2]);
    xmm9 = _mm_load_si128(&p_cntl3[2]);

    xmm5 = _mm_add_epi16(xmm5, xmm11);

    xmm7 = _mm_and_si128(xmm7, xmm3);
    xmm9 = _mm_and_si128(xmm9, xmm4);

    xmm6 = _mm_add_epi16(xmm6, xmm5);

    xmm5 = _mm_load_si128(&p_cntl2[3]);
    xmm11 = _mm_load_si128(&p_cntl3[3]);

    xmm7 = _mm_add_epi16(xmm7, xmm9);

    xmm5 = _mm_and_si128(xmm5, xmm3);
    xmm11 = _mm_and_si128(xmm11, xmm4);

    xmm8 = _mm_add_epi16(xmm8, xmm7);

    xmm5 = _mm_add_epi16(xmm5, xmm11);

    /* store the four 8-short results */
    _mm_store_si128(p_target, xmm0);
    _mm_store_si128(&p_target[1], xmm6);

    xmm10 = _mm_add_epi16(xmm5, xmm10);

    _mm_store_si128(&p_target[2], xmm8);
    _mm_store_si128(&p_target[3], xmm10);

    p_target += 3;
  }
}


#endif /*LV_HAVE_SSSE3*/

#ifdef LV_HAVE_GENERIC
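/* Generic reference implementation: for each branch i (0..3), produces eight
   outputs target[i*8 + j] = src0[permuters[i][2*j]/2] plus the
   branch-dependent scalars[0]/scalars[1] terms and the cntl2/cntl3 entries
   masked by scalars[2] and scalars[3]. */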
static inline void volk_16i_branch_4_state_8_generic(short* target, short* src0, char** permuters, short* cntl2, short* cntl3, short* scalars) {
  int i = 0;
  int bound = 4;

  for(; i < bound; ++i) {
    target[i*8]     = src0[((char)permuters[i][0])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8] & scalars[2])
                      + (cntl3[i*8] & scalars[3]);
    target[i*8 + 1] = src0[((char)permuters[i][1*2])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8 + 1] & scalars[2])
                      + (cntl3[i*8 + 1] & scalars[3]);
    target[i*8 + 2] = src0[((char)permuters[i][2*2])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8 + 2] & scalars[2])
                      + (cntl3[i*8 + 2] & scalars[3]);
    target[i*8 + 3] = src0[((char)permuters[i][3*2])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8 + 3] & scalars[2])
                      + (cntl3[i*8 + 3] & scalars[3]);
    target[i*8 + 4] = src0[((char)permuters[i][4*2])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8 + 4] & scalars[2])
                      + (cntl3[i*8 + 4] & scalars[3]);
    target[i*8 + 5] = src0[((char)permuters[i][5*2])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8 + 5] & scalars[2])
                      + (cntl3[i*8 + 5] & scalars[3]);
    target[i*8 + 6] = src0[((char)permuters[i][6*2])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8 + 6] & scalars[2])
                      + (cntl3[i*8 + 6] & scalars[3]);
    target[i*8 + 7] = src0[((char)permuters[i][7*2])/2]
                      + ((i + 1)%2 * scalars[0])
                      + (((i >> 1)^1) * scalars[1])
                      + (cntl2[i*8 + 7] & scalars[2])
                      + (cntl3[i*8 + 7] & scalars[3]);
  }
}

#endif /*LV_HAVE_GENERIC*/
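
/* A minimal usage sketch (not part of the original header, and guarded out of
   the build).  It assumes the buffer sizes implied by the generic kernel:
   32 output shorts, at least 8 source shorts, four 16-byte permuter tables
   whose even bytes select source shorts, 32 shorts per control vector, and
   four scalars. */
#if 0
static void example_usage(void)
{
  short target[32];
  short src0[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  short cntl2[32] = {0};              /* all-zero masks: scalars[2] term drops out */
  short cntl3[32] = {0};              /* all-zero masks: scalars[3] term drops out */
  short scalars[4] = {10, 20, 0, 0};

  /* identity byte-shuffle masks: byte j selects byte j of the source block */
  char perm[4][16];
  char* permuters[4] = {perm[0], perm[1], perm[2], perm[3]};
  int i, j;
  for(i = 0; i < 4; ++i) {
    for(j = 0; j < 16; ++j) {
      perm[i][j] = (char)j;
    }
  }

  volk_16i_branch_4_state_8_generic(target, src0, permuters, cntl2, cntl3, scalars);

  for(i = 0; i < 32; ++i) {
    printf("%d ", target[i]);
  }
  printf("\n");
}
#endif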


#endif /*INCLUDED_volk_16i_branch_4_state_8_a_H*/