#ifndef INCLUDED_volk_16i_branch_4_state_8_a_H
#define INCLUDED_volk_16i_branch_4_state_8_a_H

#ifdef LV_HAVE_SSSE3

#include <emmintrin.h>
#include <tmmintrin.h>
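/* SSSE3 implementation.
 *
 * One pass computes four branch blocks of eight 16-bit values each: the
 * source values in src0 are gathered through the byte-shuffle mask in
 * permuters[k] for branch k, per-branch offsets taken from scalars[0] and
 * scalars[1] are added, and the k-th eight-short blocks of cntl2 and cntl3
 * (masked by scalars[2] and scalars[3]) are accumulated on top.  The four
 * results are stored contiguously to target (32 shorts).  All pointers are
 * assumed 16-byte aligned, as expected of an "_a" kernel.
 */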
static inline void volk_16i_branch_4_state_8_a_ssse3(
short* target,
short* src0,
char** permuters,
short* cntl2,
short* cntl3,
short* scalars) {
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11;

    __m128i *p_target, *p_src0, *p_cntl2, *p_cntl3, *p_scalars;

    p_target = (__m128i*)target;
    p_src0 = (__m128i*)src0;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;

    int i = 0;
    int bound = 1; // a single pass computes all four branch blocks
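    // Broadcast each of the four control scalars across a full register:
    // after the shuffles, xmm1..xmm4 hold scalars[0]..scalars[3] repeated in
    // all eight 16-bit lanes.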
    xmm0 = _mm_load_si128(p_scalars);

    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);
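    // Load the four byte-shuffle masks, one per branch; they drive the
    // _mm_shuffle_epi8 gathers inside the loop.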
    xmm0 = _mm_load_si128((__m128i*)permuters[0]);
    xmm6 = _mm_load_si128((__m128i*)permuters[1]);
    xmm8 = _mm_load_si128((__m128i*)permuters[2]);
    xmm10 = _mm_load_si128((__m128i*)permuters[3]);

    for (; i < bound; ++i) {
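        // Gather: permute the bytes of src0 with each branch's mask so that
        // xmm0/xmm6/xmm8/xmm10 hold the eight selected source values per branch.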
        xmm5 = _mm_load_si128(p_src0);

        xmm0 = _mm_shuffle_epi8(xmm5, xmm0);
        xmm6 = _mm_shuffle_epi8(xmm5, xmm6);
        xmm8 = _mm_shuffle_epi8(xmm5, xmm8);
        xmm10 = _mm_shuffle_epi8(xmm5, xmm10);
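        // Per-branch scalar offsets: branch 0 gets scalars[0] + scalars[1],
        // branch 1 gets scalars[1], branch 2 gets scalars[0], branch 3 neither
        // (mirroring the (i + 1) % 2 and (i >> 1) ^ 1 terms of the generic code).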
        xmm5 = _mm_add_epi16(xmm1, xmm2);

        xmm6 = _mm_add_epi16(xmm2, xmm6);
        xmm8 = _mm_add_epi16(xmm1, xmm8);
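        // For branch k, load the k-th eight-short block of cntl2 and cntl3,
        // mask them with the broadcast scalars[2] / scalars[3], and add both
        // into that branch's running sum; loads and adds are interleaved.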
        xmm7 = _mm_load_si128(p_cntl2);
        xmm9 = _mm_load_si128(p_cntl3);

        xmm0 = _mm_add_epi16(xmm5, xmm0);

        xmm7 = _mm_and_si128(xmm7, xmm3);
        xmm9 = _mm_and_si128(xmm9, xmm4);

        xmm5 = _mm_load_si128(&p_cntl2[1]);
        xmm11 = _mm_load_si128(&p_cntl3[1]);

        xmm7 = _mm_add_epi16(xmm7, xmm9);

        xmm5 = _mm_and_si128(xmm5, xmm3);
        xmm11 = _mm_and_si128(xmm11, xmm4);

        xmm0 = _mm_add_epi16(xmm0, xmm7);

        xmm7 = _mm_load_si128(&p_cntl2[2]);
        xmm9 = _mm_load_si128(&p_cntl3[2]);

        xmm5 = _mm_add_epi16(xmm5, xmm11);

        xmm7 = _mm_and_si128(xmm7, xmm3);
        xmm9 = _mm_and_si128(xmm9, xmm4);

        xmm6 = _mm_add_epi16(xmm6, xmm5);

        xmm5 = _mm_load_si128(&p_cntl2[3]);
        xmm11 = _mm_load_si128(&p_cntl3[3]);

        xmm7 = _mm_add_epi16(xmm7, xmm9);

        xmm5 = _mm_and_si128(xmm5, xmm3);
        xmm11 = _mm_and_si128(xmm11, xmm4);

        xmm8 = _mm_add_epi16(xmm8, xmm7);

        xmm5 = _mm_add_epi16(xmm5, xmm11);
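        // Store the four finished eight-short branch blocks contiguously to target.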
        _mm_store_si128(p_target, xmm0);
        _mm_store_si128(&p_target[1], xmm6);

        xmm10 = _mm_add_epi16(xmm5, xmm10);

        _mm_store_si128(&p_target[2], xmm8);

        _mm_store_si128(&p_target[3], xmm10);
    }
}

#endif /*LV_HAVE_SSSE3*/

#ifdef LV_HAVE_GENERIC
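/* Generic (portable) reference implementation, one loop iteration per branch.
 * permuters[i][2 * j] is a byte index into src0, hence the division by 2 to
 * index 16-bit values; it is intended to produce the same 32 outputs as the
 * SSSE3 kernel above.
 */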
static inline void volk_16i_branch_4_state_8_generic(
short* target,
short* src0,
char** permuters,
short* cntl2,
short* cntl3,
short* scalars) {
    int i = 0;
    int bound = 4; // one pass per branch

    for (; i < bound; ++i) {
        target[i * 8] = src0[((char)permuters[i][0]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8] & scalars[2])
                        + (cntl3[i * 8] & scalars[3]);
        target[i * 8 + 1] = src0[((char)permuters[i][1 * 2]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8 + 1] & scalars[2])
                        + (cntl3[i * 8 + 1] & scalars[3]);
        target[i * 8 + 2] = src0[((char)permuters[i][2 * 2]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8 + 2] & scalars[2])
                        + (cntl3[i * 8 + 2] & scalars[3]);
        target[i * 8 + 3] = src0[((char)permuters[i][3 * 2]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8 + 3] & scalars[2])
                        + (cntl3[i * 8 + 3] & scalars[3]);
        target[i * 8 + 4] = src0[((char)permuters[i][4 * 2]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8 + 4] & scalars[2])
                        + (cntl3[i * 8 + 4] & scalars[3]);
        target[i * 8 + 5] = src0[((char)permuters[i][5 * 2]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8 + 5] & scalars[2])
                        + (cntl3[i * 8 + 5] & scalars[3]);
        target[i * 8 + 6] = src0[((char)permuters[i][6 * 2]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8 + 6] & scalars[2])
                        + (cntl3[i * 8 + 6] & scalars[3]);
        target[i * 8 + 7] = src0[((char)permuters[i][7 * 2]) / 2]
                        + ((i + 1) % 2 * scalars[0])
                        + (((i >> 1) ^ 1) * scalars[1])
                        + (cntl2[i * 8 + 7] & scalars[2])
                        + (cntl3[i * 8 + 7] & scalars[3]);