#ifndef INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H
#define INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H
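
/*
 * Kernel: adds one common vector to four other vectors of 16-bit samples:
 *   target0[n] = src0[n] + src1[n]
 *   target1[n] = src0[n] + src2[n]
 *   target2[n] = src0[n] + src3[n]
 *   target3[n] = src0[n] + src4[n]
 * for n in [0, num_points). This is the aligned ("_a") variant, so all nine
 * buffers should be 16-byte aligned (for example, allocated with
 * volk_malloc(num_points * sizeof(short), volk_get_alignment()) in current
 * VOLK; that call is illustrative, not required by this header).
 */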
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

static inline void volk_16i_x5_add_quad_16i_x4_a_sse2(short* target0,
                                                       short* target1,
                                                       short* target2,
                                                       short* target3,
                                                       short* src0,
                                                       short* src1,
                                                       short* src2,
                                                       short* src3,
                                                       short* src4,
                                                       unsigned int num_points) {
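    /* SSE2 path: each iteration processes eight 16-bit samples per stream;
       any remaining samples are handled by the scalar loop below. */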
    const unsigned int num_bytes = num_points * 2;

    __m128i xmm0, xmm1, xmm2, xmm3, xmm4;
    __m128i *p_target0, *p_target1, *p_target2, *p_target3, *p_src0, *p_src1,
        *p_src2, *p_src3, *p_src4;

    p_target0 = (__m128i*)target0;
    p_target1 = (__m128i*)target1;
    p_target2 = (__m128i*)target2;
    p_target3 = (__m128i*)target3;

    p_src0 = (__m128i*)src0;
    p_src1 = (__m128i*)src1;
    p_src2 = (__m128i*)src2;
    p_src3 = (__m128i*)src3;
    p_src4 = (__m128i*)src4;
    int i = 0;
    int bound = (num_bytes >> 4);         /* number of complete 8-sample (16-byte) vectors */
    int leftovers = (num_bytes >> 1) & 7; /* samples remaining after the vectorized loop */
    for(; i < bound; ++i) {
        xmm0 = _mm_load_si128(p_src0);
        xmm1 = _mm_load_si128(p_src1);
        xmm2 = _mm_load_si128(p_src2);
        xmm3 = _mm_load_si128(p_src3);
        xmm4 = _mm_load_si128(p_src4);

        /* advance each source pointer by one 128-bit vector (8 shorts) */
        p_src0 += 1; p_src1 += 1; p_src2 += 1; p_src3 += 1; p_src4 += 1;

        /* add the shared vector to each of the other four */
        xmm1 = _mm_add_epi16(xmm0, xmm1);
        xmm2 = _mm_add_epi16(xmm0, xmm2);
        xmm3 = _mm_add_epi16(xmm0, xmm3);
        xmm4 = _mm_add_epi16(xmm0, xmm4);

        _mm_store_si128(p_target0, xmm1);
        _mm_store_si128(p_target1, xmm2);
        _mm_store_si128(p_target2, xmm3);
        _mm_store_si128(p_target3, xmm4);

        p_target0 += 1; p_target1 += 1; p_target2 += 1; p_target3 += 1;
    }
    /* scalar tail for the samples that do not fill a whole vector */
    for(i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        target0[i] = src0[i] + src1[i];
        target1[i] = src0[i] + src2[i];
        target2[i] = src0[i] + src3[i];
        target3[i] = src0[i] + src4[i];
    }
}
#endif /* LV_HAVE_SSE2 */
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16i_x5_add_quad_16i_x4_neon(short* target0,
                                                     short* target1,
                                                     short* target2,
                                                     short* target3,
                                                     short* src0,
                                                     short* src1,
                                                     short* src2,
                                                     short* src3,
                                                     short* src4,
                                                     unsigned int num_points) {
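    /* NEON path: each iteration loads eight 16-bit samples from every source,
       forms the four sums, and stores eight results per target. */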
    const unsigned int eighth_points = num_points / 8;
    unsigned int number = 0;

    int16x8_t src0_vec, src1_vec, src2_vec, src3_vec, src4_vec;
    int16x8_t target0_vec, target1_vec, target2_vec, target3_vec;
    for(number = 0; number < eighth_points; ++number) {
        src0_vec = vld1q_s16(src0);
        src1_vec = vld1q_s16(src1);
        src2_vec = vld1q_s16(src2);
        src3_vec = vld1q_s16(src3);
        src4_vec = vld1q_s16(src4);

        target0_vec = vaddq_s16(src0_vec, src1_vec);
        target1_vec = vaddq_s16(src0_vec, src2_vec);
        target2_vec = vaddq_s16(src0_vec, src3_vec);
        target3_vec = vaddq_s16(src0_vec, src4_vec);

        vst1q_s16(target0, target0_vec);
        vst1q_s16(target1, target1_vec);
        vst1q_s16(target2, target2_vec);
        vst1q_s16(target3, target3_vec);

        /* advance all pointers by one vector of eight 16-bit samples */
        src0 += 8; src1 += 8; src2 += 8; src3 += 8; src4 += 8;
        target0 += 8; target1 += 8; target2 += 8; target3 += 8;
    }
    /* scalar tail; the pointers were already advanced by the vector loop */
    for(number = eighth_points * 8; number < num_points; ++number) {
        *target0++ = *src0 + *src1++;
        *target1++ = *src0 + *src2++;
        *target2++ = *src0 + *src3++;
        *target3++ = *src0++ + *src4++;
    }
}
#endif /* LV_HAVE_NEON */
#ifdef LV_HAVE_GENERIC

static inline void volk_16i_x5_add_quad_16i_x4_generic(short* target0,
                                                        short* target1,
                                                        short* target2,
                                                        short* target3,
                                                        short* src0,
                                                        short* src1,
                                                        short* src2,
                                                        short* src3,
                                                        short* src4,
                                                        unsigned int num_points) {
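    /* Generic scalar fallback: one sample per iteration for all four outputs. */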
    const unsigned int num_bytes = num_points * 2;

    int i = 0;
    int bound = num_bytes >> 1;  /* total number of 16-bit samples */
    for(i = 0; i < bound; ++i) {
        target0[i] = src0[i] + src1[i];
        target1[i] = src0[i] + src2[i];
        target2[i] = src0[i] + src3[i];
        target3[i] = src0[i] + src4[i];
    }
}
#endif /* LV_HAVE_GENERIC */

#endif /* INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H */